drivers/thirdparty: put release-drivers in tree

Only the mlnx driver tgz is very big; the other drivers' sources are small.
So, remove the release-drivers submodule (sub git repo) and carry the driver
sources directly in the tree.

Signed-off-by: Jianping Liu <frankjpliu@tencent.com>
Reviewed-by: Yongliang Gao <leonylgao@tencent.com>
Author: Jianping Liu
Date:   2024-08-27 17:06:46 +08:00
parent d791bba469
commit 3db1e8157b
263 changed files with 354491 additions and 50 deletions
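For reference, the mechanics of such a conversion look roughly like the following; this is a hypothetical reproduction sketch, not the recorded commands of this commit:

    # Drop the submodule registration and its gitlink...
    git submodule deinit -f drivers/thirdparty/release-drivers
    git rm -f drivers/thirdparty/release-drivers   # also removes the .gitmodules entry
    rm -rf .git/modules/drivers/thirdparty/release-drivers
    # ...then vendor the sources as ordinary tracked files.
    cp -a /path/to/release-drivers drivers/thirdparty/
    git add drivers/thirdparty/release-drivers
    git commit -s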

.gitmodules (vendored)

@@ -1,6 +1,3 @@
 [submodule "kernel/tkernel/outtree/emm"]
 	path = kernel/tkernel/outtree/emm
 	url = git@git.woa.com:tlinux/EMM/kmod-emm.git
-[submodule "drivers/thirdparty/release-drivers"]
-	path = drivers/thirdparty/release-drivers
-	url = https://gitee.com/OpenCloudOS/release-drivers.git


@@ -3,26 +3,11 @@
 check_url_reachable()
 {
-	curl -I https://gitee.com 1>/dev/null 2>&1 || exit 0
 	curl -I https://content.mellanox.com 1>/dev/null 2>&1 || exit 0
 }
-thirdparty_clone_git(){
-	if [ $(stat -c%s release-drivers.tgz) -gt 1024 ]; then
-		tar -zxf release-drivers.tgz ; return 0
-	fi
-	## If clone git fail, using the kernel native drivers to compile.
-	timeout 600 git clone -q https://gitee.com/OpenCloudOS/release-drivers.git || exit 0
-	rm -f release-drivers.tgz ; rm -rf release-drivers/.git ; tar -zcf release-drivers.tgz release-drivers
-}
-thirdparty_rm_git(){
-	rm -rf release-drivers
-}
 thirdparty_mlnx(){
-	mlnx_tgz_url=$(release-drivers/mlnx/get_mlnx_info.sh mlnx_url)
-	mlnx_tgz_name=$(release-drivers/mlnx/get_mlnx_info.sh mlnx_tgz_name)
+	mlnx_tgz_url=$(../../drivers/thirdparty/release-drivers/mlnx/get_mlnx_info.sh mlnx_url)
+	mlnx_tgz_name=$(../../drivers/thirdparty/release-drivers/mlnx/get_mlnx_info.sh mlnx_tgz_name)
 	get_mlnx_tgz_ok=1
 	if [ $(stat -c%s ${mlnx_tgz_name}) -gt 1024 ]; then return 0; fi
@@ -41,10 +26,6 @@ thirdparty_mlnx(){
 ##
 check_url_reachable
-thirdparty_clone_git
 thirdparty_mlnx
 echo "Having downloaded thirdparty drivers."
-thirdparty_rm_git
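Read together, the two hunks above leave this download script with a single job: fetch the MLNX OFED tarball when only the sub-1024-byte placeholder is present. A sketch of the resulting flow, reassembled from the surviving lines; the body of thirdparty_mlnx past the fold, including the actual download command, is collapsed in this view and elided here:

    #!/bin/bash
    check_url_reachable()
    {
    	# Exit quietly when the vendor site is unreachable (offline build);
    	# the kernel's in-tree drivers are used instead.
    	curl -I https://content.mellanox.com 1>/dev/null 2>&1 || exit 0
    }
    thirdparty_mlnx()
    {
    	mlnx_tgz_url=$(../../drivers/thirdparty/release-drivers/mlnx/get_mlnx_info.sh mlnx_url)
    	mlnx_tgz_name=$(../../drivers/thirdparty/release-drivers/mlnx/get_mlnx_info.sh mlnx_tgz_name)
    	# A real tarball is far larger than the placeholder kept in git.
    	if [ $(stat -c%s ${mlnx_tgz_name}) -gt 1024 ]; then return 0; fi
    	# ... download ${mlnx_tgz_url} (collapsed in the hunk above) ...
    }
    check_url_reachable
    thirdparty_mlnx
    echo "Having downloaded thirdparty drivers."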

Binary file not shown.


@@ -194,9 +194,8 @@ Source2001: cpupower.config
 ### Used for download thirdparty drivers
 # Start from Source3000 to Source3099, for thirdparty release drivers
 Source3000: download-and-copy-drivers.sh
-Source3001: release-drivers.tgz
-Source3002: MLNX_OFED_LINUX-23.10-3.2.2.0-rhel9.4-x86_64.tgz
-Source3003: install.sh
+Source3001: MLNX_OFED_LINUX-23.10-3.2.2.0-rhel9.4-x86_64.tgz
+Source3002: install.sh
 ###### Kernel package definations ##############################################
 ### Main meta package
@@ -635,8 +634,7 @@ BuildConfig() {
 	if [ -e ../../dist/sources ]; then
 		./copy-drivers.sh
 	else
-		cp -a %{SOURCE3001} ./ ; tar -zxf release-drivers.tgz ; rm -f release-drivers.tgz
-		cp -a %{SOURCE3002} release-drivers/mlnx/
+		cp -a %{SOURCE3001} release-drivers/mlnx/
 	fi
 	popd
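For readers who do not live in rpm specs: %{SOURCEnnnn} expands to the absolute path of the file declared by the matching Sourcennnn tag, so the fallback branch simply copies the MLNX tarball out of the rpm sources directory. Roughly, assuming the default rpmbuild layout (the ~/rpmbuild prefix is an assumption here):

    # Illustrative expansion of the fallback branch above:
    #   %{SOURCE3001} -> ~/rpmbuild/SOURCES/MLNX_OFED_LINUX-23.10-3.2.2.0-rhel9.4-x86_64.tgz
    cp -a ~/rpmbuild/SOURCES/MLNX_OFED_LINUX-23.10-3.2.2.0-rhel9.4-x86_64.tgz release-drivers/mlnx/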
@@ -1264,7 +1262,7 @@ BuildInstMLNXOFED() {
 	# Compress it into a new tgz file.
 	if [[ "${DISTRO}" != "tl3" ]]; then
 		## "${DISTRO}" == "tl4" or "${DISTRO}" == "oc9"
-		mlnxfulname=$(basename %{SOURCE3002})
+		mlnxfulname=$(basename %{SOURCE3001})
 		mlnxrelease=${mlnxfulname%.*}
 	else
 		## "${DISTRO}" == "tl3"
@@ -1274,8 +1272,8 @@
 	# Turn it back to the original file
 	sed -i 's/! -z $JUMP_ROOT/$UID -ne 0/g' $mlnxrelease-ext.$KernUnameR/mlnx_add_kernel_support.sh
 	cp -r $signed $mlnxrelease-ext.$KernUnameR/ko_files.signed
-	sed -i "s/KERNELMODULE_REPLACE/$KernUnameR/g" %{SOURCE3003}
-	cp -r ko.location %{SOURCE3003} $mlnxrelease-ext.$KernUnameR/
+	sed -i "s/KERNELMODULE_REPLACE/$KernUnameR/g" %{SOURCE3002}
+	cp -r ko.location %{SOURCE3002} $mlnxrelease-ext.$KernUnameR/
 	tar -zcvf $mlnxrelease-ext.$KernUnameR.tgz $mlnxrelease-ext.$KernUnameR
 	mkdir %{buildroot}/mlnx/
 	install -m 755 $mlnxrelease-ext.$KernUnameR.tgz %{buildroot}/mlnx/
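A quick worked example of the name derivation above: basename strips the directory part and ${var%.*} drops everything from the last dot, so

    mlnxfulname=$(basename /path/to/MLNX_OFED_LINUX-23.10-3.2.2.0-rhel9.4-x86_64.tgz)
    # mlnxfulname = MLNX_OFED_LINUX-23.10-3.2.2.0-rhel9.4-x86_64.tgz
    mlnxrelease=${mlnxfulname%.*}
    # mlnxrelease = MLNX_OFED_LINUX-23.10-3.2.2.0-rhel9.4-x86_64

which is the directory name the -ext.$KernUnameR tree is built from.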


@@ -1,26 +1,18 @@
 #!/bin/bash
 thirdparty_prepare_source_code(){
-	if [ ! -e release-drivers/.git ] ; then
-		## Real release-drivers.tgz will more than 1024 bytes.
-		## Real release-drivers.tgz will less than 1024bytes.
-		if [ $(stat -c%s ../../dist/sources/release-drivers.tgz) -gt 1024 ]; then
-			cp -a ../../dist/sources/release-drivers.tgz ./ ; rm -rf release-drivers
-			tar -zxf release-drivers.tgz ; rm -f release-drivers.tgz
-		else
-			../../dist/sources/download-and-copy-drivers.sh
-		fi
-	fi
 	mlnx_tgz_name=$(release-drivers/mlnx/get_mlnx_info.sh mlnx_tgz_name)
 	if [ ! -e release-drivers/mlnx/${mlnx_tgz_name} ] ; then
-		if [ $(stat -c%s ../../dist/sources/${mlnx_tgz_name}) -gt 1024 ]; then
-			cp -a ../../dist/sources/${mlnx_tgz_name} release-drivers/mlnx/ ; return 0
+		## This script will only be called when existing ../../dist/sources dir
+		if [ $(stat -c%s ../../dist/sources/${mlnx_tgz_name}) -lt 1024 ]; then
+			## Real MLNX_OFED_LINUX-*.tgz will more than 1024 bytes.
+			pushd ../../dist/sources
+			./download-and-copy-drivers.sh
+			popd
 		fi
-		if [ -e ${mlnx_tgz_name} ]; then
-			mv -f ${mlnx_tgz_name} release-drivers/mlnx/ ; return 0
-		fi
-		../../dist/sources/download-and-copy-drivers.sh ; mv -f ${mlnx_tgz_name} release-drivers/mlnx/
+		cp -a ../../dist/sources/${mlnx_tgz_name} release-drivers/mlnx/
 	fi
 }
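Both scripts rely on the same placeholder convention: stat -c%s prints a file's size in bytes, and anything at or under 1024 bytes is treated as a stub standing in for a tarball that still has to be fetched. A minimal sketch of the test; the helper name is illustrative, not from the tree:

    # Anything under ~1 KiB is a placeholder, anything larger a real tarball.
    is_real_tarball()
    {
    	[ "$(stat -c%s "$1")" -gt 1024 ]
    }
    is_real_tarball "${mlnx_tgz_name}" || echo "placeholder only, need to download"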

@@ -1 +0,0 @@
-Subproject commit b61dece794c04d9d00421929d0dda87cee8512a3


@@ -0,0 +1,339 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.

File diff suppressed because it is too large.


@@ -0,0 +1,262 @@
d8062fa8ac8fda00de2d2d83a3a822919f50eec96fa384748029c26cdde5629a032da0dbfe2f6769563c9a1be58ab9bfef605c0051aac80a5823cd471037db16 bnxt.c
153c37da2563e1c2247dfdb1abfe2489a40e600714c1b8172582971c663c87b0b92123061af4e667e8abfb3875921c049acd2ccd9d6be309cfd9415f6c86dd3d bnxt_auxbus_compat.c
f28d2ad47cd8de7c4a7ad426cc0ec3e37dd5e3df3285d39876bf8784ee573bee58cf97d87e3ca611abe222fdf7524f12833d83cba8dd348ed0f470ad2775ee48 bnxt_auxbus_compat.h
5d5a81221983ea0c0563ee0989afc835d4a48dbccbadbe56df3e7ea8d6142886436c3006031b152df6f4c6a09829632542afd27d2ceb818fad2a7d49747d8bfe bnxt_compat.h
23741c4e97e4c0affda78daa1e57f6f83cb79667ba2974646f428889c9126771b52e8c9c7788999dda4149cf808e1042a35e9ed23f33803c2010875594f21121 bnxt_compat_link_modes.c
774008f5a76b5c3c689733742223c4490ceb33c82e56d29affeefa7ac78cde27627d881047776b3cee4bced34529788ece1ea32b8420ec259238dc120e0eba7b bnxt_compat_link_modes.h
a57a5a9b7a53ad1a673b91efc7d7407d39a1c16d63b2768a4293e1464b723cc750a981906c63c940fd69934bd2f321941f9f4622b4c081894ab3df7b94baa2a0 bnxt_coredump.c
11f7a08bf85c1cb7cc33f95afba9e12a452f480a7a9219bc99a22f29372c8e017eb87ee30fffa4d0c4789156f04dfe9920d374c603889afe17cc9617bfae3144 bnxt_coredump.h
4c9ab1cef6af1e6c339d9989365ecaa5e3333d6e1a81fdaa43cb8723f3971e1f6c9cfd11d7cd65a917c7533b282ba0a6f3f35291224f6149efb1e6f9c60b47ca bnxt_dbr.h
5711c989b1b271f75396a12254572031a49e9c7488dd38328b88de5828fd24e77484161ac5cb576d62e6a9f7f4ade49b1555e5453d5efa83fc8f45b5a19d1a82 bnxt_dcb.c
9cc26634f820e64f5f39ad169e671b9bb6c0d8f7216e5658b96e52dfa9940342abda4949af92932ac9c9943b20145fff08a3bd0890aeb341cc8dbf57d56fe585 bnxt_dcb.h
865a878d10ef7738d664bdb5dcba9cdbe8f529ffc4619a84278d5198f1f0485eddd7736ba7866d4db3ecd1d590e24744578e8b9b64afaa2b8099ccbd7b03de16 bnxt_debugfs.c
39da82080f3a2580f54da7c574cc802cc3457c68dbc445f8364771e5694e6023cc4a617649d36c7a5b719f237ec05dafff61440326eda6e549a2b32e2ee33706 bnxt_debugfs_cpt.c
1ee1e56cdf043482839d6017769fb13df968a62e988861ea26590b0753b30720377984b80a0280fe4cfcb446ca9290e2e164554d7c12abc78d05044a7b50ca07 bnxt_debugfs.h
7718e925e8e66ff3c56fe8866746c4922d87cbce78fdb043122d642b2838e45363dda125b743a41948103b029b4e2f8a784d9c6b59cdde83b45ec218f83b024d bnxt_devlink.c
977a1016a5d95ad5b8d7da4384c13777b655c386067de7eca76a63cc16eea491fff70f0b5a3339ad32338f884f92a156b870f22a32cfa719aa5f370171f0efc9 bnxt_devlink.h
2e382a8511e9cd914ab05e38dd1e6cf4b6124749acc3c02d8a4d8b290b836fc35a6092e6294247804138900d27e9c052e6465cc658bd93bf9cccdabe7f14a5ea bnxt_dim.c
e0a94914c20021944f64f24fb4e46732a84e90b4923e588e412a8685075ace14745c7dba710432a334d693bac01fa36822c9debb11858dfc80464fcc29f722d7 bnxt_dim.h
2f661ffcae1ebc74a008ee21718d935ce3d35e8f47ea0f7d59d6a03b2d492b2285501f3228c9b80212c4d149e940954bd0b3b11f9720ed9872de61fbaff28b75 bnxt_ethtool.c
47465e9bdf574083906e993a0a91b9a12d689be58f75b2c7867aa3012234ec1cc4926098a6379fc9a76d90e64ed2c4cfe6e0e0feb1b3253dc34748efe9119527 bnxt_ethtool.h
4d2831d6b347c8317b3bf9fd28142c4abac44d18d666ae35518ddb7f9829f4c0b46d5be67b34800ec3be6a59456b562f76353402bb62e9b2e34b21567437785b bnxt_ethtool_compat.c
6da387c84a806102100ba0febe41f760a55874cd2055c936355775473c22791bc3588de1b632e8d92d819c1b0676690665f1f0678096b8b3e00617110d08eb33 bnxt_extra_ver.h
66cd6f263fdf9e54673b6b75ae2417538631e17e9e65c463b52bfb45ba57b906940ddece0b24fd0eb659c00208fd5fe6c777db430e167168abc700a2cf3786e9 bnxt_fw_hdr.h
b389f7c28cd5383caed0c266edfaeb732b740c3dcdc85b2862886e62b96e978ef6e5f950e43dc6fb926f534b31120d4cc9a00559911dc61d8fded4b3d59e5108 bnxt.h
54288743fcc81dc99667e62dfde8091017abe475fd36882edd4b45b37edc5d877f1b71dda3deb59e9195d2a438545ef4beedce0ad493e6fc778173740600d319 bnxt_hdbr.c
21fcddeb6fbccbaa8d2ef5e8ed967786d214b15e7d672d33e94c077c1dcb4c0bb7bd4f393edcf0305abc55767dea343b76cd806afb780cdecbb8d27e9d4ea8ac bnxt_hdbr.h
4e3c8849724cf3992299bd11e20c1ccadbc875571186686588625ecd5860547ae5ef4f2bbda7792168ea957e8ad93477fa432335f0bb633ab77ecbfcd7409f18 bnxt_hsi.h
673752951d2faa0a99b6c159e46b7cf97af4340af679a1f4b2b1f727947d028ef18e3ab812f3a61aa932c55bb4fb609f0bb8cd72ee72fd5cb61a7b73e6802101 bnxt_hwmon.c
1d1dbfd77dd882dd1634abf9e3bacec39dc72563346c3eef4f51eab3c46e7d11e1b156ef4ef207b116b50a7290cdc2e98a671dcaa29ba7b1abbdf477864f3fcd bnxt_hwmon.h
a03972f83fdc74dea86d7152bfe1ea6480005de19964336e4de385f8d453d38e4aaff9c4472c38644e976520be6e216e44ff7dddc773346929d98a7e332b40f5 bnxt_hwrm.c
69987119457af51b1a1203568579591e86be7aabd43832fff5a4b07fd335f2d56175436ce5c25843f61b0157b08f42c1cf3fb35f73113d5aa922d6ea177e9999 bnxt_hwrm.h
758af108fa5977fc6214287a4940ce98e906f27dc1b7273e5a806a3004bc0d5d177c9983c354aeefd3b091ad6286e9a32cc9a27a55d9aff6170ad13a3f47e418 bnxt_lfc.c
1f61dfbe6066e6dd03ac33116486c737994011e57865bb54be854784fd285ee118f7425c4526f5db0161b30f2cc6b1a718cb843d6f066cc9f2ec22077f557677 bnxt_lfc.h
23e4e252721b87518afc355a506959b9584b97d4828609eb6cd758b863e931a4db7a6895958a09f923fe13c4b5f56bcbc1b1c6c782caef6d80f96ada18610a59 bnxt_lfc_ioctl.h
b818313b24a48335a729d27cc48954ebc3a638918a82490ca54d9c93b6664aaa075ffed431b9b7bb1e7572b29dbaa3ca6bccbd7b2efe853ae9128151fc61fe1e bnxt_log.c
db7118ce77a5cced614cef364c60b7be03eea34cfa5a197f72d3deb00eb31e4658712d78e714fd2ae57f8183afbf04e18cd18d404be0e96a3af59015515e4095 bnxt_log.h
a59548bab06706c1593081c1daf2c335135f0fbe384a65da66357504cd2e9ac3b57a3da6a10479bd7646b3f7f3ce4dc8c7588f9c3c482f62c39de50531069088 bnxt_log_data.c
35348f7ea785a47175d05316d62afd99675bfbcc280ecd45b5fe9163959d0a1eccf5856e8ac23f6e4de549bd83a377b8e83efd1892ffce62c55b8ce2ba795432 bnxt_log_data.h
88064db8a603665eec9b6ebac9a29849a3b6fa4be3ef17dc8d47aedc66294f1a4d18ffad1ac6868e613c334d8755ddfbd5cba4a3dc8053c1a1f4f4ecb881064b bnxt_mpc.c
b131c6759654b98d3ef4e3afed9720954187dcaa01d1bfceb62b7c300b6b0637c98f7439d05ea83bdc8b2974f24a771177b8f8d784da2eabedb469ed8d489a16 bnxt_mpc.h
90a8f6020843fbebabb5b6b89d94818188b9072f9e0d3316864efaca9a21f0a59f4e75f27db8869efdc5d959241ab6d6d815029e325443fea46c203987ae3035 bnxt_tfc.c
3cd3f83d5b32bfd3d845399f8403b1e72ba63c4dc36b3a482ecf344f7a9ab55588067478da720c464603c26fe3d8587bc913bf45252abe628d2e0f4ae1ceb8da bnxt_tfc.h
0672b9d4224ab7e0ecd9b05c1bf4515b4ac58713b819a36650292968d4254957f3ba9f475dc195f9a9fe7b272fbf6c732d59ff37aacadac3f15033031e053eb4 bnxt_ktls.c
68bebdc9b4b5552d468d01546cac163ab765e71535499c69c8b0891d1f4b36d6040dcd05fc7bcf9bf9b949a4ea1d493907790cb73c781b0c9ac6003753f792fc bnxt_ktls.h
2e1f15e3444b3856f4d6c1c45e671a21b0f2deee5e4ece48d1c9a87955bd0d8b9bc0ce9cd5a04dd9d52967e2d1f060e222ac3eab13a15b78345273afb1d6a0e1 bnxt_nic_flow.c
cb761b989dd49a79345056dc9d17a17bb18faa70f073197e7399ae7c2c929b81ef745467c9962d988b8329f4b733b4c4c3d5e98406676d692e1cd7fdfbb7b12a bnxt_nic_flow.h
59043b84f804c967d4836d0f1d3ba2a1256597fecf79213a02e48d6c686b7310b5aa1c25c98aff3a78b402a7bf1c1ff0e1cc80c46206c368dd0524ede28ebf6b bnxt_nvm_defs.h
17f9eac2aee974b70e9b70b7eaac5bac9dd12c80247472148112d96984b9a952b1374764ba7cce715e90ad7018d878d9188732c9671bad0732aee420638282e5 bnxt_ptp.c
e06fd97184d4830516c2cb6342a46a1ee11d31299db6ee20151a357d1d83bd7faa94333efa35bb27cbd7bb04978ae7e9cc3615fb8d776d9ab9b428e42c9673a3 bnxt_ptp.h
0c7abe7d3c9879e5937e28f76e1f7d1aee95fd3334bb917a9263047925e33db0e16e17484c2a5e5d8c08d5c4652ee9ac3dd7d18ff54aad20960d71b3b9b39924 bnxt_sriov.c
097d189f8242517a0d78bde0f65d8e19cf0d80d84c74f12af4fb48ad894665e5a6afd6acd5f4ba4fb21637b5ec3e1b9b3b42817dd8a55cce6a7d9f0b9c35133d bnxt_sriov.h
e5060729dae5bc3fb795bb132828fbcc9012b4cdde93c37f1a9887e017380b34fab01a8b3efc826dabe4a7d8321e42cb3404a61d642e9c974702a206838b80a2 bnxt_tc.c
1af9e9768c3543bf23b79e75d7fe41340123fde4d01b74c58f826d8b61680514c558897af4904a6a50b9926d4b893d93c05955161cc769c3dd433174535aa738 bnxt_tc_compat.h
b20290dd8b4ad48229fa3d34c8b4adae98885403f49c77743e9eac0e97d0b9f1d514be9a243b50893238827b64308d0ed78f4aa32b87400b5f5ad2e0bc206bc2 bnxt_tc.h
90a8f6020843fbebabb5b6b89d94818188b9072f9e0d3316864efaca9a21f0a59f4e75f27db8869efdc5d959241ab6d6d815029e325443fea46c203987ae3035 bnxt_tfc.c
3cd3f83d5b32bfd3d845399f8403b1e72ba63c4dc36b3a482ecf344f7a9ab55588067478da720c464603c26fe3d8587bc913bf45252abe628d2e0f4ae1ceb8da bnxt_tfc.h
db0ee5c9700679a4f9e05f08d3acbbb94d62a68c25406bcbde859a1645f3f3228445d26f1872d96682d0d032a90fcbfbbbaa6997bdbdfe9d8b6b6b63adbaba9a bnxt_ulp.c
eaf72af9c36e406dea2ae1ecdfc123b04e5def8503d71d9886c5ed2a019a3014e94b0fd0537b2ae309fe3493c030ab4551f0cfcf119d524d4d61d7a09f0fc346 bnxt_ulp.h
2c7f7ef6da98627b4f46619b0a0b9f26c6a9fca5aef4b9b095fea213966b97e9696933846bb6c192a0960dd60887826e2eb25d9835b22ab7a86ae4ab09ec45a5 bnxt_vfr.c
3568df756f0a67ce434fa6fa2cc6065829b237619a6109da1e6052ad0530847d2e772e3c1f9926e794c8e2022b76edd2b10d53f9a27a779234a36b942733e792 bnxt_vfr.h
15cad5c54432609edc57e4776107c40f9d1e8a5e34182d1fb9e04ff41875aca295ef3deea1c5653ab2b52add8e9b45aacf88ebf01d61e89756cd494887141a0b bnxt_xdp.c
c9c8db6b7fc6b1c4c8f7159ee48b16fe96c4eabcd724318aab84e05e603a92947911d493d8d33c3a1d2645fd5586b0fe6b8d19343dbf8242d82d4eff1bdd2824 bnxt_xdp.h
7203f8e3698c9db9a5360d728049f088e3edd1ba01b410c374f72a9523f83522e64f5cc1a120be1877caf0bab67ebeb25291e363328c12afc5f09eb39047ed27 bnxt_netmap_linux.h
c9f902e02acf77aa81d83a257257c8b3be1169de94b4fa6510d6cfc115d885c0832708c8282132de43ca0bb771265cc0c14b7fbfbddd35dc33c383fd7fa07ca0 bnxt_sriov_sysfs.h
25b1e386fe4c6a5386e49cfcfce9ad8774dc043a93a9ebf70becedf222552459b52d74177fca25441649a63e295b6e8f0d63ae182a8666c7113f58ffcb689cc4 bnxt_sriov_sysfs.c
b31de9889490375c90a237312e7d4345d08390d6040d58f97227904878045c730c34087ffab936c208fd8b5458cd8954a8a526e671212e477be55a8526c8d67b bnxt_udcc.h
71eaee79072dbcdc2065c324e8b0acf14155477c59fa9be106be9697953e1b2ae86efc32c02fec66547a3807ec584d48f50b1d39f3f4128437e5fefa163fca31 bnxt_udcc.c
a9e0938ad553397ba1f10567cce48699ec538d39e9d8207c655b757f88d31c71b9fb4e384a07a3e3d154f582efbb3a4c97e2d5f6454ad63b7e176a7d0e0211ae bnxt_xsk.h
31fb9e824f7a4276e90ec06d5c73e5e6c8aef143e32228891e6c06ba83ecb345cbe5a4c2d40ba417998f5c1fc3368f0591f2ddf3f52a91d6e0928611db80dc56 bnxt_xsk.c
48adf36c3749afa04e7798eb8f4b882b52b44adb242e620afbf0da542ad68b85af10c233bbf6c556147ae4d7dad0867fc5cc954889bf682672307eedcd1351ae bnxt_devlink_compat.h
c5b96a4c6fea042245bbb644b944cd9dfc0d07c62ffc791589271c79216da8a659f6113398252f57ff7640078174bb7f0c71ab305b06d31fe620c01ce2c53c10 tf_core/tf_msg.c
52125a28596a6df30516f441996ee09d941f4510439b2c1c363f0527393d9875f36266cc7a097f24035fc0b19d71b2c37804ef1c15f3fe186f8f833be7745ce6 tf_core/tf_util.c
ca4849bfd14eb0f1033987b58aae9eaeae7f9438b7c07822ef2edce83ff25f7393aca7d531832bbd6ad2e6c03001242f7bb2e08feaa8a406fc7c2482f78bfacd tf_core/tf_util.h
69630d7ba7c1dd30d2c90403063b488b5b9e2cc29dbc5981805b1db2b6d90dcae4de7a1aeb1994f4a48674f0e7748f9c34e72862c233e5f927df45d7be4b352f tf_core/tf_msg.h
f3105ca4ca4e7a459a68b2e026cddb77edc3c59d0d6c0838b29d73fc9f1a5dad0c4be2d06d6f3ed26316982c5a0aba4ad5f61a0f181534d3f7d14c51d885a504 tf_core/tf_core.h
96e738e2ec12f7f0c1bffa2781a9af4189ce910a11c4f859496d003920223674d8af19e13a313ad9780a6a281c6a7a891cf669fa03f57ac51791077e791bfb1e tf_core/tf_device.h
068a124a6bc9c04127965d5b00d076c678fab07a97524817554f2d9b6d15f39772d051f7eed08b53d53d6c15136e4fa7b0344bbe333e72c8d2dfda21f18e9704 tf_core/tf_rm.h
8508a6ad224c731ec9795271c8499f795ff4b3294a84d9304fcafd4a0f5383c062f4035cd47797e296f788f4113c782532aa63491da50476b9966f2a3ca40373 tf_core/tf_session.h
0d84efea119895b29c547a74908686da58a274229c6fd845d972def34cc81500e5bc1cf75a1cb0025088b2866d4cf9daa128a08bd0ebb81fe26a932df3d9d970 tf_core/cfa_resource_types.h
f6bace710a276d4dff096b9e4c366a8f7b1aa1de2d61bfe063d6ed1b53fab4ef65c6020fa6cb2eafec95a5de5737d49aabf9ca5d5c3c7f097f072c09a78db161 tf_core/tf_tcam.h
2ded7987aa69b482118f7b0b8aef7b95a91d682954066506bfd588aa841a766581de999a5498f320f570fecfea867fc185b97b412c77978b3a22ea3a6f3cb86e tf_core/tf_em.h
22885b749e181af379a56db0518794208dace6f76a47b36efa96a152ba7a40087a09840d4c9fba1780d56a0a359d36d06c2ba64a16cd4fb0807351373e79b594 tf_core/tf_tbl.h
16d70d357415cf924454588b3a9cc4ba0acaaef1f02e92434031cb6561935844ab99966e15cc856e2feb935d611051d0d754ac33aa524e6b17953c9427aede39 tf_core/tf_if_tbl.h
21a1f3246b6df62f73fb637f8649ce83df741bce44b1b7bc6a98f5138fbac97d220058dd1bfa821f251139f4e4e98302f9da74c7ed99fc115c68e2ff6a06e34b tf_core/tf_identifier.h
69b3e3be441e90f593ab05919c0df7bc59db74cf8371697cd4117ab772a3613db5e2cd09cc156dd96fdc66fdd545fadf750e360d7355d99a2a293b78fa44ada5 tf_core/tf_global_cfg.h
66904e6c7ac5b371fe7a19fd64491de7a471031f2144dcf1dcb357a89186e61008447e43997495e7c3f5e79862bdeddcd0f3bc0ee20d97abf3c379d5b3bbee04 hcapi/bitalloc.c
98f2af824ac6b7b85584967cd3a8dd0afa18ee2371e97fd6d62f816188999830ef27dbf2771e145e29e308fc702aace0beedf7261b5f2a99b79955745f568101 hcapi/bitalloc.h
078e4339672968bc754eea9777b4f8955ca20ab10c9e0e2ded1aca622c49c64320cb9cc3213a14a5b12428de33c022e7578c1b5ad25a98bddf89ee639920d468 hcapi/cfa/hcapi_cfa_defs.h
1a97c9b741d04a9a7c75fb325c3345e6c4d24ae591d7af73a6d685b92d3eda24612b6fa4cee595d4994c7f4888105d7ccf4094c5adc1fbd92ebc52ad34534882 hcapi/cfa/cfa_p40_hw.h
a587a1a06fc2d8adf243b3de43e8c29a3e823c8af27c5826313c6038584b7145a9e217cfef2224b9d938d6b7b0f9e34eb5ed1b29c64b0d1edad199ca5b8912fe hcapi/cfa/cfa_p58_hw.h
565b94f4a32634e96f4a008ad1fc8f0f81d28a0bb59d507ba625fa070b31e71ecf5e22c681cb70f85acbd005ab72b77ee30735e690cb542e2b2b31bc92295bf6 hcapi/cfa/hcapi_cfa_p4.h
87b5af8e3c75f0edc2b0391764b2535d7ecc38639c6a592c5734113ac05dec0fb6ef6a21c7dabc5100ee0dd4bc29a102c1c56abe0635dff7fc2ab777b2d450ce hcapi/cfa/hcapi_cfa_p58.h
af136ed09afd922d8f264884d3547d272dc85b84b2ec10190f4d0f9f1cffabea80f17781aebdaf8dbb8eecaced4fb8fd68fdb504ebb2cd62152d1f48640a55bb tf_core/tf_session.c
21cffb2ff1bcf4f2ccbaf8a57e15d43975afb20044123be804e275128f6cd6f1c990b7f0574a9efb8a1c421a141f68efe4a8006ed69d8c57a26091d4ebd5ff52 tf_core/tf_rm.c
6020f4a7a183bbe3ea7d8625634353501759d036e1417968da8ffcc1b235d451fe58e5caf8dce50ffb428323dd82f484f66abe28f5b801e99ce3a64a548358a0 tf_core/tf_tcam.c
6b9197087ff8c474a215f8b809054e50be3afc887041e2716957c73d2d0fb734b63d353095522d74cf5d18c64ffb61245f85915b19c02f6a0b2823bdfdf0b2c8 tf_core/tf_tbl.c
7521c9d90cc3c0a50dd2fbd919a281eeaab98126f1638c6878b0d28d1dd3174bb16c563b07b68a53de77719c2c394c4ca82cd036de3c909cc76c15b343492580 tf_core/tf_identifier.c
365112277ca9fb6f81125b8efb2eeba2898f60f64e11df2ec6c1e0b24a9647555dece34dd90081d99939ef34021cd549eed0fa686f337067d0218c99aa28f1f8 tf_core/dpool.c
82109fdb227726eb4dd33b44e41cd66ddf17fddc338c186d29a53a3af4852ec84bc7ecc9f63f03878a81fca4b75528d657bb0f4cb49c5455b2780f3f30010dad tf_core/dpool.h
6d85c1e04bf91809ad60c174a2a46de5efa9a6bc94999ab861c190cf4495ed3dde9bef8b956b2d2ac0e3d2435964a1973fcee23689f93a2aa102e9b721224d33 tf_core/tf_em_hash_internal.c
20816255e84fda278486ee9a760fd511c4d9f4a303a1e7837cd51175e21cc702e2587aa98dae95c7345b5d6e41a0aa386871f9792481c91cfb0642e8d633cef8 tf_core/tf_em_internal.c
e66504b580ad1f3fd02edb31f50ff8a537a3edaafad3b67b344000da683613a0f6254fdf2f4fd1a624d7b41a1436f47ec2cf3a7e653d8c59aa1db2bdafcbe6a2 tf_core/tf_ext_flow_handle.h
129eb54701cfe36e6337c191082ef4b3413727501021fdbc7498e32faada2f60f231f819be8b5c6f17ee223f56cf7cf4bdf9f02c6b94b37080c5f650cddbdf2d tf_core/tf_global_cfg.c
7fdcd884f70bc3aa0d51c711d69426685e9cb0100eb9a45c97dfddc5d3df3d38dd649e2abeec3bdfcb2b9760e0737a0dc074ab2ed64be7496b42e96eb2aa1517 tf_core/tf_if_tbl.c
43b4bf89ff554173348e4af3a18f8542a3fc34e0d83b3ba47bcbbaced9189eebef9decc10751fbe3b3bef96837daee37b3a67b99a4e64fce8980c1f2accd880a tf_core/tf_sram_mgr.c
5f61c15c35d814341a9cf3dffbf7b8d2c5d5b8db0d4821310535bef987e1f06c6b80112b71e248f61d6c73db956675ec2dc7e2c4e724df8692634de71bf26734 tf_core/tf_sram_mgr.h
1ff971ce954f8d4b235eede9c15ab82f46bbfa16d37c035848b77c17c6135e47c5e60edc6e1b5ae9a10a56a4b08d6e240d7958a855f0c1558e8903c183605998 tf_core/tf_tbl_sram.c
f0767b92cdc2be23299805b7ad4dbf842863d4db55c4f4f0d7a48bec314213d4dd74f4cbbbeab87c0f50cb385423be4a1abe44faf79259a69bb5123030545966 tf_core/tf_tbl_sram.h
8a9bcfe6c681f99fd1e7c017826455c9a56357a8f6374e83a152562a125b97382f834ce5e34d9cd30acc022df6818890cb82549543a12cd1eb4c8b689f5b7b52 tf_core/rand.c
7021c8628b208a148251da6753987793ed9a2da0109709a677f9c33eab02264e67dcfb80e0b6bded6cfa2b7236a12e3d7f8449244c2dbfca171be68003b1d8c4 tf_core/rand.h
634bec7939abcf30b301aa0030deb6ab64c903f4ed2b8646c2ef40dc5e89dcc3bed5e52a98c50b769f4862b7a9e6a36a17cecc1c245acea98b5fb17bfb519fab tf_core/tf_device.c
b5189d7e29ee7224838e05fbbf106b5ef25c20381fb29f6dcd901636caf8f45a43b6796e5424c1c5940b5c440dbf32466b8cab74b56d072a158aeaa78ebb25ce tf_core/tf_device_p4.c
54ba80b3ac90d161a2dcbf1408ff9729b2d9616fba4ca5c9404a5324edeb4ebe0a76dfd449c9047a8a5e5d1cdba7d0258c0c1b20cd2ac13d264e7eaab28a96f3 tf_core/tf_device_p4.h
d1c61b4f2bb826ef4e032ca40a1d6341ff3106a370a6ddee83e0007d5f48bc592c4e3e67e4b718bd9d33ea13b1bb1b36d0162d68b69dde2b2397398220712c5b tf_core/tf_device_p58.c
8bad78052fe1749ffe3c6c0ebae89eabd57f9b2f04d41752a5c2a628da474a0544201f889fab20e47d6478abdf1bbff358efabe1332e50fb06d8fc5787f7725b tf_core/tf_device_p58.h
0861e2323d816dafe4b5427e08b23a2307f86a23f485cf8e844c393bd55e39cb85b03a98a04d7cedbcd2442aeb143822faaafe276337a418aceb16862e28a1e9 tf_core/tf_core.c
792b7c608176d4c86957650dea07f2adee05cd6f5868e28587749fa2eec53ce6b91e326d8e6ea326fe83dd1a4202b86376a3e1e52628c38ff01d7623b7f6bed7 tf_core/cfa_tcam_mgr.h
0a502f0ff27fd9069c686dbc1347c633b1e61bbb9d180af469bfa85c5b1861db3781c7df52518441fa43eefff44c5be00fda1b09fc11f8e2bade6549af6ea6f7 tf_core/cfa_tcam_mgr_device.h
01b6d40f3bd0d83347a95cfa5eeb61d0fe1f43c54168383eea7d81a3183c1b0342e75e25355c01bf6046729fd9bc8cecf016e3e1070e8c8351f1a409f30bad0a tf_core/cfa_tcam_mgr_hwop_msg.h
6adcce5d46e76707c732d8c265a5bc801923550deb04249339a805a9c6853c5870ae0d805dd92d0c57b972596d771dce341fc89dbf3414bf440e8879c3c41bf4 tf_core/cfa_tcam_mgr_p4.h
f15ef0cdcc0a37f45b06e8703e009315a903be01bfe538a099da9e4544721641a408833ba8051ab9c486ac3c1de457cafafc3387d14cd7ecaf4491329e663459 tf_core/cfa_tcam_mgr_p58.h
1282c362bca951a12fc303a8f35952803d5b6bc5cf52919f4a28f0eb9264ca3c4daff0c5d0fa622f69dd8d5475f78290b869f78bc01b80c39b92b3908275ed92 tf_core/tf_tcam_mgr_msg.h
65df8882c383d6b5e96b22b18eab774e6eec2cf98087e70aac183edda02c3ef3acb0fdcb6b817ba0c2db8daafd5cf4b99a720c9ce3b1a19eafed3f6f2b6e375a tf_core/cfa_tcam_mgr_hwop_msg.c
660d98c8964d53600d542da8e1e46478636e145c5a00f25fb598fd45a0006a188e2fd123f0a3cb124a40652d7bdc1af256a6e5d9af90247dda7be6a974335b7f tf_core/tf_tcam_mgr_msg.c
39fc8d9a87ab64b7a0db753dbff3190e1ba400bbf30fe1e7dfa61f433168926e1b1810d8e4ddc0972685e71f022bd4be512ddc8de5ec06337b327e704a491291 tf_core/cfa_tcam_mgr.c
00aba4bdf1c0691b54cd78f2bd3252a7e36fc88a3c4376ae878ab14a90a909a8b1afa42845d25cb1aec466073e5bd2c51ca0998fdc375ad9b990027aabd66508 tf_core/cfa_tcam_mgr_p4.c
6dec04c56ef053626ac7fffda3a9a3a2a7fd494feb3a4a6354976d7284feb5bc8a2643accdaa577853c4ff9b7b85916a99c344f4f6d387fa7e79355ca2e0fc85 tf_core/cfa_tcam_mgr_p58.c
a11af38789ae54514cdef2b7dc183836edfc334f3d1b26f28f9dfb4a96330cddb3c70142dba710e31e9efd4f489c5354bc1f59b19762e433e04695e8a046e11d hcapi/cfa_v3/mpc/cfa_bld_mpc.c
65cfdcc5771971780015209b775992989ef1cb4eded3c59dd4ac622f03fda0f9103348ab05f6b2db9f86050233d31eecdc24a7405527fab6268f9021a9402aca hcapi/cfa_v3/mpc/cfa_bld_p70_mpc.c
133eb0a1d7d6ea677f428fe0f9f798266edc7c4a5875063c14d89ee38aa01e7ecec21643e7404e74ee0b5b6fa9a45774a4f26817205f93c8fb85bba0c4cc3423 hcapi/cfa_v3/mpc/cfa_bld_p70_mpcops.c
d6c2bdc2ea06794920be21d7e97b53e3b216246f60f00072f5e90ad8f7bee649f095334e6ceab0b84937928b48ce9b8eafc1825c1301bfa08567455b602892dc hcapi/cfa_v3/mpc/cfa_bld_p70_host_mpc_wrapper.c
b553253a38e2df4d140d29a48d5fabbe5a57626f3729531fbedbbb9ad7cc8eb439a514478c18526f53beaba97b92c198f3e811c7cd167158101f1d01279d0dd9 hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc.h
2dca63304863f3822814e2e2530ae101a2bc147b01eaa0fa2351af0c5740c1d18559940e6c452cc292eda3e6456abaa969f20158902ac40e62bdcf25a6b4c70c hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc_defs.h
4ed65784f722e4df9a0a84f4036401662d51f7ca46fa14ab5b6fb47c49ed61d53fac20427e9112bad94a5b7c4339dd427dbcbb532ff295d112f186a8e73f873f hcapi/cfa_v3/mpc/include/cfa_bld_mpcops.h
e9fd0f76ea50f208b9d87368b996f2441f8c15632afeb5ca65fd3e22c5fedeb6d4d1eb5f6f263671c677f9effaea5215fedde09ffdb0cbca5e84254b9e257c7a hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_ids.h
f5690a98675623fab1513b13e1829c987c373c0ed3e4b3db950367c0e1edb8a49a876c74da8b32f9b4bdf776ce37708121e304b869c60a5938087b05284cb9d0 hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpcops.h
900aa73d42dd87ee8c659973b283b15579971c3691436e1b48240cd5b49902a7196d38cb4ef0ad6db76d466d1167185123a1880d57c3f17846bbb355957fa8e6 hcapi/cfa_v3/mpc/include/cfa_bld_mpc_field_ids.h
bb95d88d69e1a737929cab65a98a8a151b414de066bcfdaa75e2edc4092d78dea6d6892414f53956f1a15142dd9a13a24bf009dd46651e3768c2bb83c1742605 hcapi/cfa_v3/mpc/include/cfa_p70_mpc_cmds.h
10c82813047d744d2d5d5e0d1c2bf9968d93df2a1e8cc48b90b58143bee909b6bf324add61fe9ad5536059035516802baa78215cd985a8d1dcc97667b646065e hcapi/cfa_v3/mpc/include/cfa_p70_mpc_cmpls.h
df2630ed7c4e0b5ada9faabf97b008471a556533e6f8215f28efad7224fb68364c9c7388b0af6791b9f8baf38a7a5dd8eb0040a7874f3e04c423f5618ef6f5f7 hcapi/cfa_v3/mpc/include/cfa_p70_mpc_common.h
27d7fa22d044736bac9fb07161d9be248e4e1c73cd1ea31e5ac45e61969609cc348a9fcba9788899961d80cababd894829b08fff53627ba2e6faca8bf06401f1 hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_mapping.h
eda10745a9e126f52e2d2e420b0244d227879b8cfd301a9d31188ff61ffbdb18b305192b92f366e76a4fd12d8a24daf633fa76925fa2421ba873061aaff86f37 hcapi/cfa_v3/mpc/include/cfa_bld_p70_host_mpc_wrapper.h
dc507f329427b8533842e2ab034d8a0e9832a37374d42395a72aa4d27bdff684ee5d9bae85a98d532d85f96a3acc5037f5560f80808aad32e0198d11c5f7aeaa hcapi/cfa_v3/mpc/include/cfa_bld_defs.h
e10bd36d106e8f2e5e26e48fef14e018ae49e0649d3662fceaabad0722d8eee6aa1f56ec842a57214dfd36aa2b68be25b95c3731802ad9d0a4076a946b544e57 hcapi/cfa_v3/include/sys_util.h
de4fee416c05c3e3925ff2ec2824a05e6ecd91d703abebc00cc2f4138f7987e28460961e55b951167a1244ccb91337b8dbe24c0361fb886d89e3c6ed921b5513 hcapi/cfa_v3/include/cfa_util.h
d5e0419cd3e5b7f15b3a42912a9f6fb823dae298971caae1f5763d57b3df1427af8c33415dbf3196cc3eb226cc880d605fb9ee922eaffa4edb530e41bd6857f3 hcapi/cfa_v3/include/cfa_types.h
0774a26e3cc3c599aed590ff542f5f9a84a8a8a507e3511e41bf9d817431a60548aead54baa682f04292f0846a7fb30f61302d8a3158351c6d40d68be83c6a1f hcapi/cfa_v3/include/cfa_resources.h
94843d16d767e987a448d3c396d3cfdfea8c798200a04f3ecba205c297a11df9d51ab6eb6959e36b605c9991a3c267fd27b541c86b7640aa0c8d8705f7b2e6cc hcapi/cfa_v3/mm/include/sys_util.h
c6f4c8bff661d4f6482a58887d8a5646005a2c0be733d0660c59dd9c745db7b88689a79dfc82149d543d1b8307a2f44849495ff6064970f523c0a37e7fcb5b3b hcapi/cfa_v3/mm/include/cfa_mm.h
c02e053a98352223b4867c6952aedd03802bb5ae2930c124d04501eccdbddc942dc3c884f73b6cda96d550c44c61a64f71a420f60c13e190c776c3177dc5a618 hcapi/cfa_v3/mm/cfa_mm.c
81addeda4e134a167fb4ea611ff7d6ace78950bd2ba2d1eb24e9f308a6955a518a9893b5d31f6b1b260e01698e3ee48a23639537e8e88562e3747a2b64e5705a hcapi/cfa_v3/tim/cfa_tim.c
5b7fcd8e861ade78f655ecbfca595161f5ebf557f50e7b85972424a39685be80f198a624712519391e10a1d06c9d7a6feebaf9fa30d5cbdff75d7be545d779ee hcapi/cfa_v3/tpm/cfa_tpm.c
c9316fbaeb589c0014d48ddb39d8a514aeca53ff2d1075480b485077e877686f7a18381b5c63cc13439a7b762b4345a750bac3dbb40fe4bd7a662d65583f6987 hcapi/cfa_v3/tim/include/cfa_tim.h
7a0acb7b25d54bd1d03c4532f9587c1548a48b56be9ad58cc499b2024b1b12d926f36a61dc4c763e049964f09bee4806809b3cd5501111de0a80b41b7ca9a336 hcapi/cfa_v3/tpm/include/cfa_tpm.h
565b94f4a32634e96f4a008ad1fc8f0f81d28a0bb59d507ba625fa070b31e71ecf5e22c681cb70f85acbd005ab72b77ee30735e690cb542e2b2b31bc92295bf6 hcapi/cfa/hcapi_cfa_p4.h
87b5af8e3c75f0edc2b0391764b2535d7ecc38639c6a592c5734113ac05dec0fb6ef6a21c7dabc5100ee0dd4bc29a102c1c56abe0635dff7fc2ab777b2d450ce hcapi/cfa/hcapi_cfa_p58.h
078e4339672968bc754eea9777b4f8955ca20ab10c9e0e2ded1aca622c49c64320cb9cc3213a14a5b12428de33c022e7578c1b5ad25a98bddf89ee639920d468 hcapi/cfa/hcapi_cfa_defs.h
6d3fac3d53cfaac858c0bab3c36a09433f22399919e8da863c4b4ff24451c95e0733eefc1f931163301888c8fc466b1ca4c1429111e1c3e7904f430245496338 hcapi/cfa/hcapi_cfa_p4.c
a587a1a06fc2d8adf243b3de43e8c29a3e823c8af27c5826313c6038584b7145a9e217cfef2224b9d938d6b7b0f9e34eb5ed1b29c64b0d1edad199ca5b8912fe hcapi/cfa/cfa_p58_hw.h
dc95276a56cc194bd2753a4eb13d7619a61808f5d14c980c394ed245610b78716563340618909b9b6f619cff167b9901c58e1b60acb2ef6afa04740102fc3117 hcapi/cfa/hcapi_cfa.h
bfaae809318f79c8a16335b2e61bf1a3494ffd7d1195be4ae1ece59ed83598e614d0f1bea2f1d4b2398b8545e273e03bdb79353f9f37c269fd48c0b5456e2386 hcapi/cfa/hcapi_cfa_p58.c
1a97c9b741d04a9a7c75fb325c3345e6c4d24ae591d7af73a6d685b92d3eda24612b6fa4cee595d4994c7f4888105d7ccf4094c5adc1fbd92ebc52ad34534882 hcapi/cfa/cfa_p40_hw.h
49b1752a4ae8602c2f22386bd309a5d725820d9dcdaef131a3c9d9ed8851df7f37935ca11cc0d333f65979177f71e9ce737d4207e544b25c94868935a19ea687 tfc_v3/tfc.h
4617ee278df24456f7e13ca9dbd63b3c7a0705dfd59cd58e2a41966267b32bd0c9caaddad98cdf886d18666c923de73535fac2334419f32184301c0cd4dc5d68 tfc_v3/tfc_act.c
cc9683dc0a067c78c52a29571027a8a9e4114ae73a831c464a728f46c0ea7db3d676f75cd94aa48ca3f4263a78293be93960648c5460b83b296e063701673edf tfc_v3/tfc_action_handle.h
aa53e4e8eecdfc3af56991d94d0fbdb4a58ff20591dc85f2a16186d8ad89e1836e663e8dade4d5f85375e31e5b46f43303ea239ccb2c1bdf652241bd538a8fe3 tfc_v3/tfc_cpm.c
432907089b3115fffb467bb56cfa96b49a1a8db7faf404dbce597d074579f207feaf4a79fd971fdadc9ade1e125ef469616baacd63a09aa1a8b6b68144a20f9b tfc_v3/tfc_cpm.h
17f322abdcf5325da6d18a620049c7a46103fc74d2456fbf9d30deb0c965b62dd0c861624380ec413f27e580053f26e3b6c8074af7fea58059822f360dea7cf4 tfc_v3/tfc_debug.h
db9d7542f0db5a200847b05fa27b52bafc2def66fee2d86f48489bbcba755cbbbe17b69b57d4b38e4d53c1f8de5d00c57b4825cb3ea6e930263b1211c37da051 tfc_v3/tfc_em.c
b90e57a6fc569a6d7470d98071b97a8ca2d344abb7fa23c7b52c38b1086fdba6bf2e09faa44b5196cfa615184ec8cb40e0ae5829ff79513623899b4d0c4066c7 tfc_v3/tfc_em.h
341d795440f475da349320e56aabe3c5f7208bac97df3a3085cda3fd6a772b58ba4777467b6eee8913336b18e60941c021b5c454800495a20a380a3f6845c6d0 tfc_v3/tfc_flow_handle.h
eb7b438ae3efe5c940d4e1a5152978ea0407977559e286a372133f7feee7183a91b2e16a03d6e3d23eb8bb2805e2e3c49fe86dc106afb2b0a0bf6488dd4282a1 tfc_v3/tfc_global_id.c
1fa7d906dc97fb9d7c801798f6952b5bbe6ce9fe4119f3f4406a30e7554dd7ab90967ef95358c4eb6860cf2f3ff67fe9904af3acb68233a8f3dc78cb378f230f tfc_v3/tfc_ident.c
72f0968af9dafe3b9ff481bcec7d56479af3f2319e3fa8f76d1d299f1f584e6489bb38ab11270bb7517a514f1083a39f4d9941c0e8812c9c0045c5c688c0a6b9 tfc_v3/tfc_idx_tbl.c
2d8d6e5e0a6d5f3441d015c1a2e7fe38dd6ae04199513009296dceb21afd0250aa237b394ffeb6a2dd68b4d81488fb3f3acf8ffeb516ddca1261fd949005a6a4 tfc_v3/tfc_if_tbl.c
7fa3ca7601c384cc80b9fe7f452d275c80361679a170c72631b5458469e91a06a05137491b4c4e57c3cee10ca4700b0c9ba04965e84e642227a6de9d96530892 tfc_v3/tfc_init.c
f36b0082ec482c49c4f4e365d5f4a513acd8b677e6fb16337084f8cc10d50424a1595647f0ea4b435ce2dddff744b79ca3ef41e5784de940672dae4c3258c95b tfc_v3/tfc_msg.c
1ff2dd461a766ec8f1593d2c2d9a7b9f54601a100e996469efd9774127e151d822d9373ecec1e721b0b435b2c03260daf23218e3a742cf97eb98b88ab9a07e95 tfc_v3/tfc_msg.h
19ed9c1a215030797cc0f7ae18ce1aac3b3e012c9b22af68dae0026a9152eef12a23f56263eaa22a90c64d4cf07b15a2f3ec44fe18345c7c8f8d3c6cea2abb5d tfc_v3/tfc_priv.c
5722ff116d924191bb69d9ae8d80cd20613d0e07eda9f636ceaaee6dfc61a0b9b2b3813839607150a3abeb4f9edeaca6a7de1e4be8493faf69b73b4e13eab2d5 tfc_v3/tfc_priv.h
ff756314f3325961be395238b85e9c2b9d55c8192c12fbfb6a9a9fd0a96e30e5b41aab28b060d851c750174a5e956bb0536c29f51da9e12140cb0e26656a1f53 tfc_v3/tfc_session.c
8d99e17246d9b965a918095066d59e3b341ac82e9a49c00e4b87ff867a51d1e760b0188dabd702e91e37e05cc5c5de89f9a97cb47202f21d1847cc92897baf1d tfc_v3/tfc_tbl_scope.c
4c71d2e170f0e43d402608d28157aa916e4920266b12f12469b4bf04027186ca40c5bce53f698df537bb23ae7ecf84ca0beef95fcb720d84e997ca192b5a8e8c tfc_v3/tfc_mpc_table.c
7a57e683dbd18c9608d0700157604e1631d4724e3eca2b52eebdd9490cc3f2f2ea0274978e4e6df2eaae771f9af1633fccb16693b71663efccfec4d4dd17cd61 tfc_v3/tfc_tcam.c
5f9c2d2be7a2c89dc94a727a7a352d205cc8e9fbb24ffafbef9378dc49ad229d95870ed44ac0baff804bcf0bc8e3c77d50ffc96a155d46e98572b501d2b25c36 tfc_v3/tfc_util.c
d5ee8fb67293d13c1b70e6e99cfbc780799a66c473daffec4a6cfd5584886442816824c4dd9b066dbcd0286e3c0ae281d10f7e92aad1f3fcb67302117a49aa85 tfc_v3/tfc_util.h
285c4495b7029363c1867cc78ad0249500b5c49d4ada209023188911bc977dbc83db1ffa1ba6fa70a3a36b450b44feecb41834eb6b435674dcc1df419ede1420 tfc_v3/tfc_vf2pf_msg.c
5c5c034546a8e70b09c22a97d593a1cc74277f8ad756f06c01095b4e5cb3a6f9e0e36329818aa162c2ae745edfe895217b15d86b40283df00b63b2ac6adbc4fc tfc_v3/tfc_vf2pf_msg.h
40dc97bb4522308f9b7b0a802e1ece6c414c334ee9db88f011a8adbd9eca90a2220211c22d73795e518ac3faf1e0d3e7e9cd89bb8555f383b541146300bc03a9 tfc_v3/tfo.c
16db101485b823c82d3a25611620ed113fb786ac953d93180c87d08cdef6b95217076e72deee6da51dd0bdd56f24bcf87862510770e91d96d40738719e94b32e tfc_v3/tfo.h
261c39416305693ec92caa1153f539cab39b322f607a5b97d3e503ef825cb55d8cd6684e8d1910aa21c20f0ce8967d5f3e7a57fcb1c48a937233c90d4351f85b tf_ulp/ulp_linux.h
4fa443db84e4485582dbf24f0df7759a8a40777fff36a7c35cc3029d04e0c49b630e74b30ed6375b5937b65bd04951ba4b07bac88f5bfceaa1af2304c34aae78 tf_ulp/ulp_tc_handler_tbl.c
3a46429478521784f32f6a35e322bf18225b64db578ebfd6858c15fab097f6105f09b96685875385ad1bb938f55dc168ba69ad7edbc47393873756ee5c20cf47 tf_ulp/ulp_tc_parser.c
960c7304a30310f01608f9e14ec6d3a229e86ad152c3d0aedf78293c530ef7932114dfa0ca2396386904afb85e23c2b0a570451226d699f976639fe267068eaf tf_ulp/ulp_tc_parser.h
e049b16cd36b891f330a75dd341b6cebd4005e14de56fc25bc619ea5e96cd0e0fbba806a44474d802928bf5d2d41018c9ea72e4ac4b825b2956a2e30ac25ff66 tf_ulp/bnxt_ulp_flow.h
7135c1ea19d1b3c12864c7656e70a1debe26de67ef827c3e8c5e77efb718e8fbee91d565c674d6539f32ae8fe020e238585916d8052353273576a3178a46dbc5 tf_ulp/bnxt_ulp_linux_flow.c
f58f853902be89312730aed1085a39825388fc3d4f38f6ef84b301db3794564d2d9c5a4baf1bfcbaa46eb46e06babee2e7edbace92bea8e3f55471dc52de4211 tf_ulp/ulp_def_rules.c
3d51438bbcb5cab474275fc52f41a7b3b8ef60b51aabceb28d09f1da60e96e07d34e466582a1b4b6f72d95cd56fac9c4c73a8c67f6a4e7879d358cdd2fcb7649 tf_ulp/ulp_matcher.c
8db6c1d0b4a16c91ce6c1cd8e3de0d3bd7724917ea7375a3779886dce48552a158c3d590081cda800d811c8af9b8416942a88362b274597783cf10a9dd3a11f2 tf_ulp/ulp_matcher.h
b5bdfca5e9361ae2ac4d4cc4c8c4f699cccb76449fba90967dbace6419feff01fb7e5dea685b8ba41ea80affec846297446f418f3034c399be0d7146b78105aa tf_ulp/bnxt_tf_ulp.c
54eadc397ce3db4d385f99bcd849c7b10e6936b676908f20037676a2e2f4782e72bf3bdfc867e30e22a919dd877c9f658ca06ea8a983532ec0f0d82c26c25b21 tf_ulp/bnxt_tf_ulp_p5.c
a14960179818137ac599745ab7b59f460ca484d1887d5daa989ae4e9498548bbc4bdc2176f034aa6884352f9707719fc5f67bb2be4e431c16f3273eccd833702 tf_ulp/bnxt_tf_ulp_p5.h
bb9f80c1f6eab79db42b6d3c95eca002ea72bf2afc0584ae0baa7ce2414e2c1579888712b623ddb8dbf34b1d7e3362b51f8b7bbad1086b6be176149148a66174 tf_ulp/bnxt_tf_ulp_p7.c
f45e3c9f40ff977cca0201091aadf0414407bd13c7df655467e8bea9bfbbf2a281a042c539343eb50fde98bfe3e2588effae761ca1346676026740b7044ba01d tf_ulp/bnxt_tf_ulp_p7.h
402a7cf713e3ca2c93d52831eabefaf60b94f9e257cd4cd09baa9eb7c7f9f10d1976c3a94952c75bdacc19f58e400023e2a61e7d021671e86cc51668b4fb3768 tf_ulp/ulp_alloc_tbl.c
363f26445ed32089348b30df9304eb8fb2c85f3bfe4a8c3ec91f862d6675b6707811a0d0d705627913bd193fb8ccc638284bfcecb8655f6d5bf114011e8da0fa tf_ulp/ulp_alloc_tbl.h
b7af6cb0517793c6331fe4322722653b7559b81117902cfd390c179fe53579d79c982310a5947eae4aeba4763b53a7e8aab4621b469a5e16fe5a781d413e4e04 tf_ulp/ulp_fc_mgr.c
4c6d834123892a072177af4ac2280da5df6444a955bdee3999ce0e7b7bb65c3be439e96f81b588b2db9ec1cf07226062b38dc89ab831a0840c8e27b00cff0810 tf_ulp/ulp_fc_mgr_p5.c
92370d7c50b3fa92d365655bb07704beba1ed48f495a7980dc74404bdbba7e796290c87eb96cf6aecaa64d1d28d9ae1395eb1a9bb22958a9f9a37f52ffd753c3 tf_ulp/ulp_fc_mgr_p7.c
73f566f39d1ee038259cd0e59b3f333a9cce8abe3bd22945c56a2097966b596a1b0f1949baeaa654437f0bfc1dcb4b9c52184703497bd8601d253cbfc3af40e5 tf_ulp/ulp_fc_mgr.h
bef799c2d3a9c84f120cbde2f968299702f966a2062df5445dd908187166dc5c9fcf8dc54037986422ebdc565a25d6294deea6af9f74b78e1d68830506704ad5 tf_ulp/ulp_flow_db.c
9a9bf4d86b87005dca44790d44a1ae67085313c104238ea185e6847ed1ee522de8bb8adce3232898679ee86964cbe0f55745f7909c9bea3256197a9780c7f2b5 tf_ulp/ulp_flow_db.h
4fdb97c5671fdd694b33ef9d56f2f44cd5a021c8bbcbc4e6a86e3f21950a5e921fb2cb1e55ecdacd5559408af77f6674162172f22553f629783e7cc086432f27 tf_ulp/ulp_gen_tbl.c
b28a588cac9f163d4b588560e54f5f262c8ddd2bffcbc79fe590a55466f75b126f3d1cc15f9ad8bc0471c8aef6161788384090b59a08ee5475e5fa8180ffd95f tf_ulp/ulp_gen_tbl.h
2fd57ffc5854e2907fca8d280fe8fa159bc4fe24dfe309a4b348bfb0d68be0a25c0da238e4c595a5e7b151854ae314e71aa8be96c48fbbf5def79b6bb9d123ab tf_ulp/ulp_mapper.c
678daa115f448a3befc22689d022cc2cf14848e502aeb980936af33d597df8636eb70a839a265cf416bcf5514f93f787ee361b8f9793a8f7a56ae624e412eb6e tf_ulp/ulp_mapper_p5.c
b4a3a3612c86a5a300b1832260722db53176749730ae9b05504be5a9091632587965d501bcf290d302c9f1c069f07a35b2412926270d28daf97ffe30730e0806 tf_ulp/ulp_mapper_p7.c
e8f71f7c0be9d62815930e826890752d5168dc9bc116d9805d5d2e5a9f82b6c4623ea23a6c4518e98fd985507dbd9030ef2a7dcab3927069e156e4f2a783daf2 tf_ulp/ulp_mapper.h
85375c3825afeb00a1c5e82b3f2cdce4fbecdfb63600912b75012a58e162420cf469d1a7ffdd66a573fde35817a9173644ccf5e65cc74efd529eaee38cb57354 tf_ulp/ulp_mark_mgr.c
305e8072270b4bc9b1987c4b2a3988c277931b70f73904b1258b5e693f2e28f1b6aea701a80f21996de7eceb3807e95ca60874c5c1d4e8a3b6847ecf5bebd6af tf_ulp/ulp_mark_mgr.h
a3248de2f1b3eea5ce72c78a20e707956aa65f9648ff548b0b375734dce8476cc9843a160e944c28a4d8d670c0bb957aa94e6c0a8ed726c5b86071b058b7d1c3 tf_ulp/ulp_nic_flow.c
a87a87af2af9ce9df0a6127c2496221a4105e269a5c1215914431cb78fb9297e5a046871ba5f06718311c35f49f6cd1c44fdb2ac684a1e69980fdf7dbcb2c1eb tf_ulp/ulp_nic_flow.h
08b5634a737a4851eb791712841f1a70b61e29662db5fc26a247021a2cbeafdd4b7515b02afbd92a4e18b0d31e988cf913501288c125e0a79a4d30ab4b7680d5 tf_ulp/ulp_port_db.c
c7445b5edbe81dc53fa8e15028cb8dc7ab2ac92b7ae9c4a958062bd29156a0483813601005ded624152a2c27a93198d535773fb25023d1c358223ea63ac28ead tf_ulp/ulp_port_db.h
965cb00f70a9dd16178c032bcef5aca624143208db5bcc215cd31edda9b8f513b52a363d10218165778d3e90d024d55d0c14b86e5462e3c87fb93dd873ac5611 tf_ulp/ulp_template_debug.c
0223df8e832b7d2c097697d467bbf68f0e79a6e9e77e2527fc27efb05fc7262e41054dae4bce188a66246e26269a1380af4fed60570996c2d8e5b2781b8ec8e8 tf_ulp/ulp_template_debug.h
c92706a421462eb3ef9d0bc68d486855ef1e829a3a43ca7d162dc6f34030a5cfc857349518c424a20b89af656118411247f90260767e47a4015bafbcd6aa950a tf_ulp/ulp_template_debug_proto.h
50da5b635d2dbb5b1612b8b9392e3fdbbf4678dac42d328a3c62e22727620baee1358b90665eb8185938a2e6cc69f060b979f52956e862a1c537684023ab6b35 tf_ulp/ulp_tf_debug.c
42a11db9acaca2b4eef996fe0f7e3d7cceb29cdcebe10288154a6b337a2f84eaf5d3674e9891112f282089cef2723d184cf0a2d4fe875b4ab3bef0a475115db7 tf_ulp/ulp_tf_debug.h
a1cbc1eea55e9eb5743e8c62a056dc60ea29752c4d825f5dc512cadc7fbae1e1b5e43ecce826cfaee5fa6104328c15cbee9425555c96b69effd8dacf89054cb6 tf_ulp/bnxt_tf_common.h
054c3ef48bc169f07efba4e7090e1a7ec8edcec1240639786eff4bc27ce7d80443e1620646370af2481c47b23480689a41b32a0e729745518035d2758ce355af tf_ulp/bnxt_tf_ulp.h
8053ebbb397792cd2445b3af322e23f6f5954c05e9f13d903b06a73d048f45af56adb00c7b78258f8392fa8a3d19de73b73f3686fb1c427fd2060a124c0e7d0a tf_ulp/ulp_utils.c
0fd4e7ee736fd4459922a10dff054cd821170ca97c6329fbb6a1b48345aef17bebbb8f0856066119cd0b7ba98f2cb98787d72d95d1ad680132cf1f5b005be512 tf_ulp/ulp_utils.h
ee94fe0fc5943227b68e5e445b77ab9270421e6d0d39c236c3ac1ee548aaa2b10f64e7ea4dba4a214155ec8493dfc3a675cf77fbe329b3f93eeae4535ca4ac5e tf_ulp/ulp_udcc.c
303350ebfae8d466ce0e2ac74ba7889ab15a52836d4dcf15c8a61297883e22114013d0a545672da2e61e879690d06d8f951ba4658e6599a7c527b0b8cd7315b2 tf_ulp/ulp_udcc.h
50109c653fe840981dd60da4534cd5a41d028f6aea46e733a01b6938e02e49c6801f8d2b7ecd6ec083580854bb3b91b6762b7aba5f7d43ad2594c1cf7f4e9437 tf_ulp/ulp_generic_flow_offload.c
5ef7ccfab8a460c33011c340c72d99d6c0362eaf89f6a936963f41e8c4b86e9d159d30c5ac010b2ddf1688886a3c038f47820ade889d768d2359cebdff7e918c tf_ulp/ulp_generic_flow_offload.h
1a792be0477d81bec9bf0180033538e3a631d27d8f6c670ec1e4ca7f6e6a6144f0a179bfb87a96558ceeec518f9b5c9723a3b900d0ba67652ac8750ffd8fcde1 tf_ulp/bnxt_tf_tc_shim.c
a0b6f461e42903cf5cfb5083fd48c786a491c97303a44ce5f94e4dfd5adec8511bc08fd3fde0a816cc0fe7350c428be5e009872062d6f9b5b67c7c531c66e04a tf_ulp/bnxt_tf_tc_shim.h
b3c38c0b4d95e64e7d6a3f00c183b45620063cbe52f39e50f5aec987f450505cf6747581932722d7f206f19f4a285c00b7f6acdcd39a2e87b359cec7a2a74ddd tf_ulp/bnxt_ulp_meter.c
9f03e34b4222d99beb970a3df85b0822b781d684e230f1098dee1c2087d93cc997a1b9dafaf53f03d6d6df4f65d16d438e842d0eb5c5885c4c0e233e0718e1c2 tf_ulp/ulp_template_struct.h
8532f7004f7fe11ff32d40bccde1be08065d132ee51251b206a6afed764e332a32511f2c8ee3e58a428a2a69065c4f1eae963401d6681a0ef29ba7056765f23e tf_ulp/ulp_tc_custom_offload.c
ea12dd0a939632ca15ee87bd38f0053bf305c46b9ee9731bd28c357851d41f706810e514b590fef7e64e4e1c1e507ad1f08e94f8d165c2b3665617f8d8415568 tf_ulp/ulp_tc_custom_offload.h
1959325afbc5b234d748b5f1974c526664e12fc7e40865218bd76970bd80050f50ad3a3f7e0264480007bed6e156729ad4823e14bc2c6525ea58d6ba907b6890 tf_ulp/ulp_tc_rte_flow_gen.c
44dc39c3b5b41c62edc69e7eaffac0ad92a426c33f0f9054a15d7492257582cbe00a0993428074702e8383beb8b9ff9366014cb096a7835614e50a3e2df3d90b tf_ulp/ulp_tc_rte_flow.h
e1bbd865cb81cafbe22cbe49f0f0429ffea48c8fdf3050474aa94fd798ab429634d2086e7a0df64a7742bf3b3f062cd58620c959ba893365228379608125cfef tf_ulp/generic_templates/ulp_template_db_enum.h
4c77f8fd840a08f9643989d10a4a57ad1a2567f12447bfd6f94559380e7a578eb07291a89fa5de0ea15ae5871de062c21fbd245be0942dbdced4b55570b6c201 tf_ulp/generic_templates/ulp_template_db_field.h
bbf08d3547968c803d62d275c880985fce7f8f4faaf0ea49bee660d17ae6687bbca3f0190dbcd1b1e618738148de2ccb96e51880c94200380aac0a2f73753152 tf_ulp/generic_templates/ulp_template_db_tbl.h
e852825d9707ca4126dcdecc4a3a7143b75fc01db057ecd63b81cbf57c7395a584076d770f755bf35ab3d20cdd45f1fce37a9c6450c11e5b4b9b679b31349b77 tf_ulp/generic_templates/ulp_template_db_wh_plus_act.c
986b3905f14016bd79ed00498659ed906dba7e77dd303ec984c042f1f9a59ad7b634946d89380b8809a0087886c1ff125dabae3d63db932a1794447cb1f0fd85 tf_ulp/generic_templates/ulp_template_db_wh_plus_class.c
02544f03c31c3e7421b314f1e43abb13d6c8be8dc303d507989987e1372401bf3fd0d52f000dbf6809dbd9e0ccc0e1ae1e9444c3900bdaea579ef685e9e7ae8c tf_ulp/generic_templates/ulp_template_db_thor_act.c
81980881e4ee724fb4224f6a5f15ca063a7309aacd361653f76fcdb82f14203e40cac2b011289b9337e73fff66c99720839795aa1b99a784c4008daa040b83fe tf_ulp/generic_templates/ulp_template_db_thor_class.c
0973f0adb013a80f754d87453d26fbe33d6c78bc00006c74fcd5f0f64fd1671b4914f1f702ae64b348e4dc5e5001b81faa03471e35078b226fe8deb207fb3200 tf_ulp/generic_templates/ulp_template_db_thor2_act.c
7c70088a7c6ee59fdd9762100189e8c1a7221aaa459e8b06587822c348c64fe6c862384313a296003c1f41f8afb31934a77ba4d56429f59933c3c7f106279d85 tf_ulp/generic_templates/ulp_template_db_thor2_class.c
2025b41d37752eed09e3c71af706b3588c8f2424eb65bb958e420f98a2e7f550b5f2e73ed5403b32dd407825964daa3615744548d5618a8ac9a4f793d2a32584 tf_ulp/generic_templates/ulp_template_db_tbl.c
150fcea5c75fd6d8131366f261c4b2627535cb1e2151c7fa36a5d7a20272f910fb5bfaca64d4088b31530b56e73a1a6bc97344a44634ed4d53974e51b7c5c4fa tf_ulp/generic_templates/ulp_template_db_act.c
e966c36b159a6749b5928223c442d371023e47796e148953a8743d6fe20fef17f9a97409770b0d3df4b3176a5310d5a9f4e2ac502e59f9ef8515877f76fa0665 tf_ulp/generic_templates/ulp_template_db_class.c
cfec8347dbd37db134e0a6ac0e3747f76f8ef0db6929bbe22503b78821f0ea1fb0ad3b9e1217c9e2d525f1f89ae0b5683aac75d3106994fe21eb02648f94b6a9 find_src.awk
c4ab703e93d6698d0ba17b4cc642eabea3533bd3f6163b523cda06dfa70fd3288386472f981058214dbdd94067c520da325e78d9118c6f93859cd7b1179b4c21 ChangeLog
aee80b1f9f7f4a8a00dcf6e6ce6c41988dcaedc4de19d9d04460cbfb05d99829ffe8f9d038468eabbfba4d65b38e8dbef5ecf5eb8a1b891d9839cda6c48ee957 COPYING
03ecb05c72c926c37386df29c31542751bab78cfa591be913655a78f60f327c1d6e04d8e87a4faf238b15dbd3358a8a5ae4752d82d9fce1bf9365a85e7dcbb26 Makefile
ce37c849b8fb51afea53d4fda67bd82f52a3ff7affd44024a1f0c6a914b3749dd6ad24ee11ac8d685dbd6ff4aec426bf698e208b7228be00938ae56965228c75 README.TXT

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,178 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2022-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#if !defined(CONFIG_AUXILIARY_BUS)
#undef HAVE_AUXILIARY_DRIVER
#endif
#ifndef HAVE_AUXILIARY_DRIVER
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "bnxt_auxbus_compat.h"
static struct list_head bnxt_aux_bus_dev_list = LIST_HEAD_INIT(bnxt_aux_bus_dev_list);
static struct list_head bnxt_aux_bus_drv_list = LIST_HEAD_INIT(bnxt_aux_bus_drv_list);
static DEFINE_MUTEX(bnxt_auxbus_lock);
static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
const struct auxiliary_device *auxdev)
{
for (; id->name[0]; id++) {
const char *p = strrchr(dev_name(&auxdev->dev), '.');
int match_size;
if (!p)
continue;
match_size = p - dev_name(&auxdev->dev);
/* match against the dev_name(&auxdev->dev) prefix before the last '.' char */
if (strlen(id->name) == match_size &&
!strncmp(dev_name(&auxdev->dev), id->name, match_size))
return id;
}
return NULL;
}
int auxiliary_device_init(struct auxiliary_device *auxdev)
{
struct device *dev = &auxdev->dev;
char *modname = KBUILD_MODNAME;
int ret;
if (!dev->parent) {
pr_err("auxiliary_device has a NULL dev->parent\n");
return -EINVAL;
}
if (!auxdev->name) {
pr_err("auxiliary_device has a NULL name\n");
return -EINVAL;
}
ret = dev_set_name(dev, "%s.%s.%d", modname, auxdev->name, auxdev->id);
if (ret) {
dev_err(dev, "auxiliary device dev_set_name failed: %d\n", ret);
return ret;
}
return 0;
}
int auxiliary_device_add(struct auxiliary_device *auxdev)
{
const struct auxiliary_device_id *id;
struct auxiliary_driver *auxdrv;
bool found = true;
int ret = 0;
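/* Probe the new device against already-registered drivers; track it in the
 * device list unless a matching driver's probe failed.
 */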
mutex_lock(&bnxt_auxbus_lock);
list_for_each_entry(auxdrv, &bnxt_aux_bus_drv_list, list) {
id = auxiliary_match_id(auxdrv->id_table, auxdev);
if (id) {
ret = auxdrv->probe(auxdev, id);
if (!ret)
auxdev->dev.driver = &auxdrv->driver;
else
found = false;
break;
}
}
if (found)
list_add_tail(&auxdev->list, &bnxt_aux_bus_dev_list);
mutex_unlock(&bnxt_auxbus_lock);
return ret;
}
void auxiliary_device_uninit(struct auxiliary_device *auxdev)
{
struct device *dev = &auxdev->dev;
dev->release(dev);
}
void auxiliary_device_delete(struct auxiliary_device *auxdev)
{
struct auxiliary_driver *auxdrv;
mutex_lock(&bnxt_auxbus_lock);
list_for_each_entry(auxdrv, &bnxt_aux_bus_drv_list, list) {
if (auxdev->dev.driver != &auxdrv->driver)
continue;
if (auxdrv->remove)
auxdrv->remove(auxdev);
auxdev->dev.driver = NULL;
}
list_del(&auxdev->list);
mutex_unlock(&bnxt_auxbus_lock);
}
int bnxt_auxiliary_driver_register(struct auxiliary_driver *auxdrv)
{
const struct auxiliary_device_id *id;
struct auxiliary_device *auxdev;
int ret = 0;
if (WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table))
return -EINVAL;
if (auxdrv->name)
auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s.%s", KBUILD_MODNAME,
auxdrv->name);
else
auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s", KBUILD_MODNAME);
if (!auxdrv->driver.name)
return -ENOMEM;
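/* Bind the new driver to any already-added devices that match and are not
 * yet claimed by another driver.
 */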
mutex_lock(&bnxt_auxbus_lock);
list_for_each_entry(auxdev, &bnxt_aux_bus_dev_list, list) {
if (auxdev->dev.driver)
continue;
id = auxiliary_match_id(auxdrv->id_table, auxdev);
if (id) {
ret = auxdrv->probe(auxdev, id);
if (ret)
continue;
auxdev->dev.driver = &auxdrv->driver;
}
}
list_add_tail(&auxdrv->list, &bnxt_aux_bus_drv_list);
mutex_unlock(&bnxt_auxbus_lock);
return 0;
}
EXPORT_SYMBOL(bnxt_auxiliary_driver_register);
void bnxt_auxiliary_driver_unregister(struct auxiliary_driver *auxdrv)
{
struct auxiliary_device *auxdev;
/* PF auxiliary devices are added to the list first, followed by VF
 * devices. Removing the PF device's driver first causes failures while
 * removing the VF driver, so walk the list backwards to detach VF
 * devices first.
 */
mutex_lock(&bnxt_auxbus_lock);
list_for_each_entry_reverse(auxdev, &bnxt_aux_bus_dev_list, list) {
if (auxdev->dev.driver != &auxdrv->driver)
continue;
if (auxdrv->remove)
auxdrv->remove(auxdev);
auxdev->dev.driver = NULL;
}
kfree(auxdrv->driver.name);
list_del(&auxdrv->list);
mutex_unlock(&bnxt_auxbus_lock);
}
EXPORT_SYMBOL(bnxt_auxiliary_driver_unregister);
#endif /* HAVE_AUXILIARY_DRIVER */

View File

@@ -0,0 +1,111 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2022-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef _BNXT_AUXILIARY_COMPAT_H_
#define _BNXT_AUXILIARY_COMPAT_H_
#if !defined(CONFIG_AUXILIARY_BUS)
#undef HAVE_AUXILIARY_DRIVER
#endif
#ifdef HAVE_AUXILIARY_DRIVER
#include <linux/auxiliary_bus.h>
#endif
#if defined(HAVE_AUXILIARY_DRIVER) && !defined(HAVE_AUX_GET_DRVDATA)
static inline void *auxiliary_get_drvdata(struct auxiliary_device *auxdev)
{
return dev_get_drvdata(&auxdev->dev);
}
static inline void auxiliary_set_drvdata(struct auxiliary_device *auxdev, void *data)
{
dev_set_drvdata(&auxdev->dev, data);
}
#endif
#ifndef HAVE_AUXILIARY_DRIVER
#ifndef AUXILIARY_NAME_SIZE
#define AUXILIARY_NAME_SIZE 32
#endif
#ifndef HAVE_AUX_DEVICE_ID
#include <linux/mod_devicetable.h>
struct auxiliary_device_id {
char name[AUXILIARY_NAME_SIZE];
kernel_ulong_t driver_data;
};
#endif
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#ifndef HAVE_IDA_ALLOC
#include <linux/idr.h>
#endif
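/* Minimal stand-ins for the upstream auxiliary bus structures; the extra
 * list_head members thread devices and drivers onto the compat layer's
 * internal lists.
 */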
struct auxiliary_device {
struct device dev;
const char *name;
u32 id;
struct list_head list;
};
struct auxiliary_driver {
int (*probe)(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id);
void (*remove)(struct auxiliary_device *auxdev);
void (*shutdown)(struct auxiliary_device *auxdev);
int (*suspend)(struct auxiliary_device *auxdev, pm_message_t state);
int (*resume)(struct auxiliary_device *auxdev);
const char *name;
struct device_driver driver;
const struct auxiliary_device_id *id_table;
struct list_head list;
};
int auxiliary_device_init(struct auxiliary_device *auxdev);
int auxiliary_device_add(struct auxiliary_device *auxdev);
void auxiliary_device_uninit(struct auxiliary_device *auxdev);
void auxiliary_device_delete(struct auxiliary_device *auxdev);
int bnxt_auxiliary_driver_register(struct auxiliary_driver *auxdrv);
void bnxt_auxiliary_driver_unregister(struct auxiliary_driver *auxdrv);
#define auxiliary_driver_register bnxt_auxiliary_driver_register
#define auxiliary_driver_unregister bnxt_auxiliary_driver_unregister
static inline void *auxiliary_get_drvdata(struct auxiliary_device *auxdev)
{
return dev_get_drvdata(&auxdev->dev);
}
static inline void auxiliary_set_drvdata(struct auxiliary_device *auxdev, void *data)
{
dev_set_drvdata(&auxdev->dev, data);
}
static inline struct auxiliary_driver *to_auxiliary_drv(struct device_driver *drv)
{
return container_of(drv, struct auxiliary_driver, driver);
}
#endif /* HAVE_AUXILIARY_DRIVER */
#ifndef HAVE_IDA_ALLOC
static inline int ida_alloc(struct ida *ida, gfp_t gfp)
{
return ida_simple_get(ida, 0, 0, gfp);
}
static inline void ida_free(struct ida *ida, unsigned int id)
{
ida_simple_remove(ida, id);
}
#endif /* HAVE_IDA_ALLOC */
#endif /* _BNXT_AUXILIARY_COMPAT_H_ */

File diff suppressed because it is too large

View File

@@ -0,0 +1,149 @@
/* THIS IS A GENERATED FILE - DO NOT EDIT!
*
* To regenerate, use: update_compat.sh
*/
#define __LINK_MODE_LANES_CR 1
#define __LINK_MODE_LANES_CR2 2
#define __LINK_MODE_LANES_CR4 4
#define __LINK_MODE_LANES_CR8 8
#define __LINK_MODE_LANES_DR 1
#define __LINK_MODE_LANES_DR2 2
#define __LINK_MODE_LANES_DR4 4
#define __LINK_MODE_LANES_DR8 8
#define __LINK_MODE_LANES_KR 1
#define __LINK_MODE_LANES_KR2 2
#define __LINK_MODE_LANES_KR4 4
#define __LINK_MODE_LANES_KR8 8
#define __LINK_MODE_LANES_SR 1
#define __LINK_MODE_LANES_SR2 2
#define __LINK_MODE_LANES_SR4 4
#define __LINK_MODE_LANES_SR8 8
#define __LINK_MODE_LANES_ER 1
#define __LINK_MODE_LANES_KX 1
#define __LINK_MODE_LANES_KX4 4
#define __LINK_MODE_LANES_LR 1
#define __LINK_MODE_LANES_LR4 4
#define __LINK_MODE_LANES_LR4_ER4 4
#define __LINK_MODE_LANES_LR_ER_FR 1
#define __LINK_MODE_LANES_LR2_ER2_FR2 2
#define __LINK_MODE_LANES_LR4_ER4_FR4 4
#define __LINK_MODE_LANES_LR8_ER8_FR8 8
#define __LINK_MODE_LANES_LRM 1
#define __LINK_MODE_LANES_MLD2 2
#define __LINK_MODE_LANES_T 1
#define __LINK_MODE_LANES_T1 1
#define __LINK_MODE_LANES_X 1
#define __LINK_MODE_LANES_FX 1
#define __DUPLEX_Half DUPLEX_HALF
#define __DUPLEX_Full DUPLEX_FULL
#define __DEFINE_LINK_MODE_PARAMS(_speed, _type, _duplex) \
[ETHTOOL_LINK_MODE(_speed, _type, _duplex)] = { \
.speed = SPEED_ ## _speed, \
.lanes = __LINK_MODE_LANES_ ## _type, \
.duplex = __DUPLEX_ ## _duplex \
}
#define __DEFINE_SPECIAL_MODE_PARAMS(_mode) \
[ETHTOOL_LINK_MODE_ ## _mode ## _BIT] = { \
.speed = SPEED_UNKNOWN, \
.lanes = 0, \
.duplex = DUPLEX_UNKNOWN, \
}
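/* Table indexed by ETHTOOL_LINK_MODE_*_BIT: each entry records the speed,
 * lane count and duplex of that link mode (special modes use SPEED_UNKNOWN).
 */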
const struct link_mode_info link_mode_params[] = {
__DEFINE_LINK_MODE_PARAMS(10, T, Half),
__DEFINE_LINK_MODE_PARAMS(10, T, Full),
__DEFINE_LINK_MODE_PARAMS(100, T, Half),
__DEFINE_LINK_MODE_PARAMS(100, T, Full),
__DEFINE_LINK_MODE_PARAMS(1000, T, Half),
__DEFINE_LINK_MODE_PARAMS(1000, T, Full),
__DEFINE_SPECIAL_MODE_PARAMS(Autoneg),
__DEFINE_SPECIAL_MODE_PARAMS(TP),
__DEFINE_SPECIAL_MODE_PARAMS(AUI),
__DEFINE_SPECIAL_MODE_PARAMS(MII),
__DEFINE_SPECIAL_MODE_PARAMS(FIBRE),
__DEFINE_SPECIAL_MODE_PARAMS(BNC),
__DEFINE_LINK_MODE_PARAMS(10000, T, Full),
__DEFINE_SPECIAL_MODE_PARAMS(Pause),
__DEFINE_SPECIAL_MODE_PARAMS(Asym_Pause),
__DEFINE_LINK_MODE_PARAMS(2500, X, Full),
__DEFINE_SPECIAL_MODE_PARAMS(Backplane),
__DEFINE_LINK_MODE_PARAMS(1000, KX, Full),
__DEFINE_LINK_MODE_PARAMS(10000, KX4, Full),
__DEFINE_LINK_MODE_PARAMS(10000, KR, Full),
[ETHTOOL_LINK_MODE_10000baseR_FEC_BIT] = {
.speed = SPEED_10000,
.lanes = 1,
.duplex = DUPLEX_FULL,
},
__DEFINE_LINK_MODE_PARAMS(20000, MLD2, Full),
__DEFINE_LINK_MODE_PARAMS(20000, KR2, Full),
__DEFINE_LINK_MODE_PARAMS(40000, KR4, Full),
__DEFINE_LINK_MODE_PARAMS(40000, CR4, Full),
__DEFINE_LINK_MODE_PARAMS(40000, SR4, Full),
__DEFINE_LINK_MODE_PARAMS(40000, LR4, Full),
__DEFINE_LINK_MODE_PARAMS(56000, KR4, Full),
__DEFINE_LINK_MODE_PARAMS(56000, CR4, Full),
__DEFINE_LINK_MODE_PARAMS(56000, SR4, Full),
__DEFINE_LINK_MODE_PARAMS(56000, LR4, Full),
__DEFINE_LINK_MODE_PARAMS(25000, CR, Full),
__DEFINE_LINK_MODE_PARAMS(25000, KR, Full),
__DEFINE_LINK_MODE_PARAMS(25000, SR, Full),
__DEFINE_LINK_MODE_PARAMS(50000, CR2, Full),
__DEFINE_LINK_MODE_PARAMS(50000, KR2, Full),
__DEFINE_LINK_MODE_PARAMS(100000, KR4, Full),
__DEFINE_LINK_MODE_PARAMS(100000, SR4, Full),
__DEFINE_LINK_MODE_PARAMS(100000, CR4, Full),
__DEFINE_LINK_MODE_PARAMS(100000, LR4_ER4, Full),
__DEFINE_LINK_MODE_PARAMS(50000, SR2, Full),
__DEFINE_LINK_MODE_PARAMS(1000, X, Full),
__DEFINE_LINK_MODE_PARAMS(10000, CR, Full),
__DEFINE_LINK_MODE_PARAMS(10000, SR, Full),
__DEFINE_LINK_MODE_PARAMS(10000, LR, Full),
__DEFINE_LINK_MODE_PARAMS(10000, LRM, Full),
__DEFINE_LINK_MODE_PARAMS(10000, ER, Full),
__DEFINE_LINK_MODE_PARAMS(2500, T, Full),
__DEFINE_LINK_MODE_PARAMS(5000, T, Full),
__DEFINE_SPECIAL_MODE_PARAMS(FEC_NONE),
__DEFINE_SPECIAL_MODE_PARAMS(FEC_RS),
__DEFINE_SPECIAL_MODE_PARAMS(FEC_BASER),
__DEFINE_LINK_MODE_PARAMS(50000, KR, Full),
__DEFINE_LINK_MODE_PARAMS(50000, SR, Full),
__DEFINE_LINK_MODE_PARAMS(50000, CR, Full),
__DEFINE_LINK_MODE_PARAMS(50000, LR_ER_FR, Full),
__DEFINE_LINK_MODE_PARAMS(50000, DR, Full),
__DEFINE_LINK_MODE_PARAMS(100000, KR2, Full),
__DEFINE_LINK_MODE_PARAMS(100000, SR2, Full),
__DEFINE_LINK_MODE_PARAMS(100000, CR2, Full),
__DEFINE_LINK_MODE_PARAMS(100000, LR2_ER2_FR2, Full),
__DEFINE_LINK_MODE_PARAMS(100000, DR2, Full),
__DEFINE_LINK_MODE_PARAMS(200000, KR4, Full),
__DEFINE_LINK_MODE_PARAMS(200000, SR4, Full),
__DEFINE_LINK_MODE_PARAMS(200000, LR4_ER4_FR4, Full),
__DEFINE_LINK_MODE_PARAMS(200000, DR4, Full),
__DEFINE_LINK_MODE_PARAMS(200000, CR4, Full),
__DEFINE_LINK_MODE_PARAMS(100, T1, Full),
__DEFINE_LINK_MODE_PARAMS(1000, T1, Full),
__DEFINE_LINK_MODE_PARAMS(400000, KR8, Full),
__DEFINE_LINK_MODE_PARAMS(400000, SR8, Full),
__DEFINE_LINK_MODE_PARAMS(400000, LR8_ER8_FR8, Full),
__DEFINE_LINK_MODE_PARAMS(400000, DR8, Full),
__DEFINE_LINK_MODE_PARAMS(400000, CR8, Full),
__DEFINE_SPECIAL_MODE_PARAMS(FEC_LLRS),
__DEFINE_LINK_MODE_PARAMS(100000, KR, Full),
__DEFINE_LINK_MODE_PARAMS(100000, SR, Full),
__DEFINE_LINK_MODE_PARAMS(100000, LR_ER_FR, Full),
__DEFINE_LINK_MODE_PARAMS(100000, DR, Full),
__DEFINE_LINK_MODE_PARAMS(100000, CR, Full),
__DEFINE_LINK_MODE_PARAMS(200000, KR2, Full),
__DEFINE_LINK_MODE_PARAMS(200000, SR2, Full),
__DEFINE_LINK_MODE_PARAMS(200000, LR2_ER2_FR2, Full),
__DEFINE_LINK_MODE_PARAMS(200000, DR2, Full),
__DEFINE_LINK_MODE_PARAMS(200000, CR2, Full),
__DEFINE_LINK_MODE_PARAMS(400000, KR4, Full),
__DEFINE_LINK_MODE_PARAMS(400000, SR4, Full),
__DEFINE_LINK_MODE_PARAMS(400000, LR4_ER4_FR4, Full),
__DEFINE_LINK_MODE_PARAMS(400000, DR4, Full),
__DEFINE_LINK_MODE_PARAMS(400000, CR4, Full),
__DEFINE_LINK_MODE_PARAMS(100, FX, Half),
__DEFINE_LINK_MODE_PARAMS(100, FX, Full),
};

View File

@@ -0,0 +1,208 @@
/* THIS IS A GENERATED FILE - DO NOT EDIT!
*
* To regenerate, use: update_compat.sh
*/
#ifdef HAVE_ETHTOOL_LINK_KSETTINGS
/* can't determine existing enum contents using preprocessor,
* so override or supplement as appropriate using #define
*/
#define ETHTOOL_LINK_MODE_10baseT_Half_BIT 0
#define ETHTOOL_LINK_MODE_10baseT_Full_BIT 1
#define ETHTOOL_LINK_MODE_100baseT_Half_BIT 2
#define ETHTOOL_LINK_MODE_100baseT_Full_BIT 3
#define ETHTOOL_LINK_MODE_1000baseT_Half_BIT 4
#define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
#define ETHTOOL_LINK_MODE_Autoneg_BIT 6
#define ETHTOOL_LINK_MODE_TP_BIT 7
#define ETHTOOL_LINK_MODE_AUI_BIT 8
#define ETHTOOL_LINK_MODE_MII_BIT 9
#define ETHTOOL_LINK_MODE_FIBRE_BIT 10
#define ETHTOOL_LINK_MODE_BNC_BIT 11
#define ETHTOOL_LINK_MODE_10000baseT_Full_BIT 12
#define ETHTOOL_LINK_MODE_Pause_BIT 13
#define ETHTOOL_LINK_MODE_Asym_Pause_BIT 14
#define ETHTOOL_LINK_MODE_2500baseX_Full_BIT 15
#define ETHTOOL_LINK_MODE_Backplane_BIT 16
#define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17
#define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18
#define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19
#define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20
#define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21
#define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22
#define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23
#define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24
#define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25
#define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26
#define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27
#define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28
#define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29
#define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30
#define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31
#define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32
#define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33
#define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34
#define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35
#define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36
#define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37
#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
#define ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT 40
#define ETHTOOL_LINK_MODE_1000baseX_Full_BIT 41
#define ETHTOOL_LINK_MODE_10000baseCR_Full_BIT 42
#define ETHTOOL_LINK_MODE_10000baseSR_Full_BIT 43
#define ETHTOOL_LINK_MODE_10000baseLR_Full_BIT 44
#define ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT 45
#define ETHTOOL_LINK_MODE_10000baseER_Full_BIT 46
#define ETHTOOL_LINK_MODE_2500baseT_Full_BIT 47
#define ETHTOOL_LINK_MODE_5000baseT_Full_BIT 48
#define ETHTOOL_LINK_MODE_FEC_NONE_BIT 49
#define ETHTOOL_LINK_MODE_FEC_RS_BIT 50
#define ETHTOOL_LINK_MODE_FEC_BASER_BIT 51
#define ETHTOOL_LINK_MODE_50000baseKR_Full_BIT 52
#define ETHTOOL_LINK_MODE_50000baseSR_Full_BIT 53
#define ETHTOOL_LINK_MODE_50000baseCR_Full_BIT 54
#define ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT 55
#define ETHTOOL_LINK_MODE_50000baseDR_Full_BIT 56
#define ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT 57
#define ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT 58
#define ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT 59
#define ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT 60
#define ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT 61
#define ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT 62
#define ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT 63
#define ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT 64
#define ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT 65
#define ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT 66
#define ETHTOOL_LINK_MODE_100baseT1_Full_BIT 67
#define ETHTOOL_LINK_MODE_1000baseT1_Full_BIT 68
#define ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT 69
#define ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT 70
#define ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT 71
#define ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT 72
#define ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT 73
#define ETHTOOL_LINK_MODE_FEC_LLRS_BIT 74
#define ETHTOOL_LINK_MODE_100000baseKR_Full_BIT 75
#define ETHTOOL_LINK_MODE_100000baseSR_Full_BIT 76
#define ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT 77
#define ETHTOOL_LINK_MODE_100000baseCR_Full_BIT 78
#define ETHTOOL_LINK_MODE_100000baseDR_Full_BIT 79
#define ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT 80
#define ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT 81
#define ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT 82
#define ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT 83
#define ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT 84
#define ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT 85
#define ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT 86
#define ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT 87
#define ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT 88
#define ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT 89
#define ETHTOOL_LINK_MODE_100baseFX_Half_BIT 90
#define ETHTOOL_LINK_MODE_100baseFX_Full_BIT 91
#else /* !HAVE_ETHTOOL_LINK_KSETTINGS */
/* ethtool_link_mode_bit_indices enum doesn't exist, define it */
enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_10baseT_Half_BIT = 0,
ETHTOOL_LINK_MODE_10baseT_Full_BIT = 1,
ETHTOOL_LINK_MODE_100baseT_Half_BIT = 2,
ETHTOOL_LINK_MODE_100baseT_Full_BIT = 3,
ETHTOOL_LINK_MODE_1000baseT_Half_BIT = 4,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5,
ETHTOOL_LINK_MODE_Autoneg_BIT = 6,
ETHTOOL_LINK_MODE_TP_BIT = 7,
ETHTOOL_LINK_MODE_AUI_BIT = 8,
ETHTOOL_LINK_MODE_MII_BIT = 9,
ETHTOOL_LINK_MODE_FIBRE_BIT = 10,
ETHTOOL_LINK_MODE_BNC_BIT = 11,
ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12,
ETHTOOL_LINK_MODE_Pause_BIT = 13,
ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14,
ETHTOOL_LINK_MODE_2500baseX_Full_BIT = 15,
ETHTOOL_LINK_MODE_Backplane_BIT = 16,
ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17,
ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT = 18,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19,
ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20,
ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT = 21,
ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT = 22,
ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23,
ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24,
ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25,
ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26,
ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT = 27,
ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28,
ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29,
ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30,
ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31,
/* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
* 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*
* macro for bits > 31. The only way to use indices > 31 is to
* use the new ETHTOOL_GLINKSETTINGS/ETHTOOL_SLINKSETTINGS API.
*/
ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32,
ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33,
ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34,
ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35,
ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36,
ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37,
ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38,
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39,
ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40,
ETHTOOL_LINK_MODE_1000baseX_Full_BIT = 41,
ETHTOOL_LINK_MODE_10000baseCR_Full_BIT = 42,
ETHTOOL_LINK_MODE_10000baseSR_Full_BIT = 43,
ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44,
ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45,
ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46,
ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47,
ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48,
ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49,
ETHTOOL_LINK_MODE_FEC_RS_BIT = 50,
ETHTOOL_LINK_MODE_FEC_BASER_BIT = 51,
ETHTOOL_LINK_MODE_50000baseKR_Full_BIT = 52,
ETHTOOL_LINK_MODE_50000baseSR_Full_BIT = 53,
ETHTOOL_LINK_MODE_50000baseCR_Full_BIT = 54,
ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT = 55,
ETHTOOL_LINK_MODE_50000baseDR_Full_BIT = 56,
ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT = 57,
ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT = 58,
ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT = 59,
ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT = 60,
ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT = 61,
ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT = 62,
ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT = 63,
ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT = 64,
ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT = 65,
ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT = 66,
ETHTOOL_LINK_MODE_100baseT1_Full_BIT = 67,
ETHTOOL_LINK_MODE_1000baseT1_Full_BIT = 68,
ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT = 69,
ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT = 70,
ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT = 71,
ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72,
ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73,
ETHTOOL_LINK_MODE_FEC_LLRS_BIT = 74,
ETHTOOL_LINK_MODE_100000baseKR_Full_BIT = 75,
ETHTOOL_LINK_MODE_100000baseSR_Full_BIT = 76,
ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT = 77,
ETHTOOL_LINK_MODE_100000baseCR_Full_BIT = 78,
ETHTOOL_LINK_MODE_100000baseDR_Full_BIT = 79,
ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT = 80,
ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT = 81,
ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT = 82,
ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT = 83,
ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT = 84,
ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT = 85,
ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT = 86,
ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT = 87,
ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT = 88,
ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89,
ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90,
ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91,
/* must be last entry */
__ETHTOOL_LINK_MODE_MASK_NBITS
};
#endif

View File

@@ -0,0 +1,732 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2021-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include "bnxt_compat.h"
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_coredump.h"
#include "bnxt_log.h"
static int bnxt_dbg_hwrm_log_buffer_flush(struct bnxt *bp, u16 type, u32 flags, u32 *offset)
{
struct hwrm_dbg_log_buffer_flush_output *resp;
struct hwrm_dbg_log_buffer_flush_input *req;
int rc;
rc = hwrm_req_init(bp, req, HWRM_DBG_LOG_BUFFER_FLUSH);
if (rc)
return rc;
req->flags = cpu_to_le32(flags);
req->type = cpu_to_le16(type);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc)
*offset = le32_to_cpu(resp->current_buffer_offset);
hwrm_req_drop(bp, req);
return rc;
}
static void bnxt_fill_driver_segment_record(struct bnxt *bp,
struct bnxt_driver_segment_record *driver_seg_record,
struct bnxt_ctx_mem_type *ctxm, u16 type)
{
struct bnxt_bs_trace_info *bs_trace = &bp->bs_trace[type];
u32 offset = 0;
int rc = 0;
rc = bnxt_dbg_hwrm_log_buffer_flush(bp, type, 0, &offset);
if (rc)
return;
bnxt_bs_trace_check_wrapping(bs_trace, offset);
driver_seg_record->max_entries = cpu_to_le32(ctxm->max_entries);
driver_seg_record->entry_size = cpu_to_le32(ctxm->entry_size);
driver_seg_record->offset = cpu_to_le32(bs_trace->last_offset);
driver_seg_record->wrapped = bs_trace->wrapped;
}
static void bnxt_retrieve_driver_coredump(struct bnxt *bp, u16 type, u32 *seg_len,
void *buf, u32 offset)
{
struct bnxt_driver_segment_record driver_seg_record = {0};
u32 dump_len, data_offset, record_len, record_offset;
struct bnxt_ctx_mem_info *ctx = bp->ctx;
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ring_mem_info *rmem;
struct bnxt_ctx_mem_type *ctxm;
int k, n = 1;
ctxm = &ctx->ctx_arr[type];
dump_len = 0;
record_len = sizeof(struct bnxt_driver_segment_record);
record_offset = offset;
data_offset = record_offset + sizeof(struct bnxt_driver_segment_record);
bnxt_fill_driver_segment_record(bp, &driver_seg_record, ctxm, type - BNXT_CTX_SRT_TRACE);
ctx_pg = ctxm->pg_info;
if (ctxm->instance_bmap)
n = hweight32(ctxm->instance_bmap);
for (k = 0; k < n ; k++) {
struct bnxt_ctx_pg_info *ctx_pg_block = &ctx_pg[k];
int nr_tbls, i, j;
rmem = &ctx_pg_block->ring_mem;
if (rmem->depth > 1) {
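/* Two-level ring memory: walk each page-directory table and copy its
 * data pages.
 */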
nr_tbls = DIV_ROUND_UP(ctx_pg_block->nr_pages, MAX_CTX_PAGES);
for (i = 0; i < nr_tbls; i++) {
struct bnxt_ctx_pg_info *pg_tbl;
struct bnxt_ring_mem_info *rmem_pde;
pg_tbl = ctx_pg_block->ctx_pg_tbl[i];
rmem_pde = &pg_tbl->ring_mem;
if (i == (nr_tbls - 1)) {
int rem = ctx_pg_block->nr_pages % MAX_CTX_PAGES;
if (rem)
rmem_pde->nr_pages = rem;
}
for (j = 0; j < rmem_pde->nr_pages; j++) {
memcpy(buf + data_offset, rmem_pde->pg_arr[j],
BNXT_PAGE_SIZE);
dump_len += BNXT_PAGE_SIZE;
data_offset += BNXT_PAGE_SIZE;
}
}
} else {
for (i = 0; i < ctx_pg_block->nr_pages; i++) {
memcpy(buf + data_offset, rmem->pg_arr[i], BNXT_PAGE_SIZE);
dump_len += BNXT_PAGE_SIZE;
data_offset += BNXT_PAGE_SIZE;
}
}
memcpy(buf + record_offset, &driver_seg_record, record_len);
*seg_len = dump_len + record_len;
}
}
static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
struct bnxt_hwrm_dbg_dma_info *info)
{
struct hwrm_dbg_cmn_input *cmn_req = msg;
__le16 *seq_ptr = msg + info->seq_off;
struct hwrm_dbg_cmn_output *cmn_resp;
u16 seq = 0, len, segs_off;
dma_addr_t dma_handle;
void *dma_buf, *resp;
int rc, off = 0;
dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle);
if (!dma_buf) {
hwrm_req_drop(bp, msg);
return -ENOMEM;
}
hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT);
cmn_resp = hwrm_req_hold(bp, msg);
resp = cmn_resp;
segs_off = offsetof(struct hwrm_dbg_coredump_list_output, total_segments);
cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
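/* Retrieve the data in chunks: bump the sequence number on each request
 * until the firmware clears the MORE flag in the response.
 */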
while (1) {
*seq_ptr = cpu_to_le16(seq);
rc = hwrm_req_send(bp, msg);
if (rc)
break;
len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
if (!seq &&
cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
info->segs = le16_to_cpu(*((__le16 *)(resp + segs_off)));
if (!info->segs) {
rc = -EIO;
break;
}
info->dest_buf_size = info->segs *
sizeof(struct coredump_segment_record);
info->dest_buf = kmalloc(info->dest_buf_size, GFP_KERNEL);
if (!info->dest_buf) {
rc = -ENOMEM;
break;
}
}
if (info->dest_buf) {
if ((info->seg_start + off + len) <=
BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
memcpy(info->dest_buf + off, dma_buf, len);
} else {
rc = -ENOBUFS;
break;
}
}
if (cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
info->dest_buf_size += len;
if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
break;
seq++;
off += len;
}
hwrm_req_drop(bp, msg);
return rc;
}
static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
struct bnxt_coredump *coredump)
{
struct bnxt_hwrm_dbg_dma_info info = {NULL};
struct hwrm_dbg_coredump_list_input *req;
int rc;
rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST);
if (rc)
return rc;
info.dma_len = COREDUMP_LIST_BUF_LEN;
info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
data_len);
rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
if (!rc) {
coredump->data = info.dest_buf;
coredump->data_size = info.dest_buf_size;
coredump->total_segs = info.segs;
}
return rc;
}
static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
u16 segment_id)
{
struct hwrm_dbg_coredump_initiate_input *req;
int rc;
rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_INITIATE);
if (rc)
return rc;
hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT);
req->component_id = cpu_to_le16(component_id);
req->segment_id = cpu_to_le16(segment_id);
return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
u16 segment_id, u32 *seg_len,
void *buf, u32 buf_len, u32 offset)
{
struct hwrm_dbg_coredump_retrieve_input *req;
struct bnxt_hwrm_dbg_dma_info info = {NULL};
int rc;
rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE);
if (rc)
return rc;
req->component_id = cpu_to_le16(component_id);
req->segment_id = cpu_to_le16(segment_id);
info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
seq_no);
info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
data_len);
if (buf) {
info.dest_buf = buf + offset;
info.buf_len = buf_len;
info.seg_start = offset;
}
rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
if (!rc)
*seg_len = info.dest_buf_size;
return rc;
}
void
bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
struct bnxt_coredump_segment_hdr *seg_hdr,
struct coredump_segment_record *seg_rec, u32 seg_len,
int status, u32 duration, u32 instance, u32 comp_id,
u32 seg_id)
{
memset(seg_hdr, 0, sizeof(*seg_hdr));
memcpy(seg_hdr->signature, "sEgM", 4);
if (seg_rec) {
seg_hdr->component_id = (__force __le32)seg_rec->component_id;
seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
seg_hdr->low_version = seg_rec->version_low;
seg_hdr->high_version = seg_rec->version_hi;
seg_hdr->flags = seg_rec->compress_flags;
} else {
seg_hdr->component_id = cpu_to_le32(comp_id);
seg_hdr->segment_id = cpu_to_le32(seg_id);
}
seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
seg_hdr->length = cpu_to_le32(seg_len);
seg_hdr->status = cpu_to_le32(status);
seg_hdr->duration = cpu_to_le32(duration);
seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
seg_hdr->instance = cpu_to_le32(instance);
}
struct bnxt_time bnxt_get_current_time(struct bnxt *bp)
{
struct bnxt_time time;
#if defined(HAVE_TIME64)
time64_to_tm(ktime_get_real_seconds(), -sys_tz.tz_minuteswest * 60, &time.tm);
#else
struct timeval tv;
do_gettimeofday(&tv);
time_to_tm(tv.tv_sec, -sys_tz.tz_minuteswest * 60, &time.tm);
#endif
time.tm.tm_mon += 1;
time.tm.tm_year += 1900;
return time;
}
static void bnxt_fill_cmdline(struct bnxt_coredump_record *record)
{
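/* Copy the calling process's command line, turning NUL argument separators
 * into spaces; fall back to the thread name if it cannot be read.
 */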
struct mm_struct *mm = current->mm;
if (mm) {
unsigned long len = mm->arg_end - mm->arg_start;
int i, last = 0;
len = min(len, sizeof(record->commandline) - 1);
if (len && !copy_from_user(record->commandline,
(char __user *) mm->arg_start, len)) {
for (i = 0; i < len; i++) {
if (record->commandline[i])
last = i;
else
record->commandline[i] = ' ';
}
record->commandline[last + 1] = 0;
return;
}
}
strscpy(record->commandline, current->comm, TASK_COMM_LEN);
}
void bnxt_fill_empty_seg(struct bnxt *bp, void *buf, u32 len)
{
struct bnxt_coredump_segment_hdr seg_hdr;
bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, len, 0, 0, 0, 0, 0);
memcpy(buf, &seg_hdr, sizeof(seg_hdr));
}
void
bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
struct bnxt_time start, s16 start_utc, u16 total_segs,
int status)
{
struct bnxt_time end = bnxt_get_current_time(bp);
u32 os_ver_major = 0, os_ver_minor = 0;
memset(record, 0, sizeof(*record));
memcpy(record->signature, "cOrE", 4);
record->flags = 0;
record->low_version = 0;
record->high_version = 1;
record->asic_state = 0;
strscpy(record->system_name, utsname()->nodename,
sizeof(record->system_name));
record->year = cpu_to_le16(start.tm.tm_year);
record->month = cpu_to_le16(start.tm.tm_mon);
record->day = cpu_to_le16(start.tm.tm_mday);
record->hour = cpu_to_le16(start.tm.tm_hour);
record->minute = cpu_to_le16(start.tm.tm_min);
record->second = cpu_to_le16(start.tm.tm_sec);
record->utc_bias = cpu_to_le16(start_utc);
bnxt_fill_cmdline(record);
record->total_segments = cpu_to_le32(total_segs);
if (sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor) != 2)
netdev_warn(bp->dev, "Unknown OS release in coredump\n");
record->os_ver_major = cpu_to_le32(os_ver_major);
record->os_ver_minor = cpu_to_le32(os_ver_minor);
strscpy(record->os_name, utsname()->sysname, sizeof(record->os_name));
record->end_year = cpu_to_le16(end.tm.tm_year);
record->end_month = cpu_to_le16(end.tm.tm_mon);
record->end_day = cpu_to_le16(end.tm.tm_mday);
record->end_hour = cpu_to_le16(end.tm.tm_hour);
record->end_minute = cpu_to_le16(end.tm.tm_min);
record->end_second = cpu_to_le16(end.tm.tm_sec);
record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest);
record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
bp->ver_resp.chip_rev << 8 |
bp->ver_resp.chip_metal);
record->asic_id2 = 0;
record->coredump_status = cpu_to_le32(status);
record->ioctl_low_version = 0;
record->ioctl_high_version = 0;
}
static int __bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
{
u32 offset = 0, seg_hdr_len, seg_record_len = 0, buf_len = 0;
u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
struct coredump_segment_record *seg_record = NULL;
u32 driver_comp_id = DRV_COREDUMP_COMP_ID;
struct bnxt_coredump_segment_hdr seg_hdr;
struct bnxt_ctx_mem_info *ctx = bp->ctx;
struct bnxt_coredump coredump = {NULL};
int rc = 0, i, type, drv_seg_count = 0;
u32 driver_seg_id = DRV_SEG_SRT_TRACE;
struct bnxt_ctx_mem_type *ctxm;
struct bnxt_time start_time;
u32 null_seg_len;
s16 start_utc;
if (buf)
buf_len = *dump_len;
start_time = bnxt_get_current_time(bp);
start_utc = sys_tz.tz_minuteswest;
seg_hdr_len = sizeof(seg_hdr);
/* The first segment is the hwrm_ver_get response.
 * For the hwrm_ver_get response, component id = 2 and segment id = 0.
*/
*dump_len = seg_hdr_len + ver_get_resp_len;
if (buf) {
bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
0, 0, 0, 2, 0);
memcpy(buf + offset, &seg_hdr, seg_hdr_len);
offset += seg_hdr_len;
memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
offset += ver_get_resp_len;
}
rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
if (rc) {
netdev_err(bp->dev, "Failed to get coredump segment list\n");
goto fw_coredump_err;
}
*dump_len += seg_hdr_len * coredump.total_segs;
seg_record = (struct coredump_segment_record *)coredump.data;
seg_record_len = sizeof(*seg_record);
for (i = 0; i < coredump.total_segs; i++) {
u16 comp_id = le16_to_cpu(seg_record->component_id);
u16 seg_id = le16_to_cpu(seg_record->segment_id);
u32 duration = 0, seg_len = 0;
unsigned long start, end;
if (buf && ((offset + seg_hdr_len) > BNXT_COREDUMP_BUF_LEN(buf_len))) {
rc = -ENOBUFS;
goto fw_coredump_err;
}
start = jiffies;
rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
if (rc) {
netdev_err(bp->dev,
"Failed to initiate coredump for seg = %d\n",
seg_record->segment_id);
goto next_seg;
}
/* Write segment data into the buffer */
rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
&seg_len, buf, buf_len,
offset + seg_hdr_len);
if (rc == -ENOBUFS)
goto fw_coredump_err;
else if (rc)
netdev_err(bp->dev,
"Failed to retrieve coredump for seg = %d\n",
seg_record->segment_id);
next_seg:
end = jiffies;
duration = jiffies_to_msecs(end - start);
bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
rc, duration, 0, 0, 0);
if (buf) {
/* Write segment header into the buffer */
memcpy(buf + offset, &seg_hdr, seg_hdr_len);
offset += seg_hdr_len + seg_len;
}
*dump_len += seg_len;
seg_record =
(struct coredump_segment_record *)((u8 *)seg_record +
seg_record_len);
}
fw_coredump_err:
if (!ctx)
goto skip_drv_coredump;
for (type = BNXT_CTX_SRT_TRACE; type <= BNXT_CTX_ROCE_HWRM_TRACE;
type++, driver_seg_id++) {
u32 duration = 0, seg_len = 0;
unsigned long start, end;
ctxm = &ctx->ctx_arr[type];
if (!buf || !(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID))
continue;
*dump_len += seg_hdr_len;
start = jiffies;
bnxt_retrieve_driver_coredump(bp, type, &seg_len, buf, offset + seg_hdr_len);
end = jiffies;
duration = jiffies_to_msecs(end - start);
bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, seg_len,
rc, duration, 0, driver_comp_id, driver_seg_id);
/* Write segment header into the buffer */
memcpy(buf + offset, &seg_hdr, seg_hdr_len);
offset += seg_hdr_len + seg_len;
*dump_len += seg_len;
seg_record = (struct coredump_segment_record *)((u8 *)seg_record + seg_record_len);
drv_seg_count++;
}
skip_drv_coredump:
null_seg_len = BNXT_COREDUMP_BUF_LEN(buf_len) - *dump_len;
if (buf) {
bnxt_fill_empty_seg(bp, buf + offset, null_seg_len);
/* The coredump record occupies the last 1024 bytes of the buffer */
offset = buf_len - sizeof(struct bnxt_coredump_record);
bnxt_fill_coredump_record(bp, buf + offset, start_time, start_utc,
coredump.total_segs + drv_seg_count + 2, rc);
}
kfree(coredump.data);
*dump_len += sizeof(struct bnxt_coredump_record) + seg_hdr_len + null_seg_len;
if (rc == -ENOBUFS)
netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
return rc;
}
static u32 bnxt_copy_crash_data(struct bnxt_ring_mem_info *rmem, void *buf,
u32 dump_len)
{
u32 data_copied = 0;
u32 data_len;
int i;
for (i = 0; i < rmem->nr_pages; i++) {
data_len = rmem->page_size;
if (data_copied + data_len > dump_len)
data_len = dump_len - data_copied;
memcpy(buf + data_copied, rmem->pg_arr[i], data_len);
data_copied += data_len;
if (data_copied >= dump_len)
break;
}
return data_copied;
}
static int bnxt_copy_crash_dump(struct bnxt *bp, void *buf, u32 dump_len)
{
struct bnxt_ring_mem_info *rmem;
u32 offset = 0;
if (!bp->fw_crash_mem)
return -EEXIST;
rmem = &bp->fw_crash_mem->ring_mem;
if (rmem->depth > 1) {
int i;
for (i = 0; i < rmem->nr_pages; i++) {
struct bnxt_ctx_pg_info *pg_tbl;
pg_tbl = bp->fw_crash_mem->ctx_pg_tbl[i];
offset += bnxt_copy_crash_data(&pg_tbl->ring_mem,
buf + offset, dump_len - offset);
if (offset >= dump_len)
break;
}
} else {
bnxt_copy_crash_data(rmem, buf, dump_len);
}
return 0;
}
static bool bnxt_crash_dump_avail(struct bnxt *bp)
{
u32 sig = 0;
/* The first 4 bytes (signature) of the crash dump are always non-zero */
bnxt_copy_crash_dump(bp, &sig, sizeof(u32));
if (!sig)
return false;
return true;
}
int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len)
{
if (dump_type >= BNXT_DUMP_DRIVER) {
bnxt_start_logging_coredump(bp, buf, dump_len, dump_type);
return 0;
}
if (dump_type == BNXT_DUMP_CRASH) {
if (bp->fw_dbg_cap & BNXT_FW_DBG_CAP_CRASHDUMP_HOST)
return bnxt_copy_crash_dump(bp, buf, *dump_len);
#ifdef CONFIG_TEE_BNXT_FW
else if (bp->fw_dbg_cap & BNXT_FW_DBG_CAP_CRASHDUMP_SOC)
return tee_bnxt_copy_coredump(buf, 0, *dump_len);
#endif
else
return -EOPNOTSUPP;
} else {
return __bnxt_get_coredump(bp, buf, dump_len);
}
}
static void bnxt_get_bs_trace_size(struct bnxt *bp, u8 *segments, u32 *seg_len)
{
struct bnxt_ctx_mem_info *ctx = bp->ctx;
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_type *ctxm;
int k, n = 1;
u16 type;
if (!ctx)
return;
for (type = BNXT_CTX_SRT_TRACE; type <= BNXT_CTX_ROCE_HWRM_TRACE; type++) {
ctxm = &ctx->ctx_arr[type];
if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID))
continue;
ctx_pg = ctxm->pg_info;
if (ctxm->instance_bmap)
n = hweight32(ctxm->instance_bmap);
for (k = 0; k < n ; k++)
*seg_len += ctx_pg[k].nr_pages * BNXT_PAGE_SIZE;
*segments = *segments + 1;
}
}
static void bnxt_append_driver_coredump_len(struct bnxt *bp, u32 *len)
{
u8 segments = 0;
u32 size = 0;
int hdr_len;
bnxt_get_bs_trace_size(bp, &segments, &size);
if (size) {
hdr_len = segments * sizeof(struct bnxt_driver_segment_record);
*len += size + hdr_len;
}
}
int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
{
struct hwrm_dbg_qcfg_output *resp;
struct hwrm_dbg_qcfg_input *req;
int rc, hdr_len = 0;
if (dump_type >= BNXT_DUMP_DRIVER) {
hdr_len = 2 * sizeof(struct bnxt_coredump_segment_hdr) +
sizeof(struct hwrm_ver_get_output) +
sizeof(struct bnxt_coredump_record);
*dump_len = bnxt_get_loggers_coredump_size(bp, dump_type);
*dump_len = *dump_len + hdr_len;
return 0;
}
if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
return -EOPNOTSUPP;
if (dump_type == BNXT_DUMP_CRASH &&
!(bp->fw_dbg_cap & BNXT_FW_DBG_CAP_CRASHDUMP_SOC ||
(bp->fw_dbg_cap & BNXT_FW_DBG_CAP_CRASHDUMP_HOST)))
return -EOPNOTSUPP;
rc = hwrm_req_init(bp, req, HWRM_DBG_QCFG);
if (rc)
return rc;
req->fid = cpu_to_le16(0xffff);
if (dump_type == BNXT_DUMP_CRASH) {
if (bp->fw_dbg_cap & BNXT_FW_DBG_CAP_CRASHDUMP_SOC)
req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_SOC);
else
req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_HOST);
}
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (rc)
goto get_dump_len_exit;
if (dump_type == BNXT_DUMP_CRASH) {
if (bp->fw_dbg_cap & BNXT_FW_DBG_CAP_CRASHDUMP_SOC)
*dump_len = BNXT_CRASH_DUMP_LEN;
else
*dump_len = le32_to_cpu(resp->crashdump_size);
} else {
/* The driver adds coredump headers for the HWRM_VER_GET response
 * segment and the null segment on top of the firmware coredump size.
*/
hdr_len = 2 * sizeof(struct bnxt_coredump_segment_hdr) +
sizeof(struct hwrm_ver_get_output) +
sizeof(struct bnxt_coredump_record);
*dump_len = le32_to_cpu(resp->coredump_size) + hdr_len;
}
if (*dump_len <= hdr_len)
rc = -EINVAL;
get_dump_len_exit:
hwrm_req_drop(bp, req);
return rc;
}
u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type)
{
u32 len = 0;
if (dump_type == BNXT_DUMP_CRASH &&
bp->fw_dbg_cap & BNXT_FW_DBG_CAP_CRASHDUMP_HOST &&
bp->fw_crash_mem) {
if (!bnxt_crash_dump_avail(bp))
return 0;
return bp->fw_crash_len;
}
if (bnxt_hwrm_get_dump_len(bp, dump_type, &len)) {
if (dump_type == BNXT_DUMP_LIVE)
__bnxt_get_coredump(bp, NULL, &len);
}
if (dump_type == BNXT_DUMP_LIVE)
bnxt_append_driver_coredump_len(bp, &len);
return len;
}

View File

@@ -0,0 +1,159 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2018 Broadcom Limited
* Copyright (c) 2018-2022 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_COREDUMP_H
#define BNXT_COREDUMP_H
#include <linux/utsname.h>
#include <linux/time.h>
#include <linux/rtc.h>
struct bnxt_coredump_segment_hdr {
__u8 signature[4];
__le32 component_id;
__le32 segment_id;
__le32 flags;
__u8 low_version;
__u8 high_version;
__le16 function_id;
__le32 offset;
__le32 length;
__le32 status;
__le32 duration;
__le32 data_offset;
__le32 instance;
__le32 rsvd[5];
};
struct bnxt_coredump_record {
__u8 signature[4];
__le32 flags;
__u8 low_version;
__u8 high_version;
__u8 asic_state;
__u8 rsvd0[5];
char system_name[32];
__le16 year;
__le16 month;
__le16 day;
__le16 hour;
__le16 minute;
__le16 second;
__le16 utc_bias;
__le16 rsvd1;
char commandline[256];
__le32 total_segments;
__le32 os_ver_major;
__le32 os_ver_minor;
__le32 rsvd2;
char os_name[32];
__le16 end_year;
__le16 end_month;
__le16 end_day;
__le16 end_hour;
__le16 end_minute;
__le16 end_second;
__le16 end_utc_bias;
__le32 asic_id1;
__le32 asic_id2;
__le32 coredump_status;
__u8 ioctl_low_version;
__u8 ioctl_high_version;
__le16 rsvd3[313];
};
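/* Prepended to each driver trace segment; describes the backing ring and
 * whether the trace buffer has wrapped.
 */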
struct bnxt_driver_segment_record {
__le32 max_entries;
__le32 entry_size;
__le32 offset;
__u8 wrapped:1;
__u8 unused[3];
};
#define DRV_COREDUMP_COMP_ID 0xD
#define DRV_SEG_SRT_TRACE 1
#define DRV_SEG_SRT2_TRACE 2
#define DRV_SEG_CRT_TRACE 3
#define DRV_SEG_CRT2_TRACE 4
#define DRV_SEG_RIGP0_TRACE 5
#define DRV_SEG_LOG_HWRM_L2_TRACE 6
#define DRV_SEG_LOG_HWRM_ROCE_TRACE 7
#define BNXT_CRASH_DUMP_LEN (8 << 20)
#define COREDUMP_LIST_BUF_LEN 2048
#define COREDUMP_RETRIEVE_BUF_LEN 4096
struct bnxt_coredump {
void *data;
int data_size;
u16 total_segs;
};
struct bnxt_time {
struct tm tm;
};
#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record) - \
sizeof(struct bnxt_coredump_segment_hdr))
struct bnxt_hwrm_dbg_dma_info {
void *dest_buf;
int dest_buf_size;
u16 dma_len;
u16 seq_off;
u16 data_len_off;
u16 segs;
u32 seg_start;
u32 buf_len;
};
struct hwrm_dbg_cmn_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le64 host_dest_addr;
__le32 host_buf_len;
};
struct hwrm_dbg_cmn_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 flags;
#define HWRM_DBG_CMN_FLAGS_MORE 1
};
#define BNXT_DBG_FL_CR_DUMP_SIZE_SOC \
(DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR)
#define BNXT_DBG_FL_CR_DUMP_SIZE_HOST \
(DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR)
#define BNXT_DBG_CR_DUMP_MDM_CFG_DDR \
(DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR)
u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type);
int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len);
int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len);
void bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
struct bnxt_coredump_segment_hdr *seg_hdr,
struct coredump_segment_record *seg_rec,
u32 seg_len, int status, u32 duration,
u32 instance, u32 comp_id, u32 seg_id);
struct bnxt_time bnxt_get_current_time(struct bnxt *bp);
void bnxt_fill_empty_seg(struct bnxt *bp, void *buf, u32 len);
void
bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
struct bnxt_time start, s16 start_utc, u16 total_segs,
int status);
#endif

View File

@@ -0,0 +1,119 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2016-2018 Broadcom Limited
* Copyright (c) 2018-2022 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_DBR_H
#define BNXT_DBR_H
#include <linux/delay.h>
/* 32-bit XORSHIFT generator. Seed must not be zero. */
static inline u32 xorshift(u32 *state)
{
u32 seed = *state;
seed ^= seed << 13;
seed ^= seed >> 17;
seed ^= seed << 5;
*state = seed;
return seed;
}
static inline u16 rnd(u32 *state, u16 range)
{
/* range must be one less than a power of 2 */
return (xorshift(state) & range);
}
#define BNXT_DB_FIFO_ROOM_MASK 0x1fff8000
#define BNXT_DB_FIFO_ROOM_SHIFT 15
#define BNXT_MAX_FIFO_DEPTH 0x2c00
#define BNXT_DB_PACING_ALGO_THRESHOLD 250
#define BNXT_DEFAULT_PACING_PROBABILITY 0xFFFF
#define BNXT_DBR_PACING_WIN_BASE 0x2000
#define BNXT_DBR_PACING_WIN_MAP_OFF 4
#define BNXT_DBR_PACING_WIN_OFF(reg) (BNXT_DBR_PACING_WIN_BASE + \
((reg) & BNXT_GRC_OFFSET_MASK))
struct bnxt_dbr_sw_stats {
u32 nr_dbr;
u64 total_dbr_us;
u64 avg_dbr_us;
u64 max_dbr_us;
u64 min_dbr_us;
};
struct bnxt_dbr_debug {
u32 recover_interval_ms;
u32 drop_ratio;
u32 drop_cnt;
u8 recover_enable;
u8 drop_enable;
};
struct bnxt_dbr {
u8 enable;
u8 pacing_enable;
atomic_t event_cnt;
/* dedicated workqueue for DB recovery DRA */
struct workqueue_struct *wq;
struct delayed_work dwork;
struct mutex lock; /* protect this data struct */
u32 curr_epoch;
u32 last_l2_epoch;
u32 last_roce_epoch;
u32 last_completed_epoch;
u32 stat_db_fifo_reg;
u32 db_fifo_reg_off;
struct bnxt_dbr_sw_stats sw_stats;
struct bnxt_dbr_debug debug;
};
static inline int __get_fifo_occupancy(void __iomem *bar0, u32 db_fifo_reg_off)
{
u32 val;
val = readl(bar0 + db_fifo_reg_off);
return BNXT_MAX_FIFO_DEPTH -
((val & BNXT_DB_FIFO_ROOM_MASK) >>
BNXT_DB_FIFO_ROOM_SHIFT);
}
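/* Doorbell pacing: with probability pacing_prob / 0xFFFF, poll the DB FIFO
 * and back off with a randomized, exponentially growing delay (capped at
 * 128 us) while occupancy stays above pacing_th.
 */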
/* Returns immediately if doorbell pacing is not enabled */
static inline void bnxt_do_pacing(void __iomem *bar0, struct bnxt_dbr *dbr,
u32 *seed, u32 pacing_th, u32 pacing_prob)
{
u32 pace_time = 1;
u32 retry = 10;
if (!dbr->pacing_enable)
return;
if (rnd(seed, 0xFFFF) < pacing_prob) {
while (__get_fifo_occupancy(bar0, dbr->db_fifo_reg_off) > pacing_th &&
retry--) {
u32 us_delay;
us_delay = rnd(seed, pace_time - 1);
if (us_delay)
udelay(us_delay);
/* pacing delay time capped at 128 us */
pace_time = min_t(u16, pace_time * 2, 128);
}
}
}
#endif

View File

@@ -0,0 +1,938 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2016-2018 Broadcom Limited
* Copyright (c) 2018-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/log2.h>
#include "bnxt_compat.h"
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_dcb.h"
#ifdef CONFIG_BNXT_DCB
static int bnxt_tx_queue_to_tc(struct bnxt *bp, u8 queue_id)
{
int i, j;
for (i = 0; i < bp->max_tc; i++) {
if (bp->tx_q_info[i].queue_id == queue_id) {
for (j = 0; j < bp->max_tc; j++) {
if (bp->tc_to_qidx[j] == i)
return j;
}
}
}
return -EINVAL;
}
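/* Program the IEEE priority to CoS queue mapping into the firmware for the
 * given direction (TX, RX or bidirectional).
 */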
static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp,
struct ieee_ets *ets,
u32 path_dir)
{
struct hwrm_queue_pri2cos_cfg_input *req;
struct bnxt_queue_info *q_info;
u8 *pri2cos;
int rc, i;
rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_CFG);
if (rc)
return rc;
req->flags = cpu_to_le32(path_dir | QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN);
if (path_dir == QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR ||
path_dir == QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX)
q_info = bp->tx_q_info;
else
q_info = bp->rx_q_info;
pri2cos = &req->pri0_cos_queue_id;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
u8 qidx;
req->enables |= cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);
qidx = bp->tc_to_qidx[ets->prio_tc[i]];
pri2cos[i] = q_info[qidx].queue_id;
}
return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
struct hwrm_queue_pri2cos_qcfg_output *resp;
struct hwrm_queue_pri2cos_qcfg_input *req;
int rc;
rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_QCFG);
if (rc)
return rc;
req->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc) {
u8 *pri2cos = &resp->pri0_cos_queue_id;
int i;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
u8 queue_id = pri2cos[i];
int tc;
tc = bnxt_tx_queue_to_tc(bp, queue_id);
if (tc >= 0)
ets->prio_tc[i] = tc;
}
}
hwrm_req_drop(bp, req);
return rc;
}
/*
* On success, the returned resp is valid and the caller must release
* it with hwrm_req_drop().
*/
static struct bnxt_queue_cos2bw_qcfg_output *
bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct hwrm_queue_cos2bw_qcfg_input **out_req)
{
struct bnxt_queue_cos2bw_qcfg_output *resp;
struct hwrm_queue_cos2bw_qcfg_input *req;
int rc;
rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_QCFG);
if (rc)
return ERR_PTR(rc);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (rc) {
hwrm_req_drop(bp, req);
return ERR_PTR(rc);
}
*out_req = req;
return resp;
}
static __le32 bnxt_get_max_bw_from_queue(struct bnxt *bp,
struct bnxt_queue_cos2bw_qcfg_output *resp, u8 queue_id)
{
int i;
if (resp->queue_id0 == queue_id)
return resp->queue_id0_max_bw;
for (i = 0; i < (IEEE_8021QAZ_MAX_TCS - 1); i++) {
if (resp->cfg[i].queue_id == queue_id)
return resp->cfg[i].queue_id_max_bw;
}
return 0;
}
static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
u8 max_tc)
{
struct bnxt_queue_cos2bw_qcfg_output *cos2bw_qcfg_resp;
struct hwrm_queue_cos2bw_qcfg_input *cos2bw_qcfg_req;
struct bnxt_queue_cos2bw_cfg_input *req;
struct bnxt_cos2bw_cfg cos2bw;
int rc, i;
rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_CFG);
if (rc)
return rc;
cos2bw_qcfg_resp = bnxt_hwrm_queue_cos2bw_qcfg(bp, &cos2bw_qcfg_req);
if (IS_ERR(cos2bw_qcfg_resp)) {
hwrm_req_drop(bp, req);
return PTR_ERR(cos2bw_qcfg_resp);
}
for (i = 0; i < bp->max_tc; i++) {
u8 qidx = bp->tc_to_qidx[i];
u8 queue_id;
req->enables |=
cpu_to_le32(QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << qidx);
memset(&cos2bw, 0, sizeof(cos2bw));
queue_id = bp->tx_q_info[qidx].queue_id;
cos2bw.queue_id = queue_id;
if (i >= max_tc)
goto skip_ets;
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
cos2bw.tsa =
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP;
cos2bw.pri_lvl = i;
} else {
cos2bw.tsa =
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS;
cos2bw.bw_weight = ets->tc_tx_bw[i];
/* older firmware requires min_bw to be set to the
* same weight value in percent.
*/
#ifdef BNXT_FPGA
if (BNXT_FW_MAJ(bp) < 218 &&
!(bp->flags & BNXT_FLAG_CHIP_P7)) {
#else
if (BNXT_FW_MAJ(bp) < 218) {
#endif
cos2bw.min_bw =
cpu_to_le32((ets->tc_tx_bw[i] * 100) |
BW_VALUE_UNIT_PERCENT1_100);
}
}
skip_ets:
cos2bw.max_bw = bnxt_get_max_bw_from_queue(bp, cos2bw_qcfg_resp, queue_id);
if (qidx == 0) {
req->queue_id0 = cos2bw.queue_id;
req->queue_id0_min_bw = cos2bw.min_bw;
req->queue_id0_max_bw = cos2bw.max_bw;
req->queue_id0_tsa_assign = cos2bw.tsa;
req->queue_id0_pri_lvl = cos2bw.pri_lvl;
req->queue_id0_bw_weight = cos2bw.bw_weight;
} else {
memcpy(&req->cfg[qidx - 1], &cos2bw.cfg, sizeof(cos2bw.cfg));
}
}
hwrm_req_drop(bp, cos2bw_qcfg_req);
return hwrm_req_send(bp, req);
}
static int bnxt_getets(struct bnxt *bp, struct ieee_ets *ets)
{
struct bnxt_queue_cos2bw_qcfg_output *resp;
struct hwrm_queue_cos2bw_qcfg_input *req;
struct bnxt_cos2bw_cfg cos2bw;
int i;
resp = bnxt_hwrm_queue_cos2bw_qcfg(bp, &req);
if (IS_ERR(resp))
return PTR_ERR(resp);
for (i = 0; i < bp->max_tc; i++) {
int tc;
if (i == 0) {
cos2bw.queue_id = resp->queue_id0;
cos2bw.min_bw = resp->queue_id0_min_bw;
cos2bw.max_bw = resp->queue_id0_max_bw;
cos2bw.tsa = resp->queue_id0_tsa_assign;
cos2bw.pri_lvl = resp->queue_id0_pri_lvl;
cos2bw.bw_weight = resp->queue_id0_bw_weight;
} else {
memcpy(&cos2bw.cfg, &resp->cfg[i - 1], sizeof(cos2bw.cfg));
}
tc = bnxt_tx_queue_to_tc(bp, cos2bw.queue_id);
if (tc < 0)
continue;
if (cos2bw.tsa ==
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_STRICT;
} else {
ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_ETS;
ets->tc_tx_bw[tc] = cos2bw.bw_weight;
}
}
hwrm_req_drop(bp, req);
return 0;
}
static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
{
unsigned long qmap = 0;
int max = bp->max_tc;
int i, j, rc;
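/*
 * Illustrative example (assumed values): with max_tc = 4, lossless rx
 * queues at indices 1 and 2 and lltc_mask = 0x3, the first pass maps
 * TC0->q1 and TC1->q2; the second pass maps TC2->q0 and TC3->q3.
 */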
/* Assign lossless TCs first */
for (i = 0, j = 0; i < max; ) {
if (lltc_mask & (1 << i)) {
if (BNXT_LLQ(bp->rx_q_info[j].queue_profile)) {
bp->tc_to_qidx[i] = j;
__set_bit(j, &qmap);
i++;
}
j++;
continue;
}
i++;
}
for (i = 0, j = 0; i < max; i++) {
if (lltc_mask & (1 << i))
continue;
j = find_next_zero_bit(&qmap, max, j);
bp->tc_to_qidx[i] = j;
__set_bit(j, &qmap);
j++;
}
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
if (rc) {
netdev_warn(bp->dev, "failed to open NIC, rc = %d\n", rc);
return rc;
}
}
if (bp->ieee_ets) {
int tc = bp->num_tc;
if (!tc)
tc = 1;
rc = bnxt_hwrm_queue_cos2bw_cfg(bp, bp->ieee_ets, tc);
if (rc) {
netdev_warn(bp->dev, "failed to config BW, rc = %d\n", rc);
return rc;
}
rc = bnxt_hwrm_queue_pri2cos_cfg(bp, bp->ieee_ets,
QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR);
if (rc) {
netdev_warn(bp->dev, "failed to config prio, rc = %d\n", rc);
return rc;
}
}
return 0;
}
static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
struct hwrm_queue_pfcenable_cfg_input *req;
struct ieee_ets *my_ets = bp->ieee_ets;
unsigned int tc_mask = 0, pri_mask = 0;
u8 i, pri, lltc_count = 0;
bool need_q_remap = false;
int rc;
if (!my_ets)
return -EINVAL;
for (i = 0; i < bp->max_tc; i++) {
for (pri = 0; pri < IEEE_8021QAZ_MAX_TCS; pri++) {
if ((pfc->pfc_en & (1 << pri)) &&
(my_ets->prio_tc[pri] == i)) {
pri_mask |= 1 << pri;
tc_mask |= 1 << i;
}
}
if (tc_mask & (1 << i))
lltc_count++;
}
if (lltc_count > bp->max_lltc)
return -EINVAL;
for (i = 0; i < bp->max_tc; i++) {
if (tc_mask & (1 << i)) {
u8 qidx = bp->tc_to_qidx[i];
if (!BNXT_LLQ(bp->rx_q_info[qidx].queue_profile)) {
need_q_remap = true;
break;
}
}
}
if (need_q_remap)
bnxt_queue_remap(bp, tc_mask);
rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_CFG);
if (rc)
return rc;
req->flags = cpu_to_le32(pri_mask);
return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
struct hwrm_queue_pfcenable_qcfg_output *resp;
struct hwrm_queue_pfcenable_qcfg_input *req;
u8 pri_mask;
int rc;
rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_QCFG);
if (rc)
return rc;
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (rc) {
hwrm_req_drop(bp, req);
return rc;
}
pri_mask = le32_to_cpu(resp->flags);
pfc->pfc_en = pri_mask;
hwrm_req_drop(bp, req);
return 0;
}
static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
bool add)
{
struct hwrm_fw_set_structured_data_input *set;
struct hwrm_fw_get_structured_data_input *get;
struct hwrm_struct_data_dcbx_app *fw_app;
struct hwrm_struct_hdr *data;
dma_addr_t mapping;
size_t data_len;
int rc, n, i;
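/*
 * Read-modify-write of the firmware's DCBX app table: fetch the current
 * table into a DMA slice, append or delete the entry in place, then
 * write the whole table back in one set operation.
 */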
if (bp->hwrm_spec_code < 0x10601)
return 0;
rc = hwrm_req_init(bp, get, HWRM_FW_GET_STRUCTURED_DATA);
if (rc)
return rc;
hwrm_req_hold(bp, get);
hwrm_req_alloc_flags(bp, get, GFP_KERNEL | __GFP_ZERO);
n = IEEE_8021QAZ_MAX_TCS;
data_len = sizeof(*data) + sizeof(*fw_app) * n;
data = hwrm_req_dma_slice(bp, get, data_len, &mapping);
if (!data) {
rc = -ENOMEM;
goto set_app_exit;
}
get->dest_data_addr = cpu_to_le64(mapping);
get->structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
get->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
get->count = 0;
rc = hwrm_req_send(bp, get);
if (rc)
goto set_app_exit;
fw_app = (struct hwrm_struct_data_dcbx_app *)(data + 1);
if (data->struct_id != cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP)) {
rc = -ENODEV;
goto set_app_exit;
}
n = data->count;
for (i = 0; i < n; i++, fw_app++) {
if (fw_app->protocol_id == cpu_to_be16(app->protocol) &&
fw_app->protocol_selector == app->selector &&
fw_app->priority == app->priority) {
if (add)
goto set_app_exit;
else
break;
}
}
if (add) {
/* append */
n++;
fw_app->protocol_id = cpu_to_be16(app->protocol);
fw_app->protocol_selector = app->selector;
fw_app->priority = app->priority;
fw_app->valid = 1;
} else {
size_t len = 0;
/* not found, nothing to delete */
if (n == i)
goto set_app_exit;
len = (n - 1 - i) * sizeof(*fw_app);
if (len)
memmove(fw_app, fw_app + 1, len);
n--;
memset(fw_app + n, 0, sizeof(*fw_app));
}
data->count = n;
data->len = cpu_to_le16(sizeof(*fw_app) * n);
data->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
rc = hwrm_req_init(bp, set, HWRM_FW_SET_STRUCTURED_DATA);
if (rc)
goto set_app_exit;
set->src_data_addr = cpu_to_le64(mapping);
set->data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
set->hdr_cnt = 1;
rc = hwrm_req_send(bp, set);
set_app_exit:
hwrm_req_drop(bp, get); /* dropping get request and associated slice */
return rc;
}
static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
{
struct hwrm_queue_dscp_qcaps_output *resp;
struct hwrm_queue_dscp_qcaps_input *req;
int rc;
bp->max_dscp_value = 0;
if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp))
return 0;
rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP_QCAPS);
if (rc)
return rc;
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send_silent(bp, req);
if (!rc) {
bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1;
if (bp->max_dscp_value < 0x3f)
bp->max_dscp_value = 0;
}
hwrm_req_drop(bp, req);
return rc;
}
static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
bool add)
{
struct hwrm_queue_dscp2pri_cfg_input *req;
struct bnxt_dscp2pri_entry *dscp2pri;
dma_addr_t mapping;
int rc;
if (bp->hwrm_spec_code < 0x10800)
return 0;
rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP2PRI_CFG);
if (rc)
return rc;
dscp2pri = hwrm_req_dma_slice(bp, req, sizeof(*dscp2pri), &mapping);
if (!dscp2pri) {
hwrm_req_drop(bp, req);
return -ENOMEM;
}
req->src_data_addr = cpu_to_le64(mapping);
dscp2pri->dscp = app->protocol;
if (add)
dscp2pri->mask = 0x3f;
else
dscp2pri->mask = 0;
dscp2pri->pri = app->priority;
req->entry_cnt = cpu_to_le16(1);
rc = hwrm_req_send(bp, req);
return rc;
}
static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
{
int total_ets_bw = 0;
bool zero = false;
u8 max_tc = 0;
int i;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->prio_tc[i] > bp->max_tc) {
netdev_err(bp->dev, "priority to TC mapping exceeds TC count %d\n",
ets->prio_tc[i]);
return -EINVAL;
}
if (ets->prio_tc[i] > max_tc)
max_tc = ets->prio_tc[i];
if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc)
return -EINVAL;
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
break;
case IEEE_8021QAZ_TSA_ETS:
total_ets_bw += ets->tc_tx_bw[i];
zero = zero || !ets->tc_tx_bw[i];
break;
default:
return -ENOTSUPP;
}
}
if (total_ets_bw > 100) {
netdev_warn(bp->dev, "rejecting ETS config exceeding available bandwidth\n");
return -EINVAL;
}
if (zero && total_ets_bw == 100) {
netdev_warn(bp->dev, "rejecting ETS config starving a TC\n");
return -EINVAL;
}
if (max_tc >= bp->max_tc)
*tc = bp->max_tc;
else
*tc = max_tc + 1;
return 0;
}
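/*
 * Worked example of the rules above (values assumed for illustration,
 * with bp->max_tc >= 2): eight priorities split across two ETS TCs whose
 * weights sum to exactly 100 with no TC starved, so bnxt_ets_validate()
 * accepts it and reports *tc = 2.
 */
static const struct ieee_ets bnxt_example_valid_ets = {
	.prio_tc  = { 0, 0, 0, 0, 1, 1, 1, 1 },	/* prio 0-3 -> TC0, 4-7 -> TC1 */
	.tc_tsa   = { IEEE_8021QAZ_TSA_ETS, IEEE_8021QAZ_TSA_ETS },
	.tc_tx_bw = { 60, 40 },				/* 60 + 40 == 100 */
};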
static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
{
struct bnxt *bp = netdev_priv(dev);
struct ieee_ets *my_ets = bp->ieee_ets;
int rc;
ets->ets_cap = bp->max_tc;
if (!my_ets) {
if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
return 0;
my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
if (!my_ets)
return -ENOMEM;
rc = bnxt_getets(bp, my_ets);
if (rc)
goto error;
rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
if (rc)
goto error;
/* cache result */
bp->ieee_ets = my_ets;
}
ets->cbs = my_ets->cbs;
memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
return 0;
error:
kfree(my_ets);
return rc;
}
static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
struct bnxt *bp = netdev_priv(dev);
bool alloc = !bp->ieee_ets;
u8 max_tc = 0;
int rc;
if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
!(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
return -EINVAL;
rc = bnxt_ets_validate(bp, ets, &max_tc);
if (rc)
return rc;
if (alloc) {
bp->ieee_ets = kmalloc(sizeof(*ets), GFP_KERNEL);
if (!bp->ieee_ets)
return -ENOMEM;
}
rc = bnxt_setup_mq_tc(dev, max_tc);
if (rc)
goto error;
rc = bnxt_hwrm_queue_cos2bw_cfg(bp, ets, max_tc);
if (rc)
goto error;
if (!bp->is_asym_q) {
rc = bnxt_hwrm_queue_pri2cos_cfg(bp, ets,
QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR);
if (rc)
goto error;
} else {
rc = bnxt_hwrm_queue_pri2cos_cfg(bp, ets,
QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX);
if (rc)
goto error;
rc = bnxt_hwrm_queue_pri2cos_cfg(bp, ets,
QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX);
if (rc)
goto error;
}
memcpy(bp->ieee_ets, ets, sizeof(*ets));
return 0;
error:
if (alloc) {
kfree(bp->ieee_ets);
bp->ieee_ets = NULL;
}
return rc;
}
static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
struct bnxt *bp = netdev_priv(dev);
__le64 *stats = bp->port_stats.hw_stats;
struct ieee_pfc *my_pfc = bp->ieee_pfc;
long rx_off, tx_off;
int i, rc;
pfc->pfc_cap = bp->max_lltc;
if (!my_pfc) {
if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
return 0;
my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
if (!my_pfc)
return 0;
bp->ieee_pfc = my_pfc;
rc = bnxt_hwrm_queue_pfc_qcfg(bp, my_pfc);
if (rc)
return 0;
}
pfc->pfc_en = my_pfc->pfc_en;
pfc->mbc = my_pfc->mbc;
pfc->delay = my_pfc->delay;
if (!stats)
return 0;
rx_off = BNXT_RX_STATS_OFFSET(rx_pfc_ena_frames_pri0);
tx_off = BNXT_TX_STATS_OFFSET(tx_pfc_ena_frames_pri0);
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++, rx_off++, tx_off++) {
pfc->requests[i] = le64_to_cpu(*(stats + tx_off));
pfc->indications[i] = le64_to_cpu(*(stats + rx_off));
}
return 0;
}
static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
struct bnxt *bp = netdev_priv(dev);
struct ieee_pfc *my_pfc = bp->ieee_pfc;
int rc;
if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
!(bp->dcbx_cap & DCB_CAP_DCBX_HOST) ||
(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
return -EINVAL;
if (!my_pfc) {
my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
if (!my_pfc)
return -ENOMEM;
bp->ieee_pfc = my_pfc;
}
rc = bnxt_hwrm_queue_pfc_cfg(bp, pfc);
if (!rc)
memcpy(my_pfc, pfc, sizeof(*my_pfc));
return rc;
}
static int bnxt_dcbnl_ieee_dscp_app_prep(struct bnxt *bp, struct dcb_app *app)
{
if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) {
if (!bp->max_dscp_value)
return -ENOTSUPP;
if (app->protocol > bp->max_dscp_value)
return -EINVAL;
}
return 0;
}
static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
struct bnxt *bp = netdev_priv(dev);
int rc;
if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
!(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
return -EINVAL;
rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
if (rc)
return rc;
rc = dcb_ieee_setapp(dev, app);
if (rc)
return rc;
if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
app->protocol == ETH_P_ROCE) ||
(app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
app->protocol == ROCE_V2_UDP_DPORT))
rc = bnxt_hwrm_set_dcbx_app(bp, app, true);
if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, true);
return rc;
}
static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
{
struct bnxt *bp = netdev_priv(dev);
int rc;
if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
!(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
return -EINVAL;
rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
if (rc)
return rc;
rc = dcb_ieee_delapp(dev, app);
if (rc)
return rc;
if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
app->protocol == ETH_P_ROCE) ||
(app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
app->protocol == ROCE_V2_UDP_DPORT))
rc = bnxt_hwrm_set_dcbx_app(bp, app, false);
if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, false);
return rc;
}
static void __bnxt_del_roce_app(struct bnxt *bp, struct dcb_app *app)
{
u32 prio_mask = dcb_ieee_getapp_mask(bp->dev, app);
if (!prio_mask)
return;
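/* prio_mask is a bitmap of priorities; ilog2() picks the highest set bit */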
app->priority = ilog2(prio_mask);
dcb_ieee_delapp(bp->dev, app);
}
static void bnxt_del_roce_apps(struct bnxt *bp)
{
struct dcb_app app;
if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
!(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
return;
app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
app.protocol = ETH_P_ROCE;
__bnxt_del_roce_app(bp, &app);
app.selector = IEEE_8021QAZ_APP_SEL_DGRAM;
app.protocol = ROCE_V2_UDP_DPORT;
__bnxt_del_roce_app(bp, &app);
}
static void bnxt_del_dscp_apps(struct bnxt *bp)
{
#ifdef HAVE_DSCP_MASK_MAP
struct dcb_ieee_app_prio_map dscp_map;
struct dcb_app app;
int i, j;
if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
!(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
return;
app.selector = IEEE_8021QAZ_APP_SEL_DSCP;
dcb_ieee_getapp_prio_dscp_mask_map(bp->dev, &dscp_map);
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
for (j = 0; j < 64; j++) {
if (dscp_map.map[i] & (1ULL << j)) {
app.protocol = j;
app.priority = i;
dcb_ieee_delapp(bp->dev, &app);
}
}
}
#endif
}
static u8 bnxt_dcbnl_getdcbx(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
return bp->dcbx_cap;
}
static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
struct bnxt *bp = netdev_priv(dev);
/* All firmware DCBX settings are set in NVRAM */
if (bp->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
return 1;
if (mode & DCB_CAP_DCBX_HOST) {
if (BNXT_VF(bp) || (bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
return 1;
/* only support IEEE */
if ((mode & DCB_CAP_DCBX_VER_CEE) ||
!(mode & DCB_CAP_DCBX_VER_IEEE))
return 1;
}
if (mode == bp->dcbx_cap)
return 0;
bp->dcbx_cap = mode;
return 0;
}
static const struct dcbnl_rtnl_ops dcbnl_ops = {
.ieee_getets = bnxt_dcbnl_ieee_getets,
.ieee_setets = bnxt_dcbnl_ieee_setets,
.ieee_getpfc = bnxt_dcbnl_ieee_getpfc,
.ieee_setpfc = bnxt_dcbnl_ieee_setpfc,
.ieee_setapp = bnxt_dcbnl_ieee_setapp,
.ieee_delapp = bnxt_dcbnl_ieee_delapp,
.getdcbx = bnxt_dcbnl_getdcbx,
.setdcbx = bnxt_dcbnl_setdcbx,
};
void bnxt_dcb_init(struct bnxt *bp)
{
bp->dcbx_cap = 0;
if (bp->hwrm_spec_code < 0x10501)
return;
bnxt_hwrm_queue_dscp_qcaps(bp);
bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
else if (bp->fw_cap & BNXT_FW_CAP_DCBX_AGENT)
bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
bp->dev->dcbnl_ops = &dcbnl_ops;
}
void bnxt_dcb_free(struct bnxt *bp, bool reset)
{
kfree(bp->ieee_pfc);
kfree(bp->ieee_ets);
bp->ieee_pfc = NULL;
bp->ieee_ets = NULL;
if (reset) {
bnxt_del_roce_apps(bp);
bnxt_del_dscp_apps(bp);
}
}
#else
void bnxt_dcb_init(struct bnxt *bp)
{
}
void bnxt_dcb_free(struct bnxt *bp, bool reset)
{
}
#endif

View File

@ -0,0 +1,186 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2016-2018 Broadcom Limited
* Copyright (c) 2018-2022 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_DCB_H
#define BNXT_DCB_H
#include <net/dcbnl.h>
struct bnxt_dcb {
u8 max_tc;
struct ieee_pfc *ieee_pfc;
struct ieee_ets *ieee_ets;
u8 dcbx_cap;
u8 default_pri;
};
struct bnxt_cos2bw_cfg {
u8 pad[3];
struct_group_attr(cfg, __packed,
u8 queue_id;
__le32 min_bw;
__le32 max_bw;
u8 tsa;
u8 pri_lvl;
u8 bw_weight;
);
/* for min_bw / max_bw */
#define BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
u8 unused;
};
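/*
 * struct_group_attr() (from linux/stddef.h) wraps the named members in an
 * anonymous union so they are addressable as a single sub-object. A
 * stand-alone sketch of the idea (demo types are hypothetical, assuming
 * <linux/string.h> for memcpy):
 */
struct bnxt_group_demo {
	u8 pad;
	struct_group(vals,
		u8 a;
		u8 b;
	);
};

static inline void bnxt_group_demo_copy(struct bnxt_group_demo *dst,
					const struct bnxt_group_demo *src)
{
	/* copies exactly a and b; FORTIFY_SOURCE sees a correctly sized object */
	memcpy(&dst->vals, &src->vals, sizeof(dst->vals));
}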
struct bnxt_dscp2pri_entry {
u8 dscp;
u8 mask;
u8 pri;
};
#define BNXT_LLQ(q_profile) \
((q_profile) == QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE || \
(q_profile) == QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC)
#define BNXT_CNPQ(q_profile) \
((q_profile) == QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP)
#define HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL 0x0300
/* bnxt_queue_cos2bw_qcfg_output (size:896b/112B)
* This structure is identical in memory layout to
* struct hwrm_queue_cos2bw_qcfg_output in bnxt_hsi.h.
* Using the structure prevents fortify memcpy warnings.
*/
struct bnxt_queue_cos2bw_qcfg_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 queue_id0;
u8 unused_0;
__le16 unused_1;
__le32 queue_id0_min_bw;
__le32 queue_id0_max_bw;
u8 queue_id0_tsa_assign;
u8 queue_id0_pri_lvl;
u8 queue_id0_bw_weight;
struct {
u8 queue_id;
__le32 queue_id_min_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE 0x10000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id_max_bw;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE 0x10000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id_tsa_assign;
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_SP 0x0UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id_pri_lvl;
u8 queue_id_bw_weight;
} __packed cfg[7];
u8 unused_2[4];
u8 valid;
};
/* bnxt_queue_cos2bw_cfg_input (size:1024b/128B)
* This structure is identical in memory layout to
* struct hwrm_queue_cos2bw_cfg_input in bnxt_hsi.h.
* Using the structure prevents fortify memcpy warnings.
*/
struct bnxt_queue_cos2bw_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le32 flags;
__le32 enables;
__le16 port_id;
u8 queue_id0;
u8 unused_0;
__le32 queue_id0_min_bw;
__le32 queue_id0_max_bw;
u8 queue_id0_tsa_assign;
u8 queue_id0_pri_lvl;
u8 queue_id0_bw_weight;
struct {
u8 queue_id;
__le32 queue_id_min_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE 0x10000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id_max_bw;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE 0x10000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id_tsa_assign;
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_SP 0x0UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id_pri_lvl;
u8 queue_id_bw_weight;
} __packed cfg[7];
u8 unused_1[5];
};
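/*
 * Compile-time layout check sketch (illustrative only; not in the
 * original file, and it assumes the hwrm_* counterparts from bnxt_hsi.h
 * and <linux/build_bug.h> are in scope): the memcpy calls over cfg[] are
 * only safe because these mirrored definitions match byte for byte.
 */
static inline void bnxt_dcb_layout_asserts(void)
{
	BUILD_BUG_ON(sizeof(struct bnxt_queue_cos2bw_qcfg_output) !=
		     sizeof(struct hwrm_queue_cos2bw_qcfg_output));
	BUILD_BUG_ON(sizeof(struct bnxt_queue_cos2bw_cfg_input) !=
		     sizeof(struct hwrm_queue_cos2bw_cfg_input));
}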
void bnxt_dcb_init(struct bnxt *bp);
void bnxt_dcb_free(struct bnxt *bp, bool reset);
#endif

View File

@ -0,0 +1,603 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2017-2018 Broadcom Limited
* Copyright (c) 2018-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "bnxt_hsi.h"
#include "bnxt_compat.h"
#ifdef HAVE_DIM
#include <linux/dim.h>
#else
#include "bnxt_dim.h"
#endif
#include "bnxt.h"
#include "bnxt_debugfs.h"
#include "bnxt_hdbr.h"
#include "bnxt_udcc.h"
#include "cfa_types.h"
#include "bnxt_vfr.h"
#ifdef CONFIG_DEBUG_FS
static struct dentry *bnxt_debug_mnt;
static struct dentry *bnxt_debug_tf;
#if defined(CONFIG_BNXT_FLOWER_OFFLOAD)
static ssize_t debugfs_session_query_read(struct file *filep, char __user *buffer,
size_t count, loff_t *ppos)
{
struct bnxt_udcc_session_entry *entry = filep->private_data;
struct hwrm_udcc_session_query_output resp;
int len = 0, size = 4096;
char *buf;
int rc;
rc = bnxt_hwrm_udcc_session_query(entry->bp, entry->session_id, &resp);
if (rc)
return rc;
buf = kzalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
len = scnprintf(buf, size, "min_rtt_ns = %u\n",
le32_to_cpu(resp.min_rtt_ns));
len += scnprintf(buf + len, size - len, "max_rtt_ns = %u\n",
le32_to_cpu(resp.max_rtt_ns));
len += scnprintf(buf + len, size - len, "cur_rate_mbps = %u\n",
le32_to_cpu(resp.cur_rate_mbps));
len += scnprintf(buf + len, size - len, "tx_event_count = %u\n",
le32_to_cpu(resp.tx_event_count));
len += scnprintf(buf + len, size - len, "cnp_rx_event_count = %u\n",
le32_to_cpu(resp.cnp_rx_event_count));
len += scnprintf(buf + len, size - len, "rtt_req_count = %u\n",
le32_to_cpu(resp.rtt_req_count));
len += scnprintf(buf + len, size - len, "rtt_resp_count = %u\n",
le32_to_cpu(resp.rtt_resp_count));
len += scnprintf(buf + len, size - len, "tx_bytes_sent = %u\n",
le32_to_cpu(resp.tx_bytes_count));
len += scnprintf(buf + len, size - len, "tx_pkts_sent = %u\n",
le32_to_cpu(resp.tx_packets_count));
len += scnprintf(buf + len, size - len, "init_probes_sent = %u\n",
le32_to_cpu(resp.init_probes_sent));
len += scnprintf(buf + len, size - len, "term_probes_recv = %u\n",
le32_to_cpu(resp.term_probes_recv));
len += scnprintf(buf + len, size - len, "cnp_packets_recv = %u\n",
le32_to_cpu(resp.cnp_packets_recv));
len += scnprintf(buf + len, size - len, "rto_event_recv = %u\n",
le32_to_cpu(resp.rto_event_recv));
len += scnprintf(buf + len, size - len, "seq_err_nak_recv = %u\n",
le32_to_cpu(resp.seq_err_nak_recv));
len += scnprintf(buf + len, size - len, "qp_count = %u\n",
le32_to_cpu(resp.qp_count));
if (count < strlen(buf)) {
kfree(buf);
return -ENOSPC;
}
len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
kfree(buf);
return len;
}
static const struct file_operations session_query_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = debugfs_session_query_read,
};
void bnxt_debugfs_create_udcc_session(struct bnxt *bp, u32 session_id)
{
struct bnxt_udcc_info *udcc = bp->udcc_info;
struct bnxt_udcc_session_entry *entry;
static char sname[16];
entry = udcc->session_db[session_id];
if (entry->debugfs_dir || !bp->debugfs_pdev)
return;
snprintf(sname, 10, "%d", session_id);
entry->debugfs_dir = debugfs_create_dir(sname, bp->udcc_info->udcc_debugfs_dir);
entry->bp = bp;
debugfs_create_file("session_query", 0644, entry->debugfs_dir, entry, &session_query_fops);
}
void bnxt_debugfs_delete_udcc_session(struct bnxt *bp, u32 session_id)
{
struct bnxt_udcc_info *udcc = bp->udcc_info;
struct bnxt_udcc_session_entry *entry;
entry = udcc->session_db[session_id];
if (!entry->debugfs_dir || !bp->debugfs_pdev)
return;
debugfs_remove_recursive(entry->debugfs_dir);
entry->debugfs_dir = NULL;
}
#endif
static ssize_t debugfs_dim_read(struct file *filep,
char __user *buffer,
size_t count, loff_t *ppos)
{
struct dim *dim = filep->private_data;
int len;
char *buf;
if (*ppos)
return 0;
if (!dim)
return -ENODEV;
buf = kasprintf(GFP_KERNEL,
"state = %d\n" \
"profile_ix = %d\n" \
"mode = %d\n" \
"tune_state = %d\n" \
"steps_right = %d\n" \
"steps_left = %d\n" \
"tired = %d\n",
dim->state,
dim->profile_ix,
dim->mode,
dim->tune_state,
dim->steps_right,
dim->steps_left,
dim->tired);
if (!buf)
return -ENOMEM;
if (count < strlen(buf)) {
kfree(buf);
return -ENOSPC;
}
len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
kfree(buf);
return len;
}
static const struct file_operations debugfs_dim_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = debugfs_dim_read,
};
static void debugfs_dim_ring_init(struct dim *dim, int ring_idx,
struct dentry *dd)
{
static char qname[16];
snprintf(qname, 10, "%d", ring_idx);
debugfs_create_file(qname, 0600, dd, dim, &debugfs_dim_fops);
}
static int dbr_enable_get(void *data, u64 *val)
{
struct bnxt *bp = data;
*val = bp->dbr.enable;
return 0;
}
static int dbr_enable_set(void *data, u64 val)
{
struct bnxt *bp = data;
struct bnxt_dbr *dbr;
int rc;
dbr = &bp->dbr;
if (val) {
dbr->enable = 1;
rc = bnxt_dbr_init(bp);
if (rc) {
netdev_err(bp->dev,
"Failed to initialize DB recovery\n");
dbr->enable = 0;
return rc;
}
} else {
dbr->enable = 0;
bnxt_dbr_exit(bp);
}
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(dbr_enable_fops, dbr_enable_get, dbr_enable_set,
"%llu\n");
static ssize_t dbr_stats_read(struct file *filep, char __user *buffer,
size_t count, loff_t *ppos)
{
struct bnxt_dbr_sw_stats *stat = filep->private_data;
char *buf;
int len;
if (*ppos)
return 0;
buf = kasprintf(GFP_KERNEL, "nr_dbr = %u\n" "avg_dbr_us = %llu\n" \
"max_dbr_us = %llu\n" "min_dbr_us = %llu\n",
stat->nr_dbr, stat->avg_dbr_us, stat->max_dbr_us,
stat->min_dbr_us);
if (!buf)
return -ENOMEM;
if (count < strlen(buf)) {
kfree(buf);
return -ENOSPC;
}
len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
kfree(buf);
return len;
}
static const struct file_operations dbr_stats_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = dbr_stats_read,
};
static int dbr_test_recover_enable_get(void *data, u64 *val)
{
struct bnxt *bp = data;
*val = bp->dbr.debug.recover_enable;
return 0;
}
static int dbr_test_recover_enable_set(void *data, u64 val)
{
struct bnxt_dbr_debug *debug;
struct bnxt *bp = data;
struct bnxt_dbr *dbr;
dbr = &bp->dbr;
debug = &dbr->debug;
if (!dbr->enable && val) {
netdev_err(bp->dev,
"Unable to run DB recovery test when DBR is disabled\n");
return -EINVAL;
}
if (val) {
debug->recover_enable = 1;
if (dbr->wq)
/* kick start the recovery work */
if (queue_delayed_work(dbr->wq, &dbr->dwork,
msecs_to_jiffies(debug->recover_interval_ms)))
atomic_inc(&dbr->event_cnt);
} else {
debug->recover_enable = 0;
}
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(dbr_test_recover_enable_fops,
dbr_test_recover_enable_get,
dbr_test_recover_enable_set,
"%llu\n");
static ssize_t hdbr_debug_trace_read(struct file *filep, char __user *buffer,
size_t count, loff_t *ppos)
{
struct bnxt *bp = filep->private_data;
int len = 2;
char buf[2];
if (*ppos)
return 0;
if (!bp)
return -ENODEV;
if (count < len)
return -ENOSPC;
if (bp->hdbr_info.debug_trace)
buf[0] = '1';
else
buf[0] = '0';
buf[1] = '\n';
return simple_read_from_buffer(buffer, count, ppos, buf, len);
}
static ssize_t hdbr_debug_trace_write(struct file *file, const char __user *u,
size_t size, loff_t *off)
{
struct bnxt *bp = file->private_data;
char u_in[2];
size_t n;
if (!bp)
return -ENODEV;
if (*off || !size || size > 2)
return -EFAULT;
n = simple_write_to_buffer(u_in, size, off, u, 2);
if (n != size)
return -EFAULT;
if (u_in[0] == '0')
bp->hdbr_info.debug_trace = 0;
else
bp->hdbr_info.debug_trace = 1;
return size;
}
static const struct file_operations hdbr_debug_trace_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = hdbr_debug_trace_read,
.write = hdbr_debug_trace_write,
};
static ssize_t debugfs_hdbr_kdmp_read(struct file *filep, char __user *buffer,
size_t count, loff_t *ppos)
{
struct bnxt_hdbr_ktbl *ktbl = *((void **)filep->private_data);
size_t len;
char *buf;
if (*ppos)
return 0;
if (!ktbl)
return -ENODEV;
buf = bnxt_hdbr_ktbl_dump(ktbl);
if (!buf)
return -ENOMEM;
len = strlen(buf);
if (count < len) {
kfree(buf);
return -ENOSPC;
}
len = simple_read_from_buffer(buffer, count, ppos, buf, len);
kfree(buf);
return len;
}
static const struct file_operations hdbr_kdmp_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = debugfs_hdbr_kdmp_read,
};
static ssize_t debugfs_hdbr_l2dmp_read(struct file *filep, char __user *buffer,
size_t count, loff_t *ppos)
{
struct bnxt_hdbr_l2_pgs *l2pgs = *((void **)filep->private_data);
size_t len;
char *buf;
if (*ppos)
return 0;
if (!l2pgs)
return -ENODEV;
buf = bnxt_hdbr_l2pg_dump(l2pgs);
if (!buf)
return -ENOMEM;
len = strlen(buf);
if (count < len) {
kfree(buf);
return -ENOSPC;
}
len = simple_read_from_buffer(buffer, count, ppos, buf, len);
kfree(buf);
return len;
}
static const struct file_operations hdbr_l2dmp_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = debugfs_hdbr_l2dmp_read,
};
static void bnxt_debugfs_hdbr_init(struct bnxt *bp)
{
const char *pname = pci_name(bp->pdev);
struct dentry *pdevf, *phdbr, *pktbl, *pl2pgs;
int i;
char *names[4] = {"sq", "rq", "srq", "cq"};
if (!bp->hdbr_info.hdbr_enabled)
return;
/* Create top dir */
phdbr = debugfs_create_dir("hdbr", bp->debugfs_pdev);
if (!phdbr) {
pr_err("Failed to create debugfs entry %s/hdbr\n", pname);
return;
}
/* Create debug_trace knob */
pdevf = debugfs_create_file("debug_trace", 0644, phdbr, bp, &hdbr_debug_trace_fops);
if (!pdevf) {
pr_err("Failed to create debugfs entry %s/hdbr/debug_trace\n", pname);
return;
}
/* Create ktbl dir */
pktbl = debugfs_create_dir("ktbl", phdbr);
if (!pktbl) {
pr_err("Failed to create debugfs entry %s/hdbr/ktbl\n", pname);
return;
}
/* Create l2pgs dir */
pl2pgs = debugfs_create_dir("l2pgs", phdbr);
if (!pl2pgs) {
pr_err("Failed to create debugfs entry %s/hdbr/l2pgs\n", pname);
return;
}
/* Create hdbr kernel page and L2 page dumping knobs */
for (i = 0; i < DBC_GROUP_MAX; i++) {
pdevf = debugfs_create_file(names[i], 0644, pktbl, &bp->hdbr_info.ktbl[i],
&hdbr_kdmp_fops);
if (!pdevf) {
pr_err("Failed to create debugfs entry %s/hdbr/ktbl/%s\n",
pname, names[i]);
return;
}
if (i == DBC_GROUP_RQ)
continue;
pdevf = debugfs_create_file(names[i], 0644, pl2pgs, &bp->hdbr_pgs[i],
&hdbr_l2dmp_fops);
if (!pdevf) {
pr_err("Failed to create debugfs entry %s/hdbr/l2pgs/%s\n",
pname, names[i]);
return;
}
}
}
#define BNXT_DEBUGFS_TRUFLOW "truflow"
int bnxt_debug_tf_create(struct bnxt *bp, u8 tsid)
{
char name[32];
struct dentry *port_dir;
bnxt_debug_tf = debugfs_lookup(BNXT_DEBUGFS_TRUFLOW, bnxt_debug_mnt);
if (!bnxt_debug_tf)
return -ENODEV;
/* Create the per-port directory if it does not already exist */
sprintf(name, "%d", bp->pf.port_id);
port_dir = debugfs_lookup(name, bnxt_debug_tf);
if (!port_dir) {
port_dir = debugfs_create_dir(name, bnxt_debug_tf);
if (!port_dir) {
pr_debug("Failed to create TF debugfs port %d directory.\n",
bp->pf.port_id);
return -ENODEV;
}
}
/* Call TF function to create the table scope debugfs seq files */
bnxt_tf_debugfs_create_files(bp, tsid, port_dir);
return 0;
}
void bnxt_debug_tf_delete(struct bnxt *bp)
{
char name[32];
struct dentry *port_dir;
if (!bnxt_debug_tf)
return;
sprintf(name, "%d", bp->pf.port_id);
port_dir = debugfs_lookup(name, bnxt_debug_tf);
if (port_dir)
debugfs_remove_recursive(port_dir);
}
void bnxt_debug_dev_init(struct bnxt *bp)
{
const char *pname = pci_name(bp->pdev);
struct bnxt_dbr_debug *debug;
struct bnxt_dbr *dbr;
struct dentry *dir;
int i;
bp->debugfs_pdev = debugfs_create_dir(pname, bnxt_debug_mnt);
dir = debugfs_create_dir("dim", bp->debugfs_pdev);
/* Create files for each rx ring */
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
if (cpr && bp->bnapi[i]->rx_ring)
debugfs_dim_ring_init(&cpr->dim, i, dir);
}
#define DBR_TEST_RECOVER_INTERVAL_MS 1000
#define DBR_TEST_DROP_RATIO 10
dbr = &bp->dbr;
debug = &bp->dbr.debug;
debug->recover_interval_ms = DBR_TEST_RECOVER_INTERVAL_MS;
debug->drop_ratio = DBR_TEST_DROP_RATIO;
dir = debugfs_create_dir("dbr", bp->debugfs_pdev);
debugfs_create_file("dbr_enable", 0644, dir, bp, &dbr_enable_fops);
debugfs_create_file("dbr_stats", 0444, dir, &dbr->sw_stats,
&dbr_stats_fops);
#ifdef DBR_DBG_DROP_ENABLE
debugfs_create_u8("dbr_test_drop_enable", 0644, dir,
&debug->drop_enable);
debugfs_create_u32("dbr_test_drop_ratio", 0644, dir,
&debug->drop_ratio);
#endif
debugfs_create_file("dbr_test_recover_enable", 0644, dir, bp,
&dbr_test_recover_enable_fops);
debugfs_create_u32("dbr_test_recover_interval_ms", 0644, dir,
&debug->recover_interval_ms);
bnxt_debugfs_hdbr_init(bp);
#if defined(CONFIG_BNXT_FLOWER_OFFLOAD)
if (bp->udcc_info)
bp->udcc_info->udcc_debugfs_dir = debugfs_create_dir("udcc", bp->debugfs_pdev);
#endif
}
void bnxt_debug_dev_exit(struct bnxt *bp)
{
struct bnxt_dbr_debug *debug = &bp->dbr.debug;
if (!bp)
return;
memset(debug, 0, sizeof(*debug));
debugfs_remove_recursive(bp->debugfs_pdev);
bp->debugfs_pdev = NULL;
}
void bnxt_debug_init(void)
{
bnxt_debug_mnt = debugfs_create_dir("bnxt_en", NULL);
if (!bnxt_debug_mnt) {
pr_err("failed to init bnxt_en debugfs\n");
return;
}
bnxt_debug_tf = debugfs_create_dir(BNXT_DEBUGFS_TRUFLOW,
bnxt_debug_mnt);
if (!bnxt_debug_tf)
pr_err("Failed to create TF debugfs backingstore directory.\n");
}
void bnxt_debug_exit(void)
{
/* Remove subdirectories explicitly; older kernels have a bug when
 * removing two-level directories in a single call.
 */
debugfs_remove_recursive(bnxt_debug_tf);
debugfs_remove_recursive(bnxt_debug_mnt);
}
#endif /* CONFIG_DEBUG_FS */

View File

@ -0,0 +1,31 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2017-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include "bnxt_hsi.h"
#include "bnxt.h"
#ifdef CONFIG_DEBUG_FS
void bnxt_debug_init(void);
void bnxt_debug_exit(void);
void bnxt_debug_dev_init(struct bnxt *bp);
void bnxt_debug_dev_exit(struct bnxt *bp);
void bnxt_debugfs_create_udcc_session(struct bnxt *bp, u32 session_id);
void bnxt_debugfs_delete_udcc_session(struct bnxt *bp, u32 session_id);
int bnxt_debug_tf_create(struct bnxt *bp, u8 tsid);
void bnxt_debug_tf_delete(struct bnxt *bp);
#else
static inline void bnxt_debug_init(void) {}
static inline void bnxt_debug_exit(void) {}
static inline void bnxt_debug_dev_init(struct bnxt *bp) {}
static inline void bnxt_debug_dev_exit(struct bnxt *bp) {}
static inline void bnxt_debugfs_create_udcc_session(struct bnxt *bp, u32 session_id) {}
static inline void bnxt_debugfs_delete_udcc_session(struct bnxt *bp, u32 session_id) {}
static inline int bnxt_debug_tf_create(struct bnxt *bp, u8 tsid) { return 0; }
static inline void bnxt_debug_tf_delete(struct bnxt *bp) {}
#endif

View File

@ -0,0 +1,476 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2017-2018 Broadcom Limited
* Copyright (c) 2018-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "bnxt_hsi.h"
#include "bnxt_compat.h"
#ifdef HAVE_DIM
#include <linux/dim.h>
#else
#include "bnxt_dim.h"
#endif
#include "bnxt.h"
#include "bnxt_hdbr.h"
#include "bnxt_udcc.h"
#include "cfa_types.h"
#include "bnxt_vfr.h"
#ifdef CONFIG_DEBUG_FS
static struct dentry *bnxt_debug_mnt;
static struct dentry *bnxt_debug_tf;
#if defined(CONFIG_BNXT_FLOWER_OFFLOAD)
static ssize_t debugfs_session_query_read(struct file *filep, char __user *buffer,
size_t count, loff_t *ppos)
{
struct bnxt_udcc_session_entry *entry = filep->private_data;
struct hwrm_udcc_session_query_output resp;
int len = 0, size = 4096;
char *buf;
int rc;
rc = bnxt_hwrm_udcc_session_query(entry->bp, entry->session_id, &resp);
if (rc)
return rc;
buf = kzalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
len = scnprintf(buf, size, "min_rtt_ns = %u\n",
le32_to_cpu(resp.min_rtt_ns));
len += scnprintf(buf + len, size - len, "max_rtt_ns = %u\n",
le32_to_cpu(resp.max_rtt_ns));
len += scnprintf(buf + len, size - len, "cur_rate_mbps = %u\n",
le32_to_cpu(resp.cur_rate_mbps));
len += scnprintf(buf + len, size - len, "tx_event_count = %u\n",
le32_to_cpu(resp.tx_event_count));
len += scnprintf(buf + len, size - len, "cnp_rx_event_count = %u\n",
le32_to_cpu(resp.cnp_rx_event_count));
len += scnprintf(buf + len, size - len, "rtt_req_count = %u\n",
le32_to_cpu(resp.rtt_req_count));
len += scnprintf(buf + len, size - len, "rtt_resp_count = %u\n",
le32_to_cpu(resp.rtt_resp_count));
len += scnprintf(buf + len, size - len, "tx_bytes_sent = %u\n",
le32_to_cpu(resp.tx_bytes_count));
len += scnprintf(buf + len, size - len, "tx_pkts_sent = %u\n",
le32_to_cpu(resp.tx_packets_count));
len += scnprintf(buf + len, size - len, "init_probes_sent = %u\n",
le32_to_cpu(resp.init_probes_sent));
len += scnprintf(buf + len, size - len, "term_probes_recv = %u\n",
le32_to_cpu(resp.term_probes_recv));
len += scnprintf(buf + len, size - len, "cnp_packets_recv = %u\n",
le32_to_cpu(resp.cnp_packets_recv));
len += scnprintf(buf + len, size - len, "rto_event_recv = %u\n",
le32_to_cpu(resp.rto_event_recv));
len += scnprintf(buf + len, size - len, "seq_err_nak_recv = %u\n",
le32_to_cpu(resp.seq_err_nak_recv));
len += scnprintf(buf + len, size - len, "qp_count = %u\n",
le32_to_cpu(resp.qp_count));
if (count < strlen(buf)) {
kfree(buf);
return -ENOSPC;
}
len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
kfree(buf);
return len;
}
static const struct file_operations session_query_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = debugfs_session_query_read,
};
void bnxt_debugfs_create_udcc_session(struct bnxt *bp, u32 session_id)
{
struct bnxt_udcc_info *udcc = bp->udcc_info;
struct bnxt_udcc_session_entry *entry;
static char sname[16];
entry = udcc->session_db[session_id];
if (entry->debugfs_dir || !bp->debugfs_pdev)
return;
snprintf(sname, 10, "%d", session_id);
entry->debugfs_dir = debugfs_create_dir(sname, bp->udcc_info->udcc_debugfs_dir);
entry->bp = bp;
debugfs_create_file("session_query", 0644, entry->debugfs_dir, entry, &session_query_fops);
}
void bnxt_debugfs_delete_udcc_session(struct bnxt *bp, u32 session_id)
{
struct bnxt_udcc_info *udcc = bp->udcc_info;
struct bnxt_udcc_session_entry *entry;
entry = udcc->session_db[session_id];
if (!entry->debugfs_dir || !bp->debugfs_pdev)
return;
debugfs_remove_recursive(entry->debugfs_dir);
entry->debugfs_dir = NULL;
}
#endif
static ssize_t debugfs_dim_read(struct file *filep,
char __user *buffer,
size_t count, loff_t *ppos)
{
struct dim *dim = filep->private_data;
int len;
char *buf;
if (*ppos)
return 0;
if (!dim)
return -ENODEV;
buf = kasprintf(GFP_KERNEL,
"state = %d\n" \
"profile_ix = %d\n" \
"mode = %d\n" \
"tune_state = %d\n" \
"steps_right = %d\n" \
"steps_left = %d\n" \
"tired = %d\n",
dim->state,
dim->profile_ix,
dim->mode,
dim->tune_state,
dim->steps_right,
dim->steps_left,
dim->tired);
if (!buf)
return -ENOMEM;
if (count < strlen(buf)) {
kfree(buf);
return -ENOSPC;
}
len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
kfree(buf);
return len;
}
static const struct file_operations debugfs_dim_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = debugfs_dim_read,
};
static struct dentry *debugfs_dim_ring_init(struct dim *dim, int ring_idx,
struct dentry *dd)
{
static char qname[16];
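/* scratch only: debugfs_create_file() copies the name into the new dentry */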
snprintf(qname, 10, "%d", ring_idx);
return debugfs_create_file(qname, 0600, dd,
dim, &debugfs_dim_fops);
}
static ssize_t debugfs_dt_read(struct file *filep, char __user *buffer,
size_t count, loff_t *ppos)
{
struct bnxt *bp = filep->private_data;
int len = 2;
char buf[2];
if (*ppos)
return 0;
if (!bp)
return -ENODEV;
if (count < len)
return -ENOSPC;
if (bp->hdbr_info.debug_trace)
buf[0] = '1';
else
buf[0] = '0';
buf[1] = '\n';
return simple_read_from_buffer(buffer, count, ppos, buf, len);
}
static ssize_t debugfs_dt_write(struct file *file, const char __user *u,
size_t size, loff_t *off)
{
struct bnxt *bp = file->private_data;
char u_in[2];
size_t n;
if (!bp)
return -ENODEV;
if (*off || !size || size > 2)
return -EFAULT;
n = simple_write_to_buffer(u_in, size, off, u, 2);
if (n != size)
return -EFAULT;
if (u_in[0] == '0')
bp->hdbr_info.debug_trace = 0;
else
bp->hdbr_info.debug_trace = 1;
return size;
}
static const struct file_operations debug_trace_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = debugfs_dt_read,
.write = debugfs_dt_write,
};
static ssize_t debugfs_hdbr_kdmp_read(struct file *filep, char __user *buffer,
size_t count, loff_t *ppos)
{
struct bnxt_hdbr_ktbl *ktbl = *((void **)filep->private_data);
size_t len;
char *buf;
if (*ppos)
return 0;
if (!ktbl)
return -ENODEV;
buf = bnxt_hdbr_ktbl_dump(ktbl);
if (!buf)
return -ENOMEM;
len = strlen(buf);
if (count < len) {
kfree(buf);
return -ENOSPC;
}
len = simple_read_from_buffer(buffer, count, ppos, buf, len);
kfree(buf);
return len;
}
static const struct file_operations debugfs_hdbr_kdmp_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = debugfs_hdbr_kdmp_read,
};
static ssize_t debugfs_hdbr_l2dmp_read(struct file *filep, char __user *buffer,
size_t count, loff_t *ppos)
{
struct bnxt_hdbr_l2_pgs *l2pgs = *((void **)filep->private_data);
size_t len;
char *buf;
if (*ppos)
return 0;
if (!l2pgs)
return -ENODEV;
buf = bnxt_hdbr_l2pg_dump(l2pgs);
if (!buf)
return -ENOMEM;
len = strlen(buf);
if (count < len) {
kfree(buf);
return -ENOSPC;
}
len = simple_read_from_buffer(buffer, count, ppos, buf, len);
kfree(buf);
return len;
}
static const struct file_operations debugfs_hdbr_l2dmp_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = debugfs_hdbr_l2dmp_read,
};
static void bnxt_debugfs_hdbr_init(struct bnxt *bp)
{
struct dentry *pdevf, *phdbr, *pktbl, *pl2pgs;
char *names[4] = {"sq", "rq", "srq", "cq"};
const char *pname = pci_name(bp->pdev);
int i;
if (!bp->hdbr_info.hdbr_enabled)
return;
/* Create top dir */
phdbr = debugfs_create_dir("hdbr", bp->debugfs_pdev);
if (!phdbr) {
pr_err("Failed to create debugfs entry %s/hdbr\n", pname);
return;
}
/* Create debug_trace knob */
pdevf = debugfs_create_file("debug_trace", 0600, phdbr, bp, &debug_trace_fops);
if (!pdevf) {
pr_err("Failed to create debugfs entry %s/hdbr/debug_trace\n", pname);
return;
}
/* Create ktbl dir */
pktbl = debugfs_create_dir("ktbl", phdbr);
if (!pktbl) {
pr_err("Failed to create debugfs entry %s/hdbr/ktbl\n", pname);
return;
}
/* Create l2pgs dir */
pl2pgs = debugfs_create_dir("l2pgs", phdbr);
if (!pl2pgs) {
pr_err("Failed to create debugfs entry %s/hdbr/l2pgs\n", pname);
return;
}
/* Create hdbr kernel page and L2 page dumping knobs */
for (i = 0; i < DBC_GROUP_MAX; i++) {
pdevf = debugfs_create_file(names[i], 0600, pktbl,
&bp->hdbr_info.ktbl[i],
&debugfs_hdbr_kdmp_fops);
if (!pdevf) {
pr_err("Failed to create debugfs entry %s/hdbr/ktbl/%s\n",
pname, names[i]);
return;
}
if (i == DBC_GROUP_RQ)
continue;
pdevf = debugfs_create_file(names[i], 0600, pl2pgs,
&bp->hdbr_pgs[i],
&debugfs_hdbr_l2dmp_fops);
if (!pdevf) {
pr_err("Failed to create debugfs entry %s/hdbr/l2pgs/%s\n",
pname, names[i]);
return;
}
}
}
#define BNXT_DEBUGFS_TRUFLOW "truflow"
int bnxt_debug_tf_create(struct bnxt *bp, u8 tsid)
{
char name[32];
struct dentry *port_dir;
bnxt_debug_tf = debugfs_lookup(BNXT_DEBUGFS_TRUFLOW, bnxt_debug_mnt);
if (!bnxt_debug_tf)
return -ENODEV;
/* Create the per-port directory if it does not already exist */
sprintf(name, "%d", bp->pf.port_id);
port_dir = debugfs_lookup(name, bnxt_debug_tf);
if (!port_dir) {
port_dir = debugfs_create_dir(name, bnxt_debug_tf);
if (!port_dir) {
pr_debug("Failed to create TF debugfs port %d directory.\n",
bp->pf.port_id);
return -ENODEV;
}
}
/* Call TF function to create the table scope debugfs seq files */
bnxt_tf_debugfs_create_files(bp, tsid, port_dir);
return 0;
}
void bnxt_debug_tf_delete(struct bnxt *bp)
{
char name[32];
struct dentry *port_dir;
if (!bnxt_debug_tf)
return;
sprintf(name, "%d", bp->pf.port_id);
port_dir = debugfs_lookup(name, bnxt_debug_tf);
if (port_dir)
debugfs_remove_recursive(port_dir);
}
void bnxt_debug_dev_init(struct bnxt *bp)
{
const char *pname = pci_name(bp->pdev);
struct dentry *pdevf;
int i;
bp->debugfs_pdev = debugfs_create_dir(pname, bnxt_debug_mnt);
if (bp->debugfs_pdev) {
pdevf = debugfs_create_dir("dim", bp->debugfs_pdev);
if (!pdevf) {
pr_err("failed to create debugfs entry %s/dim\n", pname);
return;
}
bp->debugfs_dim = pdevf;
/* create files for each rx ring */
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
if (cpr && bp->bnapi[i]->rx_ring) {
pdevf = debugfs_dim_ring_init(&cpr->dim, i,
bp->debugfs_dim);
if (!pdevf)
pr_err("failed to create debugfs entry %s/dim/%d\n",
pname, i);
}
}
bnxt_debugfs_hdbr_init(bp);
#if defined(CONFIG_BNXT_FLOWER_OFFLOAD)
if (bp->udcc_info)
bp->udcc_info->udcc_debugfs_dir =
debugfs_create_dir("udcc", bp->debugfs_pdev);
#endif
} else {
pr_err("failed to create debugfs entry %s\n", pname);
}
}
void bnxt_debug_dev_exit(struct bnxt *bp)
{
if (!bp)
return;
debugfs_remove_recursive(bp->debugfs_pdev);
bp->debugfs_pdev = NULL;
}
void bnxt_debug_init(void)
{
bnxt_debug_mnt = debugfs_create_dir("bnxt_en", NULL);
if (!bnxt_debug_mnt) {
pr_err("failed to init bnxt_en debugfs\n");
return;
}
bnxt_debug_tf = debugfs_create_dir(BNXT_DEBUGFS_TRUFLOW,
bnxt_debug_mnt);
if (!bnxt_debug_tf)
pr_err("Failed to create TF debugfs backingstore directory.\n");
}
void bnxt_debug_exit(void)
{
/* Remove subdirectories explicitly; older kernels have a bug when
 * removing two-level directories in a single call.
 */
debugfs_remove_recursive(bnxt_debug_tf);
debugfs_remove_recursive(bnxt_debug_mnt);
}
#endif /* CONFIG_DEBUG_FS */

File diff suppressed because it is too large

View File

@ -0,0 +1,133 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2017-2018 Broadcom Limited
* Copyright (c) 2018-2022 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_DEVLINK_H
#define BNXT_DEVLINK_H
#if defined(HAVE_DEVLINK_PARAM)
#include <net/devlink.h>
#endif
#if defined(CONFIG_VF_REPS) || defined(HAVE_DEVLINK_PARAM)
/* Struct to hold housekeeping info needed by devlink interface */
struct bnxt_dl {
struct bnxt *bp; /* back ptr to the controlling dev */
bool remote_reset;
};
static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl)
{
return ((struct bnxt_dl *)devlink_priv(dl))->bp;
}
static inline bool bnxt_dl_get_remote_reset(struct devlink *dl)
{
return ((struct bnxt_dl *)devlink_priv(dl))->remote_reset;
}
static inline void bnxt_dl_set_remote_reset(struct devlink *dl, bool value)
{
((struct bnxt_dl *)devlink_priv(dl))->remote_reset = value;
}
#endif /* CONFIG_VF_REPS || HAVE_DEVLINK_PARAM */
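/*
 * Allocation sketch for context (illustrative; "ops" is a stand-in name,
 * and the three-argument devlink_alloc() is the >= v5.15 form): bnxt_dl
 * lives in the devlink private area, which is what makes the accessors
 * above work.
 */
static inline int bnxt_example_dl_alloc(struct bnxt *bp,
					const struct devlink_ops *ops)
{
	struct devlink *dl = devlink_alloc(ops, sizeof(struct bnxt_dl),
					   &bp->pdev->dev);
	struct bnxt_dl *bp_dl;

	if (!dl)
		return -ENOMEM;
	bp_dl = devlink_priv(dl);
	bp_dl->bp = bp;	/* back pointer consumed by bnxt_get_bp_from_dl() */
	return 0;
}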
union bnxt_nvm_data {
u8 val8;
__le32 val32;
};
#define NVM_OFF_MSIX_VEC_PER_PF_MAX 108
#define NVM_OFF_MSIX_VEC_PER_PF_MIN 114
#define NVM_OFF_IGNORE_ARI 164
#define NVM_OFF_DIS_GRE_VER_CHECK 171
#define NVM_OFF_ENABLE_SRIOV 401
#define NVM_OFF_MSIX_VEC_PER_VF 406
#define NVM_OFF_NVM_CFG_VER 602
#define BNXT_NVM_CFG_VER_BITS 8
#define BNXT_NVM_CFG_VER_BYTES 1
#define BNXT_MSIX_VEC_MAX 512
#define BNXT_MSIX_VEC_MIN_MAX 128
#if defined(CONFIG_VF_REPS) || defined(HAVE_DEVLINK_PARAM)
#ifdef HAVE_DEVLINK_PARAM
enum bnxt_nvm_dir_type {
BNXT_NVM_SHARED_CFG = 40,
BNXT_NVM_PORT_CFG,
BNXT_NVM_FUNC_CFG,
};
struct bnxt_dl_nvm_param {
u16 id;
u16 offset;
u16 dir_type;
u16 nvm_num_bits;
u8 dl_num_bytes;
};
enum bnxt_dl_version_type {
BNXT_VERSION_FIXED,
BNXT_VERSION_RUNNING,
BNXT_VERSION_STORED,
};
#else
static inline int bnxt_dl_params_register(struct bnxt *bp)
{
return 0;
}
#endif /* HAVE_DEVLINK_PARAM */
int bnxt_dl_register(struct bnxt *bp);
void bnxt_dl_unregister(struct bnxt *bp);
#else /* CONFIG_VF_REPS || HAVE_DEVLINK_PARAM */
static inline int bnxt_dl_register(struct bnxt *bp)
{
return 0;
}
static inline void bnxt_dl_unregister(struct bnxt *bp)
{
}
#endif /* CONFIG_VF_REPS || HAVE_DEVLINK_PARAM */
void bnxt_devlink_health_fw_report(struct bnxt *bp);
void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy);
void bnxt_dl_health_fw_recovery_done(struct bnxt *bp);
#ifdef HAVE_DEVLINK_HEALTH_REPORT
void bnxt_dl_fw_reporters_create(struct bnxt *bp);
void bnxt_dl_fw_reporters_destroy(struct bnxt *bp);
#else
static inline void bnxt_dl_fw_reporters_create(struct bnxt *bp)
{
}
static inline void bnxt_dl_fw_reporters_destroy(struct bnxt *bp)
{
}
#endif /* HAVE_DEVLINK_HEALTH_REPORT */
static inline void bnxt_dl_remote_reload(struct bnxt *bp)
{
#ifdef HAVE_DEVLINK_RELOAD_ACTION
devlink_remote_reload_actions_performed(bp->dl, 0,
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
#endif
}
int bnxt_hwrm_nvm_get_var(struct bnxt *bp, dma_addr_t data_dma_addr,
u16 offset, u16 dim, u16 index, u16 num_bits);
char *bnxt_health_severity_str(enum bnxt_health_severity severity);
char *bnxt_health_remedy_str(enum bnxt_health_remedy remedy);
#endif /* BNXT_DEVLINK_H */

View File

@ -0,0 +1,101 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2024 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef _BNXT_DEVLINK_COMPAT_H_
#define _BNXT_DEVLINK_COMPAT_H_
#ifdef HAVE_DEVLINK
#include <net/devlink.h>
#endif
#ifdef HAVE_DEVLINK_HEALTH_REPORT
#ifndef HAVE_DEVLINK_FMSG_STRING_PAIR_PUT_VOID
static int bnxt_fw_diagnose_compat(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
struct bnxt_fw_health *h = bp->fw_health;
u32 fw_status, fw_resets;
int rc;
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return devlink_fmsg_string_pair_put(fmsg, "Status", "recovering");
if (!h->status_reliable)
return devlink_fmsg_string_pair_put(fmsg, "Status", "unknown");
mutex_lock(&h->lock);
fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
if (BNXT_FW_IS_BOOTING(fw_status)) {
rc = devlink_fmsg_string_pair_put(fmsg, "Status", "initializing");
if (rc)
goto unlock;
} else if (h->severity || fw_status != BNXT_FW_STATUS_HEALTHY) {
if (!h->severity) {
h->severity = SEVERITY_FATAL;
h->remedy = REMEDY_POWER_CYCLE_DEVICE;
h->diagnoses++;
devlink_health_report(h->fw_reporter,
"FW error diagnosed", h);
}
rc = devlink_fmsg_string_pair_put(fmsg, "Status", "error");
if (rc)
goto unlock;
rc = devlink_fmsg_u32_pair_put(fmsg, "Syndrome", fw_status);
if (rc)
goto unlock;
} else {
rc = devlink_fmsg_string_pair_put(fmsg, "Status", "healthy");
if (rc)
goto unlock;
}
rc = devlink_fmsg_string_pair_put(fmsg, "Severity",
bnxt_health_severity_str(h->severity));
if (rc)
goto unlock;
if (h->severity) {
rc = devlink_fmsg_string_pair_put(fmsg, "Remedy",
bnxt_health_remedy_str(h->remedy));
if (rc)
goto unlock;
if (h->remedy == REMEDY_DEVLINK_RECOVER) {
rc = devlink_fmsg_string_pair_put(fmsg, "Impact",
"traffic+ntuple_cfg");
if (rc)
goto unlock;
}
}
unlock:
mutex_unlock(&h->lock);
if (rc || !h->resets_reliable)
return rc;
fw_resets = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
rc = devlink_fmsg_u32_pair_put(fmsg, "Resets", fw_resets);
if (rc)
return rc;
rc = devlink_fmsg_u32_pair_put(fmsg, "Arrests", h->arrests);
if (rc)
return rc;
rc = devlink_fmsg_u32_pair_put(fmsg, "Survivals", h->survivals);
if (rc)
return rc;
rc = devlink_fmsg_u32_pair_put(fmsg, "Discoveries", h->discoveries);
if (rc)
return rc;
rc = devlink_fmsg_u32_pair_put(fmsg, "Fatalities", h->fatalities);
if (rc)
return rc;
return devlink_fmsg_u32_pair_put(fmsg, "Diagnoses", h->diagnoses);
}
#endif
#endif
#endif
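/*
* Illustrative note (a sketch, not from the original source): this compat
* path exists because older kernels' devlink_fmsg helpers return an int
* that must be checked, while kernels defining
* HAVE_DEVLINK_FMSG_STRING_PAIR_PUT_VOID make them void, so a modern
* diagnose callback can simply write:
*
*	devlink_fmsg_string_pair_put(fmsg, "Status", "healthy");
*
* whereas the compat function above has to propagate rc after every put.
*/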

View File

@@ -0,0 +1,68 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2017-2018 Broadcom Limited
* Copyright (c) 2018-2020 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "bnxt_compat.h"
#ifdef HAVE_DIM
#include <linux/dim.h>
#else
#include "bnxt_dim.h"
#endif
#include "bnxt_hsi.h"
#include "bnxt.h"
void bnxt_dim_work(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
struct bnxt_cp_ring_info *cpr = container_of(dim,
struct bnxt_cp_ring_info,
dim);
struct bnxt_napi *bnapi = container_of(cpr,
struct bnxt_napi,
cp_ring);
struct dim_cq_moder cur_moder =
net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
cpr->rx_ring_coal.coal_ticks = cur_moder.usec;
cpr->rx_ring_coal.coal_bufs = cur_moder.pkts;
bnxt_hwrm_set_ring_coal(bnapi->bp, bnapi);
dim->state = DIM_START_MEASURE;
}
#ifndef HAVE_DIM
void net_dim(struct dim *dim, struct dim_sample end_sample)
{
struct dim_stats curr_stats;
u16 nevents;
switch (dim->state) {
case DIM_MEASURE_IN_PROGRESS:
nevents = BIT_GAP(BITS_PER_TYPE(u16),
end_sample.event_ctr,
dim->start_sample.event_ctr);
if (nevents < DIM_NEVENTS)
break;
dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats);
if (net_dim_decision(&curr_stats, dim)) {
dim->state = DIM_APPLY_NEW_PROFILE;
schedule_work(&dim->work);
break;
}
fallthrough;
case DIM_START_MEASURE:
dim->state = DIM_MEASURE_IN_PROGRESS;
break;
case DIM_APPLY_NEW_PROFILE:
break;
}
}
#endif
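/*
* Hypothetical caller sketch (illustrative only; the wrapper name is made
* up, though the field names mirror the bnxt NAPI poll usage): feeding the
* fallback net_dim() above once per interrupt.
*/
#if 0	/* example only */
static void example_rx_poll_done(struct bnxt_cp_ring_info *cpr)
{
struct dim_sample sample;
/* Snapshot the ring counters accumulated since the last sample. */
dim_update_sample(cpr->event_ctr, cpr->rx_packets, cpr->rx_bytes,
&sample);
/* Let DIM decide whether to schedule a profile change. */
net_dim(&cpr->dim, sample);
}
#endif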

View File

@@ -0,0 +1,354 @@
/*
* Copyright (c) 2016, Mellanox Technologies. All rights reserved.
* Copyright (c) 2017-2018, Broadcom Limited. All rights reserved.
* Copyright (c) 2018-2022, Broadcom Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef NET_DIM_H
#define NET_DIM_H
#include <linux/module.h>
struct dim_cq_moder {
u16 usec;
u16 pkts;
u8 cq_period_mode;
};
struct dim_sample {
ktime_t time;
u32 pkt_ctr;
u32 byte_ctr;
u16 event_ctr;
};
struct dim_stats {
int ppms; /* packets per msec */
int bpms; /* bytes per msec */
int epms; /* events per msec */
};
struct dim { /* Adaptive Moderation */
u8 state;
struct dim_stats prev_stats;
struct dim_sample start_sample;
struct work_struct work;
u8 profile_ix;
u8 mode;
u8 tune_state;
u8 steps_right;
u8 steps_left;
u8 tired;
};
enum {
DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
DIM_CQ_PERIOD_NUM_MODES
};
/* Adaptive moderation logic */
enum {
DIM_START_MEASURE,
DIM_MEASURE_IN_PROGRESS,
DIM_APPLY_NEW_PROFILE,
};
enum {
DIM_PARKING_ON_TOP,
DIM_PARKING_TIRED,
DIM_GOING_RIGHT,
DIM_GOING_LEFT,
};
enum {
DIM_STATS_WORSE,
DIM_STATS_SAME,
DIM_STATS_BETTER,
};
enum {
DIM_STEPPED,
DIM_TOO_TIRED,
DIM_ON_EDGE,
};
#define NET_DIM_PARAMS_NUM_PROFILES 5
/* Adaptive moderation profiles */
#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
#define NET_DIM_DEF_PROFILE_CQE 1
#define NET_DIM_DEF_PROFILE_EQE 1
/* All profile sizes must be NET_DIM_PARAMS_NUM_PROFILES */
#define NET_DIM_EQE_PROFILES { \
{1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
{4, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
{8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
{32, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
{256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
}
#define NET_DIM_CQE_PROFILES { \
{2, 256}, \
{8, 128}, \
{16, 64}, \
{32, 64}, \
{64, 64} \
}
static const struct dim_cq_moder
profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
NET_DIM_EQE_PROFILES,
NET_DIM_CQE_PROFILES,
};
static inline struct dim_cq_moder
net_dim_get_rx_moderation(u8 cq_period_mode, int ix)
{
struct dim_cq_moder cq_moder = profile[cq_period_mode][ix];
cq_moder.cq_period_mode = cq_period_mode;
return cq_moder;
}
static inline struct dim_cq_moder
net_dim_get_def_rx_moderation(u8 rx_cq_period_mode)
{
int default_profile_ix;
if (rx_cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE)
default_profile_ix = NET_DIM_DEF_PROFILE_CQE;
else /* DIM_CQ_PERIOD_MODE_START_FROM_EQE */
default_profile_ix = NET_DIM_DEF_PROFILE_EQE;
return net_dim_get_rx_moderation(rx_cq_period_mode, default_profile_ix);
}
static inline bool dim_on_top(struct dim *dim)
{
switch (dim->tune_state) {
case DIM_PARKING_ON_TOP:
case DIM_PARKING_TIRED:
return true;
case DIM_GOING_RIGHT:
return (dim->steps_left > 1) && (dim->steps_right == 1);
default: /* DIM_GOING_LEFT */
return (dim->steps_right > 1) && (dim->steps_left == 1);
}
}
static inline void dim_turn(struct dim *dim)
{
switch (dim->tune_state) {
case DIM_PARKING_ON_TOP:
case DIM_PARKING_TIRED:
break;
case DIM_GOING_RIGHT:
dim->tune_state = DIM_GOING_LEFT;
dim->steps_left = 0;
break;
case DIM_GOING_LEFT:
dim->tune_state = DIM_GOING_RIGHT;
dim->steps_right = 0;
break;
}
}
static inline int net_dim_step(struct dim *dim)
{
if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2))
return DIM_TOO_TIRED;
switch (dim->tune_state) {
case DIM_PARKING_ON_TOP:
case DIM_PARKING_TIRED:
break;
case DIM_GOING_RIGHT:
if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1))
return DIM_ON_EDGE;
dim->profile_ix++;
dim->steps_right++;
break;
case DIM_GOING_LEFT:
if (dim->profile_ix == 0)
return DIM_ON_EDGE;
dim->profile_ix--;
dim->steps_left++;
break;
}
dim->tired++;
return DIM_STEPPED;
}
static inline void dim_park_on_top(struct dim *dim)
{
dim->steps_right = 0;
dim->steps_left = 0;
dim->tired = 0;
dim->tune_state = DIM_PARKING_ON_TOP;
}
static inline void dim_park_tired(struct dim *dim)
{
dim->steps_right = 0;
dim->steps_left = 0;
dim->tune_state = DIM_PARKING_TIRED;
}
static inline void net_dim_exit_parking(struct dim *dim)
{
dim->tune_state = dim->profile_ix ? DIM_GOING_LEFT :
DIM_GOING_RIGHT;
net_dim_step(dim);
}
#define IS_SIGNIFICANT_DIFF(val, ref) \
(((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
static inline int net_dim_stats_compare(struct dim_stats *curr,
struct dim_stats *prev)
{
if (!prev->bpms)
return curr->bpms ? DIM_STATS_BETTER :
DIM_STATS_SAME;
if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
return (curr->bpms > prev->bpms) ? DIM_STATS_BETTER :
DIM_STATS_WORSE;
if (!prev->ppms)
return curr->ppms ? DIM_STATS_BETTER :
DIM_STATS_SAME;
if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
return (curr->ppms > prev->ppms) ? DIM_STATS_BETTER :
DIM_STATS_WORSE;
if (!prev->epms)
return DIM_STATS_SAME;
if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
return (curr->epms < prev->epms) ? DIM_STATS_BETTER :
DIM_STATS_WORSE;
return DIM_STATS_SAME;
}
static inline bool net_dim_decision(struct dim_stats *curr_stats,
struct dim *dim)
{
int prev_state = dim->tune_state;
int prev_ix = dim->profile_ix;
int stats_res;
int step_res;
switch (dim->tune_state) {
case DIM_PARKING_ON_TOP:
stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats);
if (stats_res != DIM_STATS_SAME)
net_dim_exit_parking(dim);
break;
case DIM_PARKING_TIRED:
dim->tired--;
if (!dim->tired)
net_dim_exit_parking(dim);
break;
case DIM_GOING_RIGHT:
case DIM_GOING_LEFT:
stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats);
if (stats_res != DIM_STATS_BETTER)
dim_turn(dim);
if (dim_on_top(dim)) {
dim_park_on_top(dim);
break;
}
step_res = net_dim_step(dim);
switch (step_res) {
case DIM_ON_EDGE:
dim_park_on_top(dim);
break;
case DIM_TOO_TIRED:
dim_park_tired(dim);
break;
}
break;
}
if ((prev_state != DIM_PARKING_ON_TOP) ||
(dim->tune_state != DIM_PARKING_ON_TOP))
dim->prev_stats = *curr_stats;
return dim->profile_ix != prev_ix;
}
static inline void dim_update_sample(u16 event_ctr,
u64 packets,
u64 bytes,
struct dim_sample *s)
{
s->time = ktime_get();
s->pkt_ctr = packets;
s->byte_ctr = bytes;
s->event_ctr = event_ctr;
}
#define DIM_NEVENTS 320
#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
static inline void dim_calc_stats(struct dim_sample *start,
struct dim_sample *end,
struct dim_stats *curr_stats)
{
/* u32 holds up to 71 minutes, should be enough */
u32 delta_us = ktime_us_delta(end->time, start->time);
u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
start->byte_ctr);
if (!delta_us)
return;
curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
curr_stats->epms = DIV_ROUND_UP(DIM_NEVENTS * USEC_PER_MSEC,
delta_us);
}
void net_dim(struct dim *dim, struct dim_sample end_sample);
#endif /* NET_DIM_H */
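/*
* Worked example (illustrative): BIT_GAP() computes counter deltas that are
* robust against wrap-around. With 16-bit event counters,
*
*	BIT_GAP(16, 3, 0xFFFE) = ((3 - 0xFFFE) + BIT_ULL(16)) & 0xFFFF = 5
*
* i.e. five events elapsed even though the raw counter wrapped past zero.
*/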

File diff suppressed because it is too large

View File

@@ -0,0 +1,95 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2016-2018 Broadcom Limited
* Copyright (c) 2018-2022 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_ETHTOOL_H
#define BNXT_ETHTOOL_H
#include <linux/firmware.h>
struct bnxt_led_cfg {
u8 led_id;
u8 led_state;
u8 led_color;
u8 unused;
__le16 led_blink_on;
__le16 led_blink_off;
u8 led_group_id;
u8 rsvd;
};
#define BNXT_LED_DFLT_ENA \
(PORT_LED_CFG_REQ_ENABLES_LED0_ID | \
PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \
PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON | \
PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF | \
PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID)
#define BNXT_LED_DFLT_ENA_SHIFT 6
#define BNXT_LED_DFLT_ENABLES(x) \
cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x)))
#define BNXT_FW_RESET_CRASHDUMP (ETH_RESET_CRASHDUMP << ETH_RESET_SHARED_SHIFT)
#define BNXT_FW_RESET_AP (ETH_RESET_AP << ETH_RESET_SHARED_SHIFT)
#define BNXT_FW_RESET_CHIP ((ETH_RESET_MGMT | ETH_RESET_IRQ | \
ETH_RESET_DMA | ETH_RESET_FILTER | \
ETH_RESET_OFFLOAD | ETH_RESET_MAC | \
ETH_RESET_PHY | ETH_RESET_RAM) \
<< ETH_RESET_SHARED_SHIFT)
#define BNXT_PXP_REG_LEN 0x3110
#define BNXT_IP_PROTO_FULL_MASK 0xFF
extern const struct ethtool_ops bnxt_ethtool_ops;
u32 bnxt_get_rxfh_indir_size(struct net_device *dev);
#ifdef HAVE_ETHTOOL_KEEE
void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds);
#else
u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
#endif
u32 bnxt_fw_to_ethtool_speed(u16);
#ifdef HAVE_ETHTOOL_KEEE
u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode);
#else
u16 bnxt_get_fw_auto_link_speeds(u32);
#endif
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
struct hwrm_nvm_get_dev_info_output *nvm_dev_info);
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
u32 install_type, struct netlink_ext_ack *extack);
int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
u32 install_type, struct netlink_ext_ack *extack);
int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
u8 self_reset, u8 flags);
int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size);
int bnxt_sync_firmware(struct bnxt *bp);
int bnxt_hwrm_get_fw_sync_status(struct bnxt *bp, u16 *fw_status);
int bnxt_hwrm_fw_sync(struct bnxt *bp, u16 fw_status);
void bnxt_ethtool_init(struct bnxt *bp);
void bnxt_ethtool_free(struct bnxt *bp);
int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
u16 ext, u16 *index, u32 *item_length,
u32 *data_length);
int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
u16 ext, u16 *index, u32 *item_length,
u32 *data_length);
int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
u32 dir_item_len, const u8 *data,
size_t data_len);
int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
u32 length, u8 *data);
int bnxt_firmware_reset_chip(struct net_device *dev);
int bnxt_firmware_reset_ap(struct net_device *dev);
#endif
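/*
* Illustrative expansion of the LED macros above: BNXT_LED_DFLT_ENABLES(x)
* shifts the LED0 enable mask by 6 bits per LED index, so
*
*	BNXT_LED_DFLT_ENABLES(0) == cpu_to_le32(BNXT_LED_DFLT_ENA)
*	BNXT_LED_DFLT_ENABLES(2) == cpu_to_le32(BNXT_LED_DFLT_ENA << 12)
*
* select the enable bits for LED 0 and LED 2 respectively.
*/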

View File

@@ -0,0 +1,458 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2021 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include "bnxt_ethtool.c"
#ifndef HAVE_ETHTOOL_LINK_KSETTINGS
int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct bnxt *bp = netdev_priv(dev);
struct ethtool_link_ksettings ks;
int rc;
memset(&ks, 0, sizeof(ks));
rc = bnxt_get_link_ksettings(dev, &ks);
if (rc)
return rc;
cmd->supported = ks.link_modes.supported[0];
cmd->advertising = ks.link_modes.advertising[0];
cmd->lp_advertising = ks.link_modes.lp_advertising[0];
ethtool_cmd_speed_set(cmd, ks.base.speed);
cmd->duplex = ks.base.duplex;
cmd->autoneg = ks.base.autoneg;
cmd->port = ks.base.port;
cmd->phy_address = ks.base.phy_address;
if (bp->link_info.transceiver ==
PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL)
cmd->transceiver = XCVR_INTERNAL;
else
cmd->transceiver = XCVR_EXTERNAL;
return 0;
}
static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
struct ethtool_link_ksettings *ks)
{
u16 fw_speeds = link_info->support_speeds;
u32 supported;
supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
ks->link_modes.supported[0] = supported | SUPPORTED_Pause |
SUPPORTED_Asym_Pause;
}
int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct bnxt *bp = netdev_priv(dev);
struct ethtool_link_ksettings ks;
memset(&ks, 0, sizeof(ks));
if (cmd->autoneg == AUTONEG_ENABLE) {
bnxt_fw_to_ethtool_support_spds(&bp->link_info, &ks);
if (!ks.link_modes.supported[0]) {
netdev_err(dev, "Autoneg not supported\n");
return -EINVAL;
}
if (cmd->advertising & ~(ks.link_modes.supported[0] |
ADVERTISED_Autoneg |
ADVERTISED_TP | ADVERTISED_FIBRE)) {
netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n",
cmd->advertising);
return -EINVAL;
}
} else {
/* If we received a request for an unknown duplex, assume full */
if (cmd->duplex == DUPLEX_UNKNOWN)
cmd->duplex = DUPLEX_FULL;
}
ks.link_modes.advertising[0] = cmd->advertising;
ks.base.speed = ethtool_cmd_speed(cmd);
ks.base.duplex = cmd->duplex;
ks.base.autoneg = cmd->autoneg;
return bnxt_set_link_ksettings(dev, &ks);
}
#endif
#ifndef HAVE_ETHTOOL_PARAMS_FROM_LINK_MODE
#define ETHTOOL_LINK_MODE(speed, type, duplex) \
ETHTOOL_LINK_MODE_ ## speed ## base ## type ## _ ## duplex ## _BIT
#include "bnxt_compat_link_modes.c"
void
ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
enum ethtool_link_mode_bit_indices link_mode)
{
const struct link_mode_info *link_info;
if (WARN_ON_ONCE(link_mode >= ARRAY_SIZE(link_mode_params)))
return;
link_info = &link_mode_params[link_mode];
link_ksettings->base.speed = link_info->speed;
#ifdef HAVE_ETHTOOL_LANES
link_ksettings->lanes = link_info->lanes;
#endif
link_ksettings->base.duplex = link_info->duplex;
#ifdef HAVE_ETHTOOL_LINK_MODE
link_ksettings->link_mode = link_mode;
#endif
}
#endif
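/*
* Illustrative expansion (not from the source): the ETHTOOL_LINK_MODE()
* helper above pastes its arguments into a kernel link-mode bit name, e.g.
*
*	ETHTOOL_LINK_MODE(10000, T, Full)
*		-> ETHTOOL_LINK_MODE_10000baseT_Full_BIT
*
* which the included bnxt_compat_link_modes.c presumably uses when building
* the link_mode_params[] table consumed above.
*/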
#if !defined(HAVE_ETHTOOL_RXFH_PARAM)
#if defined(HAVE_ETH_RXFH_CONTEXT_ALLOC)
int bnxt_set_rxfh_context(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc, u32 *rss_context,
bool delete)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_rss_ctx *rss_ctx;
struct bnxt_vnic_info *vnic;
bool modify = false;
int bit_id;
int rc;
if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
return -EOPNOTSUPP;
if (!netif_running(dev))
return -EAGAIN;
if (*rss_context != ETH_RXFH_CONTEXT_ALLOC) {
rss_ctx = bnxt_get_rss_ctx_from_index(bp, *rss_context);
if (!rss_ctx)
return -EINVAL;
if (delete) {
bnxt_del_one_rss_ctx(bp, rss_ctx, true);
return 0;
}
modify = true;
vnic = &rss_ctx->vnic;
goto modify_context;
}
if (hfunc && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX)
return -EINVAL;
if (!bnxt_rfs_capable(bp, true))
return -ENOMEM;
rss_ctx = bnxt_alloc_rss_ctx(bp);
if (!rss_ctx)
return -ENOMEM;
vnic = &rss_ctx->vnic;
vnic->flags |= BNXT_VNIC_RSSCTX_FLAG;
vnic->vnic_id = BNXT_VNIC_ID_INVALID;
rc = bnxt_alloc_rss_ctx_rss_table(bp, rss_ctx);
if (rc)
goto out;
rc = bnxt_alloc_rss_indir_tbl(bp, rss_ctx);
if (rc)
goto out;
bnxt_set_dflt_rss_indir_tbl(bp, rss_ctx);
memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE);
rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings);
if (rc)
goto out;
rc = bnxt_hwrm_vnic_set_tpa(bp, vnic, bp->flags & BNXT_FLAG_TPA);
if (rc)
goto out;
modify_context:
if (indir) {
u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
for (i = 0; i < tbl_size; i++)
rss_ctx->rss_indir_tbl[i] = indir[i];
pad = bp->rss_indir_tbl_entries - tbl_size;
if (pad)
memset(&rss_ctx->rss_indir_tbl[i], 0, pad * sizeof(u16));
}
if (key)
memcpy(vnic->rss_hash_key, key, HW_HASH_KEY_SIZE);
if (modify)
return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
rc = __bnxt_setup_vnic_p5(bp, vnic);
if (rc)
goto out;
bit_id = bitmap_find_free_region(bp->rss_ctx_bmap,
BNXT_RSS_CTX_BMAP_LEN, 0);
if (bit_id < 0) {
rc = -ENOMEM;
goto out;
}
rss_ctx->index = (u16)bit_id;
*rss_context = rss_ctx->index;
return 0;
out:
bnxt_del_one_rss_ctx(bp, rss_ctx, true);
return rc;
}
int bnxt_get_rxfh_context(struct net_device *dev, u32 *indir, u8 *key,
u8 *hfunc, u32 rss_context)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_rss_ctx *rss_ctx;
struct bnxt_vnic_info *vnic;
int i;
rss_ctx = bnxt_get_rss_ctx_from_index(bp, rss_context);
if (!rss_ctx)
return -EINVAL;
vnic = &rss_ctx->vnic;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (indir)
for (i = 0; i < bnxt_get_rxfh_indir_size(bp->dev); i++)
indir[i] = rss_ctx->rss_indir_tbl[i];
if (key)
memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
return 0;
}
#endif /* HAVE_ETH_RXFH_CONTEXT_ALLOC */
int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_vnic_info *vnic;
u32 i, tbl_size;
/* WIP: Return HWRM_VNIC_RSS_QCFG response, instead of driver cache */
if (hfunc)
*hfunc = bp->rss_hfunc;
if (!bp->vnic_info)
return 0;
vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
if (indir && bp->rss_indir_tbl) {
tbl_size = bnxt_get_rxfh_indir_size(dev);
for (i = 0; i < tbl_size; i++)
indir[i] = bp->rss_indir_tbl[i];
}
if (key && vnic->rss_hash_key)
memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
return 0;
}
int bnxt_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
const u8 hfunc)
{
struct bnxt *bp = netdev_priv(dev);
bool skip_key = false;
int rc = 0;
/* Check HW cap and cache hash func details */
switch (hfunc) {
case ETH_RSS_HASH_XOR:
if (!(bp->rss_cap & BNXT_RSS_CAP_XOR_CAP))
return -EOPNOTSUPP;
/* hkey not needed in XOR mode */
skip_key = true;
break;
case ETH_RSS_HASH_TOP:
if (!(bp->rss_cap & BNXT_RSS_CAP_TOEPLITZ_CAP))
return -EOPNOTSUPP;
break;
case ETH_RSS_HASH_CRC32:
/* default keys/indir */
if (!(bp->rss_cap & BNXT_RSS_CAP_TOEPLITZ_CHKSM_CAP))
return -EOPNOTSUPP;
skip_key = true;
break;
case ETH_RSS_HASH_NO_CHANGE:
break;
default:
return -EOPNOTSUPP;
}
/* Repeat of same hfunc with no key or weight */
if (bp->rss_hfunc == hfunc && !key && !indir)
return -EINVAL;
/* for xor and crc32 block hkey config */
if (key && skip_key)
return -EINVAL;
if (key) {
memcpy(bp->rss_hash_key, key, HW_HASH_KEY_SIZE);
bp->rss_hash_key_updated = true;
}
bp->rss_hfunc = hfunc;
if (indir) {
u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
for (i = 0; i < tbl_size; i++)
bp->rss_indir_tbl[i] = indir[i];
pad = bp->rss_indir_tbl_entries - tbl_size;
if (pad)
memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
}
bnxt_clear_usr_fltrs(bp, false);
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
}
return rc;
}
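/*
* Illustrative usage note: the hash functions gated above map to the
* ethtool RSS interface, e.g. "ethtool -X <dev> hfunc xor" (accepted only
* when the NIC advertises BNXT_RSS_CAP_XOR_CAP). As the code shows,
* supplying a key together with xor or crc32 is rejected with -EINVAL.
*/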
#endif /* !HAVE_ETHTOOL_RXFH_PARAM */
#if !defined(HAVE_ETHTOOL_KEEE) || !defined(HAVE_ETHTOOL_LINK_KSETTINGS)
u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
{
u32 speed_mask = 0;
/* TODO: support 25GB, 40GB, 50GB with different cable type */
/* set the advertised speeds */
if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
speed_mask |= ADVERTISED_100baseT_Full;
if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
speed_mask |= ADVERTISED_1000baseT_Full;
if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
speed_mask |= ADVERTISED_2500baseX_Full;
if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
speed_mask |= ADVERTISED_10000baseT_Full;
if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
speed_mask |= ADVERTISED_40000baseCR4_Full;
if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
speed_mask |= ADVERTISED_Pause;
else if (fw_pause & BNXT_LINK_PAUSE_TX)
speed_mask |= ADVERTISED_Asym_Pause;
else if (fw_pause & BNXT_LINK_PAUSE_RX)
speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
return speed_mask;
}
u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
{
u16 fw_speed_mask = 0;
/* only support autoneg at speed 100, 1000, and 10000 */
if (advertising & (ADVERTISED_100baseT_Full |
ADVERTISED_100baseT_Half)) {
fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
}
if (advertising & (ADVERTISED_1000baseT_Full |
ADVERTISED_1000baseT_Half)) {
fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
}
if (advertising & ADVERTISED_10000baseT_Full)
fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
if (advertising & ADVERTISED_40000baseCR4_Full)
fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
return fw_speed_mask;
}
#endif /* !HAVE_ETHTOOL_KEEE || !HAVE_ETHTOOL_LINK_KSETTINGS */
#if defined(ETHTOOL_GEEE) && !defined(GET_ETHTOOL_OP_EXT) && !defined(HAVE_ETHTOOL_KEEE)
int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
struct bnxt *bp = netdev_priv(dev);
struct ethtool_eee *eee = (struct ethtool_eee *)&bp->eee;
struct bnxt_link_info *link_info = &bp->link_info;
u32 advertising;
int rc = 0;
if (!BNXT_PHY_CFG_ABLE(bp))
return -EOPNOTSUPP;
if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
return -EOPNOTSUPP;
mutex_lock(&bp->link_lock);
advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
if (!edata->eee_enabled)
goto eee_ok;
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
netdev_warn(dev, "EEE requires autoneg\n");
rc = -EINVAL;
goto eee_exit;
}
if (edata->tx_lpi_enabled) {
if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
bp->lpi_tmr_lo, bp->lpi_tmr_hi);
rc = -EINVAL;
goto eee_exit;
} else if (!bp->lpi_tmr_hi) {
edata->tx_lpi_timer = eee->tx_lpi_timer;
}
}
if (!edata->advertised) {
edata->advertised = advertising & eee->supported;
} else if (edata->advertised & ~advertising) {
netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
edata->advertised, advertising);
rc = -EINVAL;
goto eee_exit;
}
eee->advertised = edata->advertised;
eee->tx_lpi_enabled = edata->tx_lpi_enabled;
eee->tx_lpi_timer = edata->tx_lpi_timer;
eee_ok:
eee->eee_enabled = edata->eee_enabled;
if (netif_running(dev))
rc = bnxt_hwrm_set_link_setting(bp, false, true);
eee_exit:
mutex_unlock(&bp->link_lock);
return rc;
}
int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
struct bnxt *bp = netdev_priv(dev);
if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
return -EOPNOTSUPP;
memcpy(edata, &bp->eee, sizeof(*edata));
if (!bp->eee.eee_enabled) {
/* Preserve tx_lpi_timer so that the last value will be used
* by default when it is re-enabled.
*/
edata->advertised = 0;
edata->tx_lpi_enabled = 0;
}
if (!bp->eee.eee_active)
edata->lp_advertised = 0;
return 0;
}
#endif

View File

@@ -0,0 +1,18 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2016-2018 Broadcom Limited
* Copyright (c) 2018-2021 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_EXTRA_VER_H
#define BNXT_EXTRA_VER_H
#ifndef DRV_MODULE_EXTRA_VER
#define DRV_MODULE_EXTRA_VER "-230.2.52.0"
#endif
#endif

View File

@@ -0,0 +1,120 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef __BNXT_FW_HDR_H__
#define __BNXT_FW_HDR_H__
#define BNXT_FIRMWARE_BIN_SIGNATURE 0x1a4d4342 /* "BCM"+0x1a */
#define BNXT_UCODE_TRAILER_SIGNATURE 0x726c7254 /* "Trlr" */
enum SUPPORTED_FAMILY {
DEVICE_5702_3_4_FAMILY, /* 0 - Denali, Vinson, K2 */
DEVICE_5705_FAMILY, /* 1 - Bachelor */
DEVICE_SHASTA_FAMILY, /* 2 - 5751 */
DEVICE_5706_FAMILY, /* 3 - Teton */
DEVICE_5714_FAMILY, /* 4 - Hamilton */
DEVICE_STANFORD_FAMILY, /* 5 - 5755 */
DEVICE_STANFORD_ME_FAMILY, /* 6 - 5756 */
DEVICE_SOLEDAD_FAMILY, /* 7 - 5761[E] */
DEVICE_CILAI_FAMILY, /* 8 - 57780/60/90/91 */
DEVICE_ASPEN_FAMILY, /* 9 - 57781/85/61/65/91/95 */
DEVICE_ASPEN_PLUS_FAMILY, /* 10 - 57786 */
DEVICE_LOGAN_FAMILY, /* 11 - Any device in the Logan family
*/
DEVICE_LOGAN_5762, /* 12 - Logan Enterprise (aka Columbia)
*/
DEVICE_LOGAN_57767, /* 13 - Logan Client */
DEVICE_LOGAN_57787, /* 14 - Logan Consumer */
DEVICE_LOGAN_5725, /* 15 - Logan Server (TruManage-enabled)
*/
DEVICE_SAWTOOTH_FAMILY, /* 16 - 5717/18 */
DEVICE_COTOPAXI_FAMILY, /* 17 - 5719 */
DEVICE_SNAGGLETOOTH_FAMILY, /* 18 - 5720 */
DEVICE_CUMULUS_FAMILY, /* 19 - Cumulus/Whitney */
MAX_DEVICE_FAMILY
};
enum SUPPORTED_CODE {
CODE_ASF1, /* 0 - ASF VERSION 1.03 <deprecated> */
CODE_ASF2, /* 1 - ASF VERSION 2.00 <deprecated> */
CODE_PASSTHRU, /* 2 - PassThru <deprecated> */
CODE_PT_SEC, /* 3 - PassThru with security <deprecated> */
CODE_UMP, /* 4 - UMP <deprecated> */
CODE_BOOT, /* 5 - Bootcode */
CODE_DASH, /* 6 - TruManage (DASH + ASF + PMCI)
* Management firmwares
*/
CODE_MCTP_PASSTHRU, /* 7 - NCSI / MCTP Pass-through firmware */
CODE_PM_OFFLOAD, /* 8 - Power-Management Proxy Offload firmwares
*/
CODE_MDNS_SD_OFFLOAD, /* 9 - Multicast DNS Service Discovery Proxys
* Offload firmware
*/
CODE_DISC_OFFLOAD, /* 10 - Discovery Offload firmware */
CODE_MUSTANG, /* 11 - I2C Error reporting APE firmwares
* <deprecated>
*/
CODE_ARP_BATCH, /* 12 - ARP Batch firmware */
CODE_SMASH, /* 13 - TruManage (SMASH + DCMI/IPMI + PMCI)
* Management firmware
*/
CODE_APE_DIAG, /* 14 - APE Test Diag firmware */
CODE_APE_PATCH, /* 15 - APE Patch firmware */
CODE_TANG_PATCH, /* 16 - TANG Patch firmware */
CODE_KONG_FW, /* 17 - KONG firmware */
CODE_KONG_PATCH, /* 18 - KONG Patch firmware */
CODE_BONO_FW, /* 19 - BONO firmware */
CODE_BONO_PATCH, /* 20 - BONO Patch firmware */
CODE_CHIMP_PATCH, /* 21 - ChiMP Patch firmware */
MAX_CODE_TYPE,
};
enum SUPPORTED_MEDIA {
MEDIA_COPPER, /* 0 */
MEDIA_FIBER, /* 1 */
MEDIA_NONE, /* 2 */
MEDIA_COPPER_FIBER, /* 3 */
MAX_MEDIA_TYPE,
};
struct bnxt_fw_header {
__le32 signature; /* contains the constant value of
* BNXT_FIRMWARE_BIN_SIGNATURE
*/
u8 flags; /* reserved for ChiMP use */
u8 code_type; /* enum SUPPORTED_CODE */
u8 device; /* enum SUPPORTED_FAMILY */
u8 media; /* enum SUPPORTED_MEDIA */
u8 version[16]; /* the null terminated version string to
* indicate the version of the
* file, this will be copied from the binary
* file version string
*/
u8 build;
u8 revision;
u8 minor_ver;
u8 major_ver;
};
/* Microcode and pre-boot software/firmware trailer: */
struct bnxt_ucode_trailer {
u8 rsa_sig[256];
__le16 flags;
u8 version_format;
u8 version_length;
u8 version[16];
__le16 dir_type;
__le16 trailer_length;
__le32 sig; /* BNXT_UCODE_TRAILER_SIGNATURE */
__le32 chksum; /* CRC-32 */
};
#endif
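/*
* Hypothetical parse sketch (illustrative; the function name is made up):
* validating a firmware image against the header layout above.
*/
#if 0	/* example only */
static bool example_fw_image_valid(const u8 *data, size_t len)
{
const struct bnxt_fw_header *hdr = (const struct bnxt_fw_header *)data;
if (len < sizeof(*hdr))
return false;
/* The signature is stored little endian in the image. */
if (le32_to_cpu(hdr->signature) != BNXT_FIRMWARE_BIN_SIGNATURE)
return false;
return hdr->code_type < MAX_CODE_TYPE && hdr->device < MAX_DEVICE_FAMILY;
}
#endif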

View File

@@ -0,0 +1,588 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2022-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "bnxt_compat.h"
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hdbr.h"
/*
* Map DB type to DB copy group type
*/
int bnxt_hdbr_get_grp(u64 db_val)
{
db_val &= DBC_TYPE_MASK;
switch (db_val) {
case DBR_TYPE_SQ:
return DBC_GROUP_SQ;
case DBR_TYPE_RQ:
return DBC_GROUP_RQ;
case DBR_TYPE_SRQ:
case DBR_TYPE_SRQ_ARM:
case DBR_TYPE_SRQ_ARMENA:
return DBC_GROUP_SRQ;
case DBR_TYPE_CQ:
case DBR_TYPE_CQ_ARMSE:
case DBR_TYPE_CQ_ARMALL:
case DBR_TYPE_CQ_ARMENA:
case DBR_TYPE_CQ_CUTOFF_ACK:
return DBC_GROUP_CQ;
default:
break;
}
return DBC_GROUP_MAX;
}
/*
* The caller of this function is a debugfs knob. It returns the values of
* the kernel memory table's main structure to the caller.
* Additionally, it dumps the page contents to dmesg; with many pages, the
* output would be too large for debugfs.
*/
char *bnxt_hdbr_ktbl_dump(struct bnxt_hdbr_ktbl *ktbl)
{
struct dbc_drk64 *slot;
char *buf;
int i, j;
if (!ktbl) {
buf = kasprintf(GFP_KERNEL, "ktbl is NULL\n");
return buf;
}
/* Structure data to debugfs console */
buf = kasprintf(GFP_KERNEL,
"group_type = %d\n"
"first_avail = %d\n"
"first_empty = %d\n"
"last_entry = %d\n"
"slot_avail = %d\n"
"num_4k_pages = %d\n"
"daddr = 0x%016llX\n"
"link_slot = 0x%016llX\n",
ktbl->group_type,
ktbl->first_avail,
ktbl->first_empty,
ktbl->last_entry,
ktbl->slot_avail,
ktbl->num_4k_pages,
ktbl->daddr,
(u64)ktbl->link_slot);
/* Page content dump to dmesg console */
pr_info("====== Dumping ktbl info ======\n%s", buf);
for (i = 0; i < ktbl->num_4k_pages; i++) {
slot = ktbl->pages[i];
pr_info("ktbl->pages[%d]: 0x%016llX\n", i, (u64)slot);
for (j = 0; j < 256; j++) {
if (j && j < 255 && !slot[j].flags && !slot[j].memptr)
continue;
pr_info("pages[%2d][%3d], 0x%016llX, 0x%016llX\n",
i, j, le64_to_cpu(slot[j].flags),
le64_to_cpu(slot[j].memptr));
}
}
return buf;
}
/*
* This function is called during L2 driver context memory allocation time.
* It is on the path of nic open.
* The initialization allocates the memory for the main data structure and
* sets up initial values.
* pg_ptr and da are pointing to the first page allocated in
* bnxt_setup_ctxm_pg_tbls()
*/
int bnxt_hdbr_ktbl_init(struct bnxt *bp, int group, void *pg_ptr, dma_addr_t da)
{
struct bnxt_hdbr_ktbl *ktbl;
int i;
ktbl = kzalloc(sizeof(*ktbl), GFP_KERNEL);
if (!ktbl)
return -ENOMEM;
memset(pg_ptr, 0, PAGE_SIZE_4K);
ktbl->pdev = bp->pdev;
spin_lock_init(&ktbl->hdbr_kmem_lock);
ktbl->group_type = group;
ktbl->first_avail = 0;
ktbl->first_empty = 0;
ktbl->last_entry = -1; /* There is no last entry at first */
ktbl->slot_avail = NSLOT_PER_4K_PAGE;
ktbl->num_4k_pages = 1;
ktbl->pages[0] = pg_ptr;
ktbl->daddr = da;
ktbl->link_slot = pg_ptr + PAGE_SIZE_4K - DBC_KERNEL_ENTRY_SIZE;
for (i = 1; i < ktbl->num_4k_pages; i++) {
pg_ptr += PAGE_SIZE_4K;
ktbl->pages[i] = pg_ptr;
da += PAGE_SIZE_4K;
bnxt_hdbr_set_link(ktbl->link_slot, da);
ktbl->link_slot += PAGE_SIZE_4K;
}
/* Link to main bnxt structure */
bp->hdbr_info.ktbl[group] = ktbl;
return 0;
}
/*
* This function is called during L2 driver context memory free time. It is on
* the path of nic close.
*/
void bnxt_hdbr_ktbl_uninit(struct bnxt *bp, int group)
{
struct bnxt_hdbr_ktbl *ktbl;
struct dbc_drk64 *slot;
dma_addr_t da;
void *ptr;
int i;
/* Tear off from bp structure first */
ktbl = bp->hdbr_info.ktbl[group];
bp->hdbr_info.ktbl[group] = NULL;
if (!ktbl)
return;
/* Free attached pages (the first page will be freed by bnxt_free_ctx_pg_tbls()) */
for (i = ktbl->num_4k_pages - 1; i >= 1; i--) {
ptr = ktbl->pages[i];
slot = ktbl->pages[i - 1] + PAGE_SIZE_4K - DBC_KERNEL_ENTRY_SIZE;
da = (dma_addr_t)le64_to_cpu(slot->memptr);
dma_free_coherent(&bp->pdev->dev, PAGE_SIZE_4K, ptr, da);
}
/* Free the control structure at last */
kfree(ktbl);
}
/*
* This function is called when bnxt_hdbr_reg_apg() runs out of memory slots.
* hdbr_kmem_lock is held in caller, so it is safe to alter the kernel page
* chain.
*/
static int bnxt_hdbr_alloc_ktbl_pg(struct bnxt_hdbr_ktbl *ktbl)
{
dma_addr_t da;
void *ptr;
/* Development stage guard */
if (ktbl->num_4k_pages >= MAX_KMEM_4K_PAGES) {
pr_err("Must fix: need more than MAX_KMEM_4K_PAGES\n");
return -ENOMEM;
}
/* Alloc one page */
ptr = dma_alloc_coherent(&ktbl->pdev->dev, PAGE_SIZE_4K, &da, GFP_KERNEL | __GFP_ZERO);
if (!ptr)
return -ENOMEM;
/* Chain up with existing pages */
ktbl->pages[ktbl->num_4k_pages] = ptr;
bnxt_hdbr_set_link(ktbl->link_slot, da);
ktbl->link_slot = ptr + PAGE_SIZE_4K - DBC_KERNEL_ENTRY_SIZE;
ktbl->num_4k_pages += 1;
ktbl->slot_avail += NSLOT_PER_4K_PAGE;
return 0;
}
/*
* This function is called when L2 driver, RoCE driver or RoCE driver on
* behalf of rocelib needs to register its application memory page.
* Each application memory page is linked in kernel memory table with a
* 16 bytes memory slot.
*/
int bnxt_hdbr_reg_apg(struct bnxt_hdbr_ktbl *ktbl, dma_addr_t ap_da, int *idx, u16 pi)
{
struct dbc_drk64 *slot;
int rc = 0;
spin_lock(&ktbl->hdbr_kmem_lock);
/* Add into kernel table */
if (ktbl->slot_avail == 0) {
rc = bnxt_hdbr_alloc_ktbl_pg(ktbl);
if (rc)
goto exit;
}
/* Fill up the new entry */
slot = get_slot(ktbl, ktbl->first_avail);
bnxt_hdbr_set_slot(slot, ap_da, pi, ktbl->first_avail == ktbl->first_empty);
*idx = ktbl->first_avail;
ktbl->slot_avail--;
/* Clear last flag of previous and advance first_avail index */
if (ktbl->first_avail == ktbl->first_empty) {
if (ktbl->last_entry >= 0) {
slot = get_slot(ktbl, ktbl->last_entry);
slot->flags &= cpu_to_le64(~DBC_DRK64_LAST);
}
ktbl->last_entry = ktbl->first_avail;
ktbl->first_avail++;
ktbl->first_empty++;
} else {
while (++ktbl->first_avail < ktbl->first_empty) {
slot = get_slot(ktbl, ktbl->first_avail);
if (slot->flags & cpu_to_le64(DBC_DRK64_VALID))
continue;
break;
}
}
exit:
spin_unlock(&ktbl->hdbr_kmem_lock);
return rc;
}
EXPORT_SYMBOL(bnxt_hdbr_reg_apg);
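/*
* Hypothetical caller sketch (illustrative; "my_page_da" is a made-up DMA
* address): registering one application page and later releasing its slot.
*/
#if 0	/* example only */
int idx, rc;
rc = bnxt_hdbr_reg_apg(ktbl, my_page_da, &idx, 0);
if (!rc) {
/* The page is now linked into the kernel table at "idx". */
bnxt_hdbr_unreg_apg(ktbl, idx);
}
#endif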
/*
* This function is called when L2 driver, RoCE driver or RoCE driver on
* behalf of rocelib need to unregister its application memory page.
* The corresponding memory slot need to be cleared.
* Kernel memory table will reuse that slot for later application page.
*/
void bnxt_hdbr_unreg_apg(struct bnxt_hdbr_ktbl *ktbl, int idx)
{
struct dbc_drk64 *slot;
spin_lock(&ktbl->hdbr_kmem_lock);
if (idx == ktbl->last_entry) {
/* Find the new last_entry index, and mark last */
while (--ktbl->last_entry >= 0) {
slot = get_slot(ktbl, ktbl->last_entry);
if (slot->flags & cpu_to_le64(DBC_DRK64_VALID))
break;
}
if (ktbl->last_entry >= 0) {
slot = get_slot(ktbl, ktbl->last_entry);
slot->flags |= cpu_to_le64(DBC_DRK64_LAST);
}
ktbl->first_empty = ktbl->last_entry + 1;
}
/* unregister app page entry */
bnxt_hdbr_clear_slot(get_slot(ktbl, idx));
/* update first_avail index to lower possible */
if (idx < ktbl->first_avail)
ktbl->first_avail = idx;
ktbl->slot_avail++;
spin_unlock(&ktbl->hdbr_kmem_lock);
}
EXPORT_SYMBOL(bnxt_hdbr_unreg_apg);
/*
* Map L2 ring type to DB copy group type
*/
int bnxt_hdbr_r2g(u32 ring_type)
{
switch (ring_type) {
case HWRM_RING_ALLOC_TX:
return DBC_GROUP_SQ;
case HWRM_RING_ALLOC_RX:
case HWRM_RING_ALLOC_AGG:
return DBC_GROUP_SRQ;
case HWRM_RING_ALLOC_CMPL:
return DBC_GROUP_CQ;
default:
break;
}
return DBC_GROUP_MAX;
}
/*
* Allocate a 4K page for L2 DB copies. This is called when running out of
* available DB copy blocks during DB registering.
*/
static int bnxt_hdbr_l2_alloc_page(struct bnxt *bp, int group)
{
struct bnxt_hdbr_l2_pgs *app_pgs;
dma_addr_t da = 0;
int ktbl_idx;
__le64 *ptr;
int rc;
app_pgs = bp->hdbr_pgs[group];
if (app_pgs->alloced_pages >= app_pgs->max_pages) {
dev_err(&bp->pdev->dev, "Max reserved HDBR pages exceeded\n");
return -EINVAL;
}
ptr = dma_zalloc_coherent(&bp->pdev->dev, PAGE_SIZE_4K, &da, GFP_KERNEL);
if (!ptr)
return -ENOMEM;
ptr[0] = cpu_to_le64(DBC_VALUE_LAST);
wmb(); /* Make sure HW sees this slot when the page is linked in */
/* Register to kernel table */
rc = bnxt_hdbr_reg_apg(bp->hdbr_info.ktbl[group], da, &ktbl_idx, 0);
if (rc) {
dma_free_coherent(&bp->pdev->dev, PAGE_SIZE_4K, ptr, da);
return rc;
}
app_pgs->pages[app_pgs->alloced_pages].ptr = ptr;
app_pgs->pages[app_pgs->alloced_pages].da = da;
app_pgs->pages[app_pgs->alloced_pages].ktbl_idx = ktbl_idx;
app_pgs->alloced_pages++;
return 0;
}
/*
* The L2 init function is called after the L2 driver has configured backing
* store context memory and bnxt_hwrm_func_resc_qcaps.
* The initialization allocates the management structure and initializes it
* with the proper values.
*
* Inside L2 DB copy app page, DBs are grouped by group type.
* DBC_GROUP_SQ : grp_size = 1,
* offset 0: SQ producer index doorbell
* DBC_GROUP_SRQ : grp_size = 1,
* offset 0: SRQ producer index doorbell
* DBC_GROUP_CQ : grp_size = 3,
* offset 0: CQ consumer index doorbell
* offset 1: CQ_ARMALL/CQ_ARMASE (share slot)
* offset 2: CUTOFF_ACK
*/
static int bnxt_hdbr_l2_init_group(struct bnxt *bp, int group)
{
struct bnxt_hdbr_l2_pgs *app_pgs = NULL;
int grp_size, entries_per_pg, entries, max_pgs;
switch (group) {
case DBC_GROUP_SQ:
grp_size = HDBR_L2_SQ_BLK_SIZE;
entries_per_pg = HDBR_L2_SQ_ENTRY_PER_PAGE;
entries = bp->hw_resc.max_tx_rings;
break;
case DBC_GROUP_SRQ:
grp_size = HDBR_L2_SRQ_BLK_SIZE;
entries_per_pg = HDBR_L2_SRQ_ENTRY_PER_PAGE;
entries = bp->hw_resc.max_rx_rings;
break;
case DBC_GROUP_CQ:
grp_size = HDBR_L2_CQ_BLK_SIZE;
entries_per_pg = HDBR_L2_CQ_ENTRY_PER_PAGE;
entries = bp->hw_resc.max_cp_rings;
break;
default:
/* Other group/DB types are not needed */
goto exit;
}
max_pgs = DIV_ROUND_UP(entries, entries_per_pg);
app_pgs = kzalloc(struct_size(app_pgs, pages, max_pgs), GFP_KERNEL);
if (!app_pgs)
return -ENOMEM;
app_pgs->max_pages = max_pgs;
app_pgs->grp_size = grp_size;
app_pgs->entries_per_pg = entries_per_pg;
exit:
/* Link to main bnxt structure */
bp->hdbr_pgs[group] = app_pgs;
return 0;
}
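/*
* Illustrative arithmetic for the grouping described above: a 4K page holds
* PAGE_SIZE_4K / HDBR_DB_SIZE = 4096 / 8 = 512 doorbell slots, so SQ and
* SRQ pages carry 512 one-slot entries each, while CQ pages carry
* 512 / 3 = 170 three-slot entries (consumer index, ARM, CUTOFF_ACK).
*/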
int bnxt_hdbr_l2_init(struct bnxt *bp)
{
int rc, group;
if (!bp->hdbr_info.hdbr_enabled)
return 0;
for (group = DBC_GROUP_SQ; group < DBC_GROUP_MAX; group++) {
rc = bnxt_hdbr_l2_init_group(bp, group);
if (rc)
return rc;
}
return 0;
}
/*
* This function is called during L2 driver context memory free time. It is on
* the path of nic close.
*/
void bnxt_hdbr_l2_uninit(struct bnxt *bp, int group)
{
struct bnxt_hdbr_l2_pgs *pgs;
struct hdbr_l2_pg *p;
int i;
/* Cut off from main structure */
pgs = bp->hdbr_pgs[group];
bp->hdbr_pgs[group] = NULL;
if (!pgs)
return;
for (i = 0; i < pgs->alloced_pages; i++) {
p = &pgs->pages[i];
/* Unregister from kernel table */
bnxt_hdbr_unreg_apg(bp->hdbr_info.ktbl[group], p->ktbl_idx);
/* Free memory up */
dma_free_coherent(&bp->pdev->dev, PAGE_SIZE_4K, p->ptr, p->da);
}
kfree(pgs);
}
/*
* This function is called when a new db is created.
* It finds a memory slot in the DB copy application page and returns its
* address.
* Not all DB types need a copy; for those that don't, we simply return
* NULL.
*/
__le64 *bnxt_hdbr_reg_db(struct bnxt *bp, int group)
{
struct bnxt_hdbr_l2_pgs *pgs;
struct hdbr_l2_pg *p;
int rc, i, n, idx;
if (group >= DBC_GROUP_MAX)
return NULL;
pgs = bp->hdbr_pgs[group];
if (!pgs)
return NULL;
if (pgs->next_page == pgs->alloced_pages) {
rc = bnxt_hdbr_l2_alloc_page(bp, group);
if (rc)
return NULL;
}
n = pgs->grp_size;
p = &pgs->pages[pgs->next_page];
idx = pgs->next_entry * n; /* This is what we'll return */
for (i = 0; i < n; i++)
p->ptr[idx + i] = cpu_to_le64(DBC_VALUE_INIT);
pgs->next_entry++;
if (pgs->next_entry == pgs->entries_per_pg) {
pgs->next_page++;
pgs->next_entry = 0;
} else {
p->ptr[pgs->next_entry * n] = cpu_to_le64(DBC_VALUE_LAST);
}
return &p->ptr[idx];
}
/*
* This function is called when all L2 rings are freed.
* The driver is still running, but the rings are freed, so all DB copy slots
* should be reclaimed for the DBs of rings created later.
*/
void bnxt_hdbr_reset_l2pgs(struct bnxt *bp)
{
struct bnxt_hdbr_l2_pgs *pgs;
struct hdbr_l2_pg *p;
int group, i;
for (group = DBC_GROUP_SQ; group < DBC_GROUP_MAX; group++) {
pgs = bp->hdbr_pgs[group];
if (!pgs)
continue;
for (i = 0; i < pgs->alloced_pages; i++) {
p = &pgs->pages[i];
memset(p->ptr, 0, PAGE_SIZE_4K);
p->ptr[0] = cpu_to_le64(DBC_VALUE_LAST);
}
pgs->next_page = 0;
pgs->next_entry = 0;
}
}
/*
* The caller of this function is a debugfs knob. It returns the values of
* the L2 driver DB copy region's main structure to the caller.
* Additionally, it dumps the page contents to dmesg; with many pages, the
* output would be too large for debugfs.
*/
char *bnxt_hdbr_l2pg_dump(struct bnxt_hdbr_l2_pgs *app_pgs)
{
struct hdbr_l2_pg *p;
int used_entries = 0;
u64 dbc_val;
char *buf;
int pi, i;
if (!app_pgs) {
buf = kasprintf(GFP_KERNEL, "No data available!\n");
return buf;
}
if (app_pgs->alloced_pages)
used_entries = app_pgs->next_page * app_pgs->entries_per_pg + app_pgs->next_entry;
/* Structure data to debugfs console */
buf = kasprintf(GFP_KERNEL,
"max_pages = %d\n"
"alloced_pages = %d\n"
"group_size = %d\n"
"entries_per_pg = %d\n"
"used entries = %d\n"
"used db slots = %d\n",
app_pgs->max_pages,
app_pgs->alloced_pages,
app_pgs->grp_size,
app_pgs->entries_per_pg,
used_entries,
used_entries * app_pgs->grp_size);
pr_info("====== Dumping pages info ======\n%s", buf);
for (pi = 0; pi < app_pgs->alloced_pages; pi++) {
p = &app_pgs->pages[pi];
/* Page content dump to dmesg console */
pr_info("page[%d].kernel addr = 0x%016llX\n"
"page[%d].dma addr = 0x%016llX\n"
"page[%d].Kernel index = %d\n",
pi, (u64)p->ptr,
pi, p->da,
pi, p->ktbl_idx);
for (i = 0; i < 512; i++) {
if (i && i < 511 && !p->ptr[i])
continue;
dbc_val = le64_to_cpu(p->ptr[i]);
pr_info("page[%d][%3d] 0x%016llX : type=%llx "
"debug_trace=%d valid=%d path=%llx xID=0x%05llx "
"toggle=%llx epoch=%d index=0x%06llx\n",
pi, i, dbc_val,
(dbc_val & DBC_DBC64_TYPE_MASK) >> DBC_DBC64_TYPE_SFT,
(dbc_val & DBC_DBC64_DEBUG_TRACE) ? 1 : 0,
(dbc_val & DBC_DBC64_VALID) ? 1 : 0,
(dbc_val & DBC_DBC64_PATH_MASK) >> DBC_DBC64_PATH_SFT,
(dbc_val & DBC_DBC64_XID_MASK) >> DBC_DBC64_XID_SFT,
(dbc_val & DBC_DBC64_TOGGLE_MASK) >> DBC_DBC64_TOGGLE_SFT,
(dbc_val & DBC_DBC64_EPOCH) ? 1 : 0,
(dbc_val & DBC_DBC64_INDEX_MASK));
}
}
return buf;
}

View File

@@ -0,0 +1,141 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2022-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef __BNXT_HDBR_H__
#define __BNXT_HDBR_H__
/*
* 64-bit doorbell
* +------+-----+-----------+-----+-----+------+-----+------+------+-----+-----+
* |Offset|63,60| 59| 58|57,56| (4) |51,32|31,27| 26,25| 24| 23,0|
* +------+-----+-----------+-----+-----+------+-----+------+------+-----+-----+
* | | | | | |unused| |unused|toggle|epoch| |
* | 0x0 | type| unused |valid| path|------| xID |------+------+-----+index|
* | | | | | | pi-hi| | pi-lo | |
* +------+-----+-----------+-----+-----+------+-----+------+------+-----+-----+
*
* 64-bit doorbell copy format for HW DBR recovery
* +------+-----+-----------+-----+-----+------+-----+------+------+-----+-----+
* |Offset|63,60| 59| 58|57,56| (4) |51,32| (5) | 26,25| 24| 23,0|
* +------+-----+-----------+-----+-----+------+-----+------+------+-----+-----+
* |0x0 | type|debug_trace|valid| path|unused| xID |unused|toggle|epoch|index|
* +------+-----+-----------+-----+-----+------+-----+------+------+-----+-----+
*/
#define DBC_TYPE_MASK (0xfULL << 60)
#define DBC_VALUE_INIT DBR_INDEX_MASK
#define DBC_VALUE_LAST (DBC_TYPE_MASK | DBR_VALID)
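/*
* Illustrative decode of the two sentinel values above, following the
* layout diagram: DBC_VALUE_INIT sets only the 24-bit index field to all
* ones, while DBC_VALUE_LAST sets the 4-bit type field to 0xf together
* with the valid bit, marking the final slot of a copy area.
*/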
/* Doorbell Recovery Kernel Memory Structures
* +------+------+-----+------+-----+------+------+---------+------+----+-----+
* |Offset| 63,48|47,32| 31,12|11,10| 9,8| 7,4| 3| 2| 1| 0|
* +------+------+-----+------+-----+------+------+---------+------+----+-----+
* |0x0 |unused| pi |unused| size|stride|unused|db_format|linked|last|valid|
* +------+------+-----+------+-----+------+------+---------+------+----+-----+
* |0x8 | memptr |
* +------+-------------------------------------------------------------------+
*/
#define DBC_KERNEL_ENTRY_SIZE 16
#define PAGE_SIZE_4K 4096
#define MAX_KMEM_4K_PAGES 1029
#define NSLOT_PER_4K_PAGE (PAGE_SIZE_4K / DBC_KERNEL_ENTRY_SIZE - 1)
struct bnxt_hdbr_ktbl {
struct pci_dev *pdev;
/* protect this main DB copy kernel memory table data structure */
spinlock_t hdbr_kmem_lock;
int group_type;
int first_avail;
int first_empty;
int last_entry;
int num_4k_pages;
int slot_avail;
void *pages[MAX_KMEM_4K_PAGES];
dma_addr_t daddr;
struct dbc_drk64 *link_slot;
};
static inline struct dbc_drk64 *get_slot(struct bnxt_hdbr_ktbl *ktbl, int idx)
{
return ((struct dbc_drk64 *)ktbl->pages[idx / NSLOT_PER_4K_PAGE])
+ idx % NSLOT_PER_4K_PAGE;
}
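/*
* Worked example (illustrative): each 4K page holds NSLOT_PER_4K_PAGE = 255
* usable slots (the last 16-byte entry is reserved for the page link), so a
* global index of 300 resolves to pages[300 / 255] = pages[1], slot
* 300 % 255 = 45.
*/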
static inline void bnxt_hdbr_clear_slot(struct dbc_drk64 *slt)
{
slt->flags = 0;
wmb(); /* Sync flags before clear memory pointer */
slt->memptr = 0;
}
static inline void bnxt_hdbr_set_slot(struct dbc_drk64 *slt, dma_addr_t da,
u16 pi, bool last)
{
u64 flags;
flags = DBC_DRK64_VALID | DBC_DRK64_DB_FORMAT_B64 |
DBC_DRK64_STRIDE_OFF;
flags |= ((u64)pi << DBC_DRK64_PI_SFT);
if (last)
flags |= DBC_DRK64_LAST;
slt->memptr = cpu_to_le64(da);
wmb(); /* Sync memory pointer before setting flags */
slt->flags = cpu_to_le64(flags);
}
static inline void bnxt_hdbr_set_link(struct dbc_drk64 *ls, dma_addr_t da)
{
ls->memptr = cpu_to_le64(da);
wmb(); /* Sync memory pointer before setting flags */
ls->flags = cpu_to_le64(DBC_DRK64_VALID | DBC_DRK64_LINKED);
}
/* L2 driver part of the HW based doorbell drop recovery definition */
#define HDBR_DB_SIZE 8
#define HDBR_L2_SQ_BLK_SIZE 1
#define HDBR_L2_SRQ_BLK_SIZE 1
#define HDBR_L2_CQ_BLK_SIZE 3
#define HDBR_DB_PER_PAGE (PAGE_SIZE_4K / HDBR_DB_SIZE)
#define HDBR_L2_SQ_ENTRY_PER_PAGE (HDBR_DB_PER_PAGE / HDBR_L2_SQ_BLK_SIZE)
#define HDBR_L2_SRQ_ENTRY_PER_PAGE (HDBR_DB_PER_PAGE / HDBR_L2_SRQ_BLK_SIZE)
#define HDBR_L2_CQ_ENTRY_PER_PAGE (HDBR_DB_PER_PAGE / HDBR_L2_CQ_BLK_SIZE)
struct hdbr_l2_pg {
__le64 *ptr;
dma_addr_t da;
int ktbl_idx;
};
struct bnxt_hdbr_l2_pgs {
int max_pages;
int alloced_pages;
int grp_size;
int entries_per_pg;
int next_page;
int next_entry;
struct hdbr_l2_pg pages[] __counted_by(max_pages);
};
int bnxt_hdbr_r2g(u32 ring_type);
int bnxt_hdbr_get_grp(u64 db_val);
int bnxt_hdbr_ktbl_init(struct bnxt *bp, int group, void *pg_ptr, dma_addr_t da);
void bnxt_hdbr_ktbl_uninit(struct bnxt *bp, int group);
int bnxt_hdbr_reg_apg(struct bnxt_hdbr_ktbl *ktbl, dma_addr_t ap_da, int *idx, u16 pi);
void bnxt_hdbr_unreg_apg(struct bnxt_hdbr_ktbl *ktbl, int idx);
char *bnxt_hdbr_ktbl_dump(struct bnxt_hdbr_ktbl *ktbl);
int bnxt_hdbr_l2_init(struct bnxt *bp);
void bnxt_hdbr_l2_uninit(struct bnxt *bp, int group);
__le64 *bnxt_hdbr_reg_db(struct bnxt *bp, int group);
void bnxt_hdbr_reset_l2pgs(struct bnxt *bp);
char *bnxt_hdbr_l2pg_dump(struct bnxt_hdbr_l2_pgs *app_pgs);
#endif

File diff suppressed because it is too large

View File

@@ -0,0 +1,245 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2023 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include "bnxt_compat.h"
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_hwmon.h"
#ifdef CONFIG_BNXT_HWMON
void bnxt_hwmon_notify_event(struct bnxt *bp)
{
u32 attr;
if (!bp->hwmon_dev)
return;
switch (bp->thermal_threshold_type) {
case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
attr = hwmon_temp_max_alarm;
break;
case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
attr = hwmon_temp_crit_alarm;
break;
case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
attr = hwmon_temp_emergency_alarm;
break;
default:
return;
}
hwmon_notify_event(&bp->pdev->dev, hwmon_temp, attr, 0);
}
static int bnxt_hwrm_temp_query(struct bnxt *bp, u8 *temp)
{
struct hwrm_temp_monitor_query_output *resp;
struct hwrm_temp_monitor_query_input *req;
int rc;
rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
if (rc)
return rc;
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send_silent(bp, req);
if (rc)
goto drop_req;
if (temp) {
*temp = resp->temp;
} else if (resp->flags &
TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE) {
bp->fw_cap |= BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED;
bp->warn_thresh_temp = resp->warn_threshold;
bp->crit_thresh_temp = resp->critical_threshold;
bp->fatal_thresh_temp = resp->fatal_threshold;
bp->shutdown_thresh_temp = resp->shutdown_threshold;
}
drop_req:
hwrm_req_drop(bp, req);
return rc;
}
static umode_t bnxt_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type,
u32 attr, int channel)
{
const struct bnxt *bp = _data;
if (type != hwmon_temp)
return 0;
switch (attr) {
case hwmon_temp_input:
return 0444;
case hwmon_temp_max:
case hwmon_temp_crit:
case hwmon_temp_emergency:
case hwmon_temp_max_alarm:
case hwmon_temp_crit_alarm:
case hwmon_temp_emergency_alarm:
if (!(bp->fw_cap & BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED))
return 0;
return 0444;
default:
return 0;
}
}
static int bnxt_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long *val)
{
struct bnxt *bp = dev_get_drvdata(dev);
u8 temp = 0;
int rc;
switch (attr) {
case hwmon_temp_input:
rc = bnxt_hwrm_temp_query(bp, &temp);
if (!rc)
*val = temp * 1000;
return rc;
case hwmon_temp_max:
*val = bp->warn_thresh_temp * 1000;
return 0;
case hwmon_temp_crit:
*val = bp->crit_thresh_temp * 1000;
return 0;
case hwmon_temp_emergency:
*val = bp->fatal_thresh_temp * 1000;
return 0;
case hwmon_temp_max_alarm:
rc = bnxt_hwrm_temp_query(bp, &temp);
if (!rc)
*val = temp >= bp->warn_thresh_temp;
return rc;
case hwmon_temp_crit_alarm:
rc = bnxt_hwrm_temp_query(bp, &temp);
if (!rc)
*val = temp >= bp->crit_thresh_temp;
return rc;
case hwmon_temp_emergency_alarm:
rc = bnxt_hwrm_temp_query(bp, &temp);
if (!rc)
*val = temp >= bp->fatal_thresh_temp;
return rc;
default:
return -EOPNOTSUPP;
}
}
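/*
* Illustrative note: hwmon reports temperatures in millidegrees Celsius,
* which is why the firmware's whole-degree readings are scaled by 1000
* above; e.g. a firmware value of 55 surfaces as 55000 through
* /sys/class/hwmon/hwmonN/temp1_input.
*/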
static const struct hwmon_channel_info *bnxt_hwmon_info[] = {
HWMON_CHANNEL_INFO(temp,
HWMON_T_INPUT |
HWMON_T_MAX | HWMON_T_CRIT |
HWMON_T_EMERGENCY | HWMON_T_MAX_ALARM |
HWMON_T_CRIT_ALARM | HWMON_T_EMERGENCY_ALARM),
NULL
};
static const struct hwmon_ops bnxt_hwmon_ops = {
.is_visible = bnxt_hwmon_is_visible,
.read = bnxt_hwmon_read,
};
static const struct hwmon_chip_info bnxt_hwmon_chip_info = {
.ops = &bnxt_hwmon_ops,
.info = bnxt_hwmon_info,
};
static ssize_t temp1_shutdown_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bnxt *bp = dev_get_drvdata(dev);
return sysfs_emit(buf, "%u\n", bp->shutdown_thresh_temp * 1000);
}
static ssize_t temp1_shutdown_alarm_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bnxt *bp = dev_get_drvdata(dev);
u8 temp;
int rc;
rc = bnxt_hwrm_temp_query(bp, &temp);
if (rc)
return -EIO;
return sysfs_emit(buf, "%u\n", temp >= bp->shutdown_thresh_temp);
}
static DEVICE_ATTR_RO(temp1_shutdown);
static DEVICE_ATTR_RO(temp1_shutdown_alarm);
static struct attribute *bnxt_temp_extra_attrs[] = {
&dev_attr_temp1_shutdown.attr,
&dev_attr_temp1_shutdown_alarm.attr,
NULL,
};
static umode_t bnxt_temp_extra_attrs_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = kobj_to_dev(kobj);
struct bnxt *bp = dev_get_drvdata(dev);
/* Shutdown temperature setting in NVM is optional */
if (!(bp->fw_cap & BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED) ||
!bp->shutdown_thresh_temp)
return 0;
return attr->mode;
}
static const struct attribute_group bnxt_temp_extra_group = {
.attrs = bnxt_temp_extra_attrs,
.is_visible = bnxt_temp_extra_attrs_visible,
};
__ATTRIBUTE_GROUPS(bnxt_temp_extra);
void bnxt_hwmon_uninit(struct bnxt *bp)
{
if (bp->hwmon_dev) {
hwmon_device_unregister(bp->hwmon_dev);
bp->hwmon_dev = NULL;
}
}
void bnxt_hwmon_init(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
int rc;
/* temp1_xxx is the only sensor; make sure it is not registered if it will fail */
rc = bnxt_hwrm_temp_query(bp, NULL);
if (rc == -EACCES || rc == -EOPNOTSUPP) {
bnxt_hwmon_uninit(bp);
return;
}
if (bp->hwmon_dev)
return;
bp->hwmon_dev = hwmon_device_register_with_info(&pdev->dev,
DRV_MODULE_NAME, bp,
&bnxt_hwmon_chip_info,
bnxt_temp_extra_groups);
if (IS_ERR(bp->hwmon_dev)) {
bp->hwmon_dev = NULL;
dev_warn(&pdev->dev, "Cannot register hwmon device\n");
}
}
#endif

View File

@@ -0,0 +1,30 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2023 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_HWMON_H
#define BNXT_HWMON_H
#ifdef CONFIG_BNXT_HWMON
void bnxt_hwmon_init(struct bnxt *bp);
void bnxt_hwmon_uninit(struct bnxt *bp);
void bnxt_hwmon_notify_event(struct bnxt *bp);
#else
static inline void bnxt_hwmon_uninit(struct bnxt *bp)
{
}
static inline void bnxt_hwmon_init(struct bnxt *bp)
{
}
static inline void bnxt_hwmon_notify_event(struct bnxt *bp)
{
}
#endif
#endif

View File

@@ -0,0 +1,836 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2020-2022 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include "bnxt_compat.h"
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
static u64 hwrm_calc_sentinel(struct bnxt_hwrm_ctx *ctx, u16 req_type)
{
return (((uintptr_t)ctx) + req_type) ^ BNXT_HWRM_SENTINEL;
}
/**
* __hwrm_req_init() - Initialize an HWRM request.
* @bp: The driver context.
* @req: A pointer to the request pointer to initialize.
* @req_type: The request type. This will be converted to little endian
* before being written to the req_type field of the returned request.
* @req_len: The length of the request to be allocated.
*
* Allocate DMA resources and initialize a new HWRM request object of the
* given type. The response address field in the request is configured with
* the DMA bus address that has been mapped for the response and the passed
* request is pointed to kernel virtual memory mapped for the request (such
* that short_input indirection can be accomplished without copying). The
* request's target and completion ring are initialized to default values and
* can be overridden by writing to the returned request object directly.
*
* The initialized request can be further customized by writing to its fields
* directly, taking care to convert such fields to little endian. The request
* object will be consumed (and all its associated resources released) upon
* passing it to hwrm_req_send() unless ownership of the request has been
* claimed by the caller via a call to hwrm_req_hold(). If the request is not
* consumed, either because it is never sent or because ownership has been
* claimed, then it must be released by a call to hwrm_req_drop().
*
* Return: zero on success, negative error code otherwise:
* E2BIG: the type of request pointer is too large to fit.
* ENOMEM: an allocation failure occurred.
*/
int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len)
{
struct bnxt_hwrm_ctx *ctx;
dma_addr_t dma_handle;
u8 *req_addr;
if (req_len > BNXT_HWRM_CTX_OFFSET)
return -E2BIG;
req_addr = dma_pool_alloc(bp->hwrm_dma_pool, GFP_KERNEL | __GFP_ZERO,
&dma_handle);
if (!req_addr)
return -ENOMEM;
ctx = (struct bnxt_hwrm_ctx *)(req_addr + BNXT_HWRM_CTX_OFFSET);
/* safety first, sentinel used to check for invalid requests */
ctx->sentinel = hwrm_calc_sentinel(ctx, req_type);
ctx->req_len = req_len;
ctx->req = (struct input *)req_addr;
ctx->resp = (struct output *)(req_addr + BNXT_HWRM_RESP_OFFSET);
ctx->dma_handle = dma_handle;
ctx->flags = 0; /* __GFP_ZERO, but be explicit regarding ownership */
ctx->timeout = bp->hwrm_cmd_timeout ?: DFLT_HWRM_CMD_TIMEOUT;
ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET;
ctx->gfp = GFP_KERNEL;
ctx->slice_addr = NULL;
/* initialize common request fields */
ctx->req->req_type = cpu_to_le16(req_type);
ctx->req->resp_addr = cpu_to_le64(dma_handle + BNXT_HWRM_RESP_OFFSET);
ctx->req->cmpl_ring = cpu_to_le16(BNXT_HWRM_NO_CMPL_RING);
ctx->req->target_id = cpu_to_le16(BNXT_HWRM_TARGET);
*req = ctx->req;
return 0;
}
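/* A minimal sketch of the lifecycle described above (illustrative only;
 * assumes a valid struct bnxt *bp). The request is consumed by the send:
 *
 *	struct hwrm_func_reset_input *req;
 *	int rc;
 *
 *	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
 *	if (rc)
 *		return rc;
 *	return hwrm_req_send(bp, req);
 */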
static struct bnxt_hwrm_ctx *__hwrm_ctx(struct bnxt *bp, u8 *req_addr)
{
void *ctx_addr = req_addr + BNXT_HWRM_CTX_OFFSET;
struct input *req = (struct input *)req_addr;
struct bnxt_hwrm_ctx *ctx = ctx_addr;
u64 sentinel;
if (!req) {
/* can only be due to software bug, be loud */
netdev_err(bp->dev, "null HWRM request");
dump_stack();
return NULL;
}
/* HWRM API has no type safety, verify sentinel to validate address */
sentinel = hwrm_calc_sentinel(ctx, le16_to_cpu(req->req_type));
if (ctx->sentinel != sentinel) {
/* can only be due to software bug, be loud */
netdev_err(bp->dev, "HWRM sentinel mismatch, req_type = %u\n",
(u32)le16_to_cpu(req->req_type));
dump_stack();
return NULL;
}
return ctx;
}
/**
* hwrm_req_timeout() - Set the completion timeout for the request.
* @bp: The driver context.
 * @req: The request for which to set the timeout.
* @timeout: The timeout in milliseconds.
*
* Set the timeout associated with the request for subsequent calls to
* hwrm_req_send(). Some requests are long running and require a different
* timeout than the default.
*/
void hwrm_req_timeout(struct bnxt *bp, void *req, unsigned int timeout)
{
struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
if (ctx)
ctx->timeout = timeout;
}
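/* For example, a long-running command can be granted a larger timeout
 * before it is sent (sketch; HWRM_RESET_TIMEOUT is defined in
 * bnxt_hwrm.h):
 *
 *	hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
 *	rc = hwrm_req_send(bp, req);
 */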
/**
* hwrm_req_alloc_flags() - Sets GFP allocation flags for slices.
* @bp: The driver context.
* @req: The request for which calls to hwrm_req_dma_slice() will have altered
* allocation flags.
* @gfp: A bitmask of GFP flags. These flags are passed to dma_alloc_coherent()
* whenever it is used to allocate backing memory for slices. Note that
* calls to hwrm_req_dma_slice() will not always result in new allocations,
* however, memory suballocated from the request buffer is already
* __GFP_ZERO.
*
* Sets the GFP allocation flags associated with the request for subsequent
* calls to hwrm_req_dma_slice(). This can be useful for specifying __GFP_ZERO
* for slice allocations.
*/
void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t gfp)
{
struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
if (ctx)
ctx->gfp = gfp;
}
/**
* hwrm_req_replace() - Replace request data.
* @bp: The driver context.
* @req: The request to modify. A call to hwrm_req_replace() is conceptually
* an assignment of new_req to req. Subsequent calls to HWRM API functions,
* such as hwrm_req_send(), should thus use req and not new_req (in fact,
* calls to HWRM API functions will fail if non-managed request objects
* are passed).
* @len: The length of new_req.
* @new_req: The pre-built request to copy or reference.
*
* Replaces the request data in req with that of new_req. This is useful in
* scenarios where a request object has already been constructed by a third
* party prior to creating a resource managed request using hwrm_req_init().
* Depending on the length, hwrm_req_replace() will either copy the new
* request data into the DMA memory allocated for req, or it will simply
* reference the new request and use it in lieu of req during subsequent
* calls to hwrm_req_send(). The resource management is associated with
* req and is independent of and does not apply to new_req. The caller must
 * ensure that the lifetime of new_req is at least as long as that of req. Any slices
* that may have been associated with the original request are released.
*
* Return: zero on success, negative error code otherwise:
* E2BIG: Request is too large.
* EINVAL: Invalid request to modify.
*/
int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len)
{
struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
struct input *internal_req = req;
u16 req_type;
if (!ctx)
return -EINVAL;
if (len > BNXT_HWRM_CTX_OFFSET)
return -E2BIG;
/* free any existing slices */
ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET;
if (ctx->slice_addr) {
dma_free_coherent(&bp->pdev->dev, ctx->slice_size,
ctx->slice_addr, ctx->slice_handle);
ctx->slice_addr = NULL;
}
ctx->gfp = GFP_KERNEL;
if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || len > BNXT_HWRM_MAX_REQ_LEN) {
memcpy(internal_req, new_req, len);
} else {
internal_req->req_type = ((struct input *)new_req)->req_type;
ctx->req = new_req;
}
ctx->req_len = len;
ctx->req->resp_addr = cpu_to_le64(ctx->dma_handle +
BNXT_HWRM_RESP_OFFSET);
/* update sentinel for potentially new request type */
req_type = le16_to_cpu(internal_req->req_type);
ctx->sentinel = hwrm_calc_sentinel(ctx, req_type);
return 0;
}
/**
 * hwrm_req_flags() - Set non-internal flags of the ctx
* @bp: The driver context.
* @req: The request containing the HWRM command
* @flags: ctx flags that don't have BNXT_HWRM_INTERNAL_FLAG set
*
 * ctx flags can be used by callers to control how the subsequent
 * hwrm_req_send() behaves. For example, callers can use hwrm_req_flags()
 * with BNXT_HWRM_CTX_SILENT to omit kernel prints of hwrm_req_send()
 * errors, or with BNXT_HWRM_FULL_WAIT to force hwrm_req_send() to wait
 * for the full timeout even if the firmware is not responding.
 * This generic function can be used to set any flag that is not an
 * internal flag of the HWRM module.
*/
void hwrm_req_flags(struct bnxt *bp, void *req, enum bnxt_hwrm_ctx_flags flags)
{
struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
if (ctx)
ctx->flags |= (flags & HWRM_API_FLAGS);
}
/**
* hwrm_req_hold() - Claim ownership of the request's resources.
* @bp: The driver context.
* @req: A pointer to the request to own. The request will no longer be
* consumed by calls to hwrm_req_send().
*
* Take ownership of the request. Ownership places responsibility on the
* caller to free the resources associated with the request via a call to
* hwrm_req_drop(). The caller taking ownership implies that a subsequent
* call to hwrm_req_send() will not consume the request (ie. sending will
* not free the associated resources if the request is owned by the caller).
* Taking ownership returns a reference to the response. Retaining and
* accessing the response data is the most common reason to take ownership
* of the request. Ownership can also be acquired in order to reuse the same
* request object across multiple invocations of hwrm_req_send().
*
* Return: A pointer to the response object.
*
* The resources associated with the response will remain available to the
* caller until ownership of the request is relinquished via a call to
* hwrm_req_drop(). It is not possible for hwrm_req_hold() to return NULL if
* a valid request is provided. A returned NULL value would imply a driver
* bug and the implementation will complain loudly in the logs to aid in
* detection. It should not be necessary to check the result for NULL.
*/
void *hwrm_req_hold(struct bnxt *bp, void *req)
{
struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
struct input *input = (struct input *)req;
if (!ctx)
return NULL;
if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED) {
/* can only be due to software bug, be loud */
netdev_err(bp->dev, "HWRM context already owned, req_type = %u\n",
(u32)le16_to_cpu(input->req_type));
dump_stack();
return NULL;
}
ctx->flags |= BNXT_HWRM_INTERNAL_CTX_OWNED;
return ((u8 *)req) + BNXT_HWRM_RESP_OFFSET;
}
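/* Sketch of the typical hold pattern (assumes a valid bp; the VER_GET
 * request and response types come from bnxt_hsi.h). The request is kept
 * across the send so the response can be read, then both are released:
 *
 *	struct hwrm_ver_get_output *resp;
 *	struct hwrm_ver_get_input *req;
 *	u8 maj = 0;
 *	int rc;
 *
 *	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
 *	if (rc)
 *		return rc;
 *	resp = hwrm_req_hold(bp, req);
 *	rc = hwrm_req_send(bp, req);
 *	if (!rc)
 *		maj = resp->hwrm_intf_maj_8b;
 *	hwrm_req_drop(bp, req);
 */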
static void __hwrm_ctx_drop(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
{
void *addr = ((u8 *)ctx) - BNXT_HWRM_CTX_OFFSET;
dma_addr_t dma_handle = ctx->dma_handle; /* save before invalidate */
/* unmap any auxiliary DMA slice */
if (ctx->slice_addr)
dma_free_coherent(&bp->pdev->dev, ctx->slice_size,
ctx->slice_addr, ctx->slice_handle);
/* invalidate, ensure ownership, sentinel and dma_handle are cleared */
memset(ctx, 0, sizeof(struct bnxt_hwrm_ctx));
/* return the buffer to the DMA pool */
if (dma_handle)
dma_pool_free(bp->hwrm_dma_pool, addr, dma_handle);
}
/**
* hwrm_req_drop() - Release all resources associated with the request.
* @bp: The driver context.
* @req: The request to consume, releasing the associated resources. The
* request object, any slices, and its associated response are no
* longer valid.
*
* It is legal to call hwrm_req_drop() on an unowned request, provided it
* has not already been consumed by hwrm_req_send() (for example, to release
* an aborted request). A given request should not be dropped more than once,
* nor should it be dropped after having been consumed by hwrm_req_send(). To
* do so is an error (the context will not be found and a stack trace will be
* rendered in the kernel log).
*/
void hwrm_req_drop(struct bnxt *bp, void *req)
{
struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
if (ctx)
__hwrm_ctx_drop(bp, ctx);
}
static int __hwrm_to_stderr(u32 hwrm_err)
{
switch (hwrm_err) {
case HWRM_ERR_CODE_SUCCESS:
return 0;
case HWRM_ERR_CODE_RESOURCE_LOCKED:
return -EROFS;
case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
return -EACCES;
case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
return -ENOSPC;
case HWRM_ERR_CODE_INVALID_PARAMS:
case HWRM_ERR_CODE_INVALID_FLAGS:
case HWRM_ERR_CODE_INVALID_ENABLES:
case HWRM_ERR_CODE_UNSUPPORTED_TLV:
case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
return -EINVAL;
case HWRM_ERR_CODE_NO_BUFFER:
return -ENOMEM;
case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
case HWRM_ERR_CODE_BUSY:
return -EAGAIN;
case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
return -EOPNOTSUPP;
case HWRM_ERR_CODE_PF_UNAVAILABLE:
return -ENODEV;
default:
return -EIO;
}
}
static struct bnxt_hwrm_wait_token *
__hwrm_acquire_token(struct bnxt *bp, enum bnxt_hwrm_chnl dst)
__acquires(&bp->hwrm_cmd_lock)
{
struct bnxt_hwrm_wait_token *token;
token = kzalloc(sizeof(*token), GFP_KERNEL);
if (!token)
return NULL;
mutex_lock(&bp->hwrm_cmd_lock);
token->dst = dst;
token->state = BNXT_HWRM_PENDING;
if (dst == BNXT_HWRM_CHNL_CHIMP) {
token->seq_id = bp->hwrm_cmd_seq++;
hlist_add_head_rcu(&token->node, &bp->hwrm_pending_list);
} else {
token->seq_id = bp->hwrm_cmd_kong_seq++;
}
return token;
}
static void
__hwrm_release_token(struct bnxt *bp, struct bnxt_hwrm_wait_token *token)
__releases(&bp->hwrm_cmd_lock)
{
if (token->dst == BNXT_HWRM_CHNL_CHIMP) {
hlist_del_rcu(&token->node);
kfree_rcu(token, rcu);
} else {
kfree(token);
}
mutex_unlock(&bp->hwrm_cmd_lock);
}
void
hwrm_update_token(struct bnxt *bp, u16 seq_id, enum bnxt_hwrm_wait_state state)
{
struct hlist_node __maybe_unused *dummy;
struct bnxt_hwrm_wait_token *token;
rcu_read_lock();
__hlist_for_each_entry_rcu(token, dummy, &bp->hwrm_pending_list, node) {
if (token->seq_id == seq_id) {
WRITE_ONCE(token->state, state);
rcu_read_unlock();
return;
}
}
rcu_read_unlock();
/* hwrm may have completed when we receive deferred event */
if (state != BNXT_HWRM_DEFERRED)
netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
}
static void hwrm_req_dbg(struct bnxt *bp, struct input *req)
{
u32 ring = le16_to_cpu(req->cmpl_ring);
u32 type = le16_to_cpu(req->req_type);
u32 tgt = le16_to_cpu(req->target_id);
u32 seq = le16_to_cpu(req->seq_id);
char opt[32] = "\n";
if (unlikely(ring != (u16)BNXT_HWRM_NO_CMPL_RING))
snprintf(opt, 16, " ring %d\n", ring);
if (unlikely(tgt != BNXT_HWRM_TARGET))
snprintf(opt + strlen(opt) - 1, 16, " tgt 0x%x\n", tgt);
netdev_dbg(bp->dev, "sent hwrm req_type 0x%x seq id 0x%x%s",
type, seq, opt);
}
#define hwrm_err(bp, ctx, fmt, ...) \
do { \
if ((ctx)->flags & BNXT_HWRM_CTX_SILENT) \
netdev_dbg((bp)->dev, fmt, __VA_ARGS__); \
else \
netdev_err((bp)->dev, fmt, __VA_ARGS__); \
} while (0)
static inline bool
hwrm_wait_must_abort(struct bnxt *bp, u32 req_type, u32 *fw_status)
{
if (req_type == HWRM_VER_GET)
return false;
if (!bp->fw_health || !bp->fw_health->status_reliable)
return false;
*fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
return *fw_status && !BNXT_FW_IS_HEALTHY(*fw_status);
}
static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
{
u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
enum bnxt_hwrm_chnl dst = BNXT_HWRM_CHNL_CHIMP;
u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
struct bnxt_hwrm_wait_token *token = NULL;
struct hwrm_short_input short_input = {0};
u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
unsigned int i, timeout, tmo_count;
u32 *data = (u32 *)ctx->req;
u32 msg_len = ctx->req_len;
u32 req_type, sts;
int rc = -EBUSY;
u16 len = 0;
u8 *valid;
#ifndef HSI_DBG_DISABLE
decode_hwrm_req(ctx->req);
#endif
if (ctx->flags & BNXT_HWRM_INTERNAL_RESP_DIRTY)
memset(ctx->resp, 0, PAGE_SIZE);
req_type = le16_to_cpu(ctx->req->req_type);
if (BNXT_NO_FW_ACCESS(bp) &&
(req_type != HWRM_FUNC_RESET && req_type != HWRM_VER_GET)) {
netdev_dbg(bp->dev, "hwrm req_type 0x%x skipped, FW channel down\n",
req_type);
goto exit;
}
if (msg_len > BNXT_HWRM_MAX_REQ_LEN &&
msg_len > bp->hwrm_max_ext_req_len) {
netdev_warn(bp->dev, "oversized hwrm request, req_type 0x%x",
req_type);
rc = -E2BIG;
goto exit;
}
if (hwrm_req_kong(bp, ctx->req)) {
dst = BNXT_HWRM_CHNL_KONG;
bar_offset = BNXT_GRCPF_REG_KONG_COMM;
doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
netdev_err(bp->dev, "Ring completions not supported for KONG commands, req_type = %d\n",
req_type);
rc = -EINVAL;
goto exit;
}
}
token = __hwrm_acquire_token(bp, dst);
if (!token) {
rc = -ENOMEM;
goto exit;
}
ctx->req->seq_id = cpu_to_le16(token->seq_id);
if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
msg_len > BNXT_HWRM_MAX_REQ_LEN) {
short_input.req_type = ctx->req->req_type;
short_input.signature =
cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
short_input.size = cpu_to_le16(msg_len);
short_input.req_addr = cpu_to_le64(ctx->dma_handle);
data = (u32 *)&short_input;
msg_len = sizeof(short_input);
max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
}
/* Ensure any associated DMA buffers are written before doorbell */
wmb();
/* Write request msg to hwrm channel */
__iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
for (i = msg_len; i < max_req_len; i += 4)
writel(0, bp->bar0 + bar_offset + i);
/* Ring channel doorbell */
writel(1, bp->bar0 + doorbell_offset);
hwrm_req_dbg(bp, ctx->req);
if (!pci_is_enabled(bp->pdev)) {
rc = -ENODEV;
goto exit;
}
timeout = min(ctx->timeout, bp->hwrm_cmd_max_timeout ?: HWRM_CMD_MAX_TIMEOUT);
/* convert timeout to usec */
timeout *= 1000;
i = 0;
/* Short timeout for the first few iterations:
* number of loops = number of loops for short timeout +
* number of loops for standard timeout.
*/
tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
/* Wait until hwrm response cmpl interrupt is processed */
while (READ_ONCE(token->state) < BNXT_HWRM_COMPLETE &&
i++ < tmo_count) {
/* Abort the wait for completion if the FW health
* check has failed.
*/
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
goto exit;
/* on first few passes, just barely sleep */
if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
HWRM_SHORT_MAX_TIMEOUT);
} else {
if (hwrm_wait_must_abort(bp, req_type, &sts)) {
hwrm_err(bp, ctx, "Resp cmpl intr abandoning msg: 0x%x due to firmware status: 0x%x\n",
req_type, sts);
goto exit;
}
usleep_range(HWRM_MIN_TIMEOUT,
HWRM_MAX_TIMEOUT);
}
}
if (READ_ONCE(token->state) != BNXT_HWRM_COMPLETE) {
hwrm_err(bp, ctx, "Resp cmpl intr err msg: 0x%x\n",
req_type);
goto exit;
}
len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
valid = ((u8 *)ctx->resp) + len - 1;
} else {
__le16 seen_out_of_seq = ctx->req->seq_id; /* will never see */
int j;
/* Check if response len is updated */
for (i = 0; i < tmo_count; i++) {
/* Abort the wait for completion if the FW health
* check has failed.
*/
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
goto exit;
if (token &&
READ_ONCE(token->state) == BNXT_HWRM_DEFERRED) {
__hwrm_release_token(bp, token);
token = NULL;
}
len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
if (len) {
__le16 resp_seq = READ_ONCE(ctx->resp->seq_id);
if (resp_seq == ctx->req->seq_id)
break;
if (resp_seq != seen_out_of_seq) {
netdev_warn(bp->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n",
le16_to_cpu(resp_seq),
req_type,
le16_to_cpu(ctx->req->seq_id));
seen_out_of_seq = resp_seq;
}
}
/* on first few passes, just barely sleep */
if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
HWRM_SHORT_MAX_TIMEOUT);
} else {
if (hwrm_wait_must_abort(bp, req_type, &sts)) {
hwrm_err(bp, ctx, "Abandoning msg {0x%x 0x%x} len: %d due to firmware status: 0x%x\n",
req_type,
le16_to_cpu(ctx->req->seq_id),
len, sts);
goto exit;
}
usleep_range(HWRM_MIN_TIMEOUT,
HWRM_MAX_TIMEOUT);
}
}
if (i >= tmo_count) {
hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
hwrm_total_timeout(i), req_type,
le16_to_cpu(ctx->req->seq_id), len);
goto exit;
}
/* Last byte of resp contains valid bit */
valid = ((u8 *)ctx->resp) + len - 1;
for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) {
/* make sure we read from updated DMA memory */
dma_rmb();
if (*valid)
break;
if (j < 10) {
udelay(1);
j++;
} else {
usleep_range(20, 30);
j += 20;
}
}
if (j >= HWRM_VALID_BIT_DELAY_USEC) {
hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
hwrm_total_timeout(i) + j, req_type,
le16_to_cpu(ctx->req->seq_id), len, *valid);
goto exit;
}
}
/* Zero valid bit for compatibility. Valid bit in an older spec
* may become a new field in a newer spec. We must make sure that
* a new field not implemented by old spec will read zero.
*/
*valid = 0;
rc = le16_to_cpu(ctx->resp->error_code);
if (rc == HWRM_ERR_CODE_BUSY && !(ctx->flags & BNXT_HWRM_CTX_SILENT))
netdev_warn(bp->dev, "FW returned busy, hwrm req_type 0x%x\n",
req_type);
else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE &&
rc != HWRM_ERR_CODE_ENTITY_NOT_PRESENT)
hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
req_type, le16_to_cpu(ctx->req->seq_id), rc);
#ifndef HSI_DBG_DISABLE
decode_hwrm_resp(ctx->resp);
#endif
rc = __hwrm_to_stderr(rc);
exit:
if (token)
__hwrm_release_token(bp, token);
if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED)
ctx->flags |= BNXT_HWRM_INTERNAL_RESP_DIRTY;
else
__hwrm_ctx_drop(bp, ctx);
return rc;
}
/**
* hwrm_req_send() - Execute an HWRM command.
* @bp: The driver context.
* @req: A pointer to the request to send. The DMA resources associated with
* the request will be released (ie. the request will be consumed) unless
* ownership of the request has been assumed by the caller via a call to
* hwrm_req_hold().
*
* Send an HWRM request to the device and wait for a response. The request is
* consumed if it is not owned by the caller. This function will block until
 * the request has either completed or timed out due to an error.
*
* Return: A result code.
*
* The result is zero on success, otherwise the negative error code indicates
* one of the following errors:
* E2BIG: The request was too large.
 * EBUSY: The firmware is in a fatal state or the request timed out.
 * EACCES: HWRM access denied.
* ENOSPC: HWRM resource allocation error.
* EINVAL: Request parameters are invalid.
* ENOMEM: HWRM has no buffers.
* EAGAIN: HWRM busy or reset in progress.
* EOPNOTSUPP: Invalid request type.
* ENODEV: PCI device is disabled or parent PF is down when issued on VFs.
* EROFS: The request is not allowed due to a secure lock violation.
* EIO: Any other error.
* Error handling is orthogonal to request ownership. An unowned request will
* still be consumed on error. If the caller owns the request, then the caller
* is responsible for releasing the resources. Otherwise, hwrm_req_send() will
* always consume the request.
*/
int hwrm_req_send(struct bnxt *bp, void *req)
{
struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
if (!ctx)
return -EINVAL;
return __hwrm_send(bp, ctx);
}
/**
* hwrm_req_send_silent() - A silent version of hwrm_req_send().
* @bp: The driver context.
* @req: The request to send without logging.
*
 * The same as hwrm_req_send(), except that the request is silenced with
 * BNXT_HWRM_CTX_SILENT via hwrm_req_flags() prior to the call. This
 * version of the function is provided solely to preserve the legacy API's
 * flavor for this functionality.
*
* Return: A result code, see hwrm_req_send().
*/
int hwrm_req_send_silent(struct bnxt *bp, void *req)
{
hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
return hwrm_req_send(bp, req);
}
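/* Sketch: probing an optional firmware command without polluting the
 * kernel log on firmware that lacks it; unsupported commands map to
 * -EOPNOTSUPP in __hwrm_to_stderr() above:
 *
 *	rc = hwrm_req_send_silent(bp, req);
 *	if (rc == -EOPNOTSUPP)
 *		return 0;
 */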
/**
* hwrm_req_dma_slice() - Allocate a slice of DMA mapped memory.
* @bp: The driver context.
* @req: The request for which indirect data will be associated.
* @size: The size of the allocation.
* @dma_handle: The bus address associated with the allocation. The HWRM API has
* no knowledge about the type of the request and so cannot infer how the
* caller intends to use the indirect data. Thus, the caller is
* responsible for configuring the request object appropriately to
* point to the associated indirect memory. Note, DMA handle has the
 * same definition as it does in dma_alloc_coherent(); the caller is
* responsible for endian conversions via cpu_to_le64() before assigning
* this address.
*
* Allocates DMA mapped memory for indirect data related to a request. The
* lifetime of the DMA resources will be bound to that of the request (ie.
* they will be automatically released when the request is either consumed by
* hwrm_req_send() or dropped by hwrm_req_drop()). Small allocations are
* efficiently suballocated out of the request buffer space, hence the name
* slice, while larger requests are satisfied via an underlying call to
* dma_alloc_coherent(). Multiple suballocations are supported, however, only
* one externally mapped region is.
*
* Return: The kernel virtual address of the DMA mapping.
*/
void *
hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma_handle)
{
struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
u8 *end = ((u8 *)req) + BNXT_HWRM_DMA_SIZE;
struct input *input = req;
u8 *addr, *req_addr = req;
u32 max_offset, offset;
if (!ctx)
return NULL;
max_offset = BNXT_HWRM_DMA_SIZE - ctx->allocated;
offset = max_offset - size;
offset = ALIGN_DOWN(offset, BNXT_HWRM_DMA_ALIGN);
addr = req_addr + offset;
if (addr < req_addr + max_offset && req_addr + ctx->req_len <= addr) {
ctx->allocated = end - addr;
*dma_handle = ctx->dma_handle + offset;
return addr;
}
/* could not suballocate from ctx buffer, try create a new mapping */
if (ctx->slice_addr) {
/* if one exists, can only be due to software bug, be loud */
netdev_err(bp->dev, "HWRM refusing to reallocate DMA slice, req_type = %u\n",
(u32)le16_to_cpu(input->req_type));
dump_stack();
return NULL;
}
addr = dma_alloc_coherent(&bp->pdev->dev, size, dma_handle, ctx->gfp);
if (!addr)
return NULL;
ctx->slice_addr = addr;
ctx->slice_size = size;
ctx->slice_handle = *dma_handle;
return addr;
}
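/* Sketch of the slice pattern (illustrative; HWRM_NVM_GET_VARIABLE and
 * its dest_data_addr field are also exercised by bnxt_lfc.c later in
 * this commit, and len is a caller-chosen buffer size):
 *
 *	struct hwrm_nvm_get_variable_input *req;
 *	dma_addr_t mapping;
 *	void *buf;
 *	int rc;
 *
 *	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
 *	if (rc)
 *		return rc;
 *	buf = hwrm_req_dma_slice(bp, req, len, &mapping);
 *	if (!buf) {
 *		hwrm_req_drop(bp, req);
 *		return -ENOMEM;
 *	}
 *	req->dest_data_addr = cpu_to_le64(mapping);
 *	return hwrm_req_send(bp, req);
 */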

View File

@@ -0,0 +1,157 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2020-2022 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_HWRM_H
#define BNXT_HWRM_H
#include "bnxt_hsi.h"
enum bnxt_hwrm_ctx_flags {
/* Update the HWRM_API_FLAGS right below for any new non-internal bit added here */
BNXT_HWRM_INTERNAL_CTX_OWNED = BIT(0), /* caller owns the context */
BNXT_HWRM_INTERNAL_RESP_DIRTY = BIT(1), /* response contains data */
BNXT_HWRM_CTX_SILENT = BIT(2), /* squelch firmware errors */
BNXT_HWRM_FULL_WAIT = BIT(3), /* wait for full timeout of HWRM command */
};
#define HWRM_API_FLAGS (BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT)
struct bnxt_hwrm_ctx {
u64 sentinel;
dma_addr_t dma_handle;
struct output *resp;
struct input *req;
dma_addr_t slice_handle;
void *slice_addr;
u32 slice_size;
u32 req_len;
enum bnxt_hwrm_ctx_flags flags;
unsigned int timeout;
u32 allocated;
gfp_t gfp;
};
enum bnxt_hwrm_wait_state {
BNXT_HWRM_PENDING,
BNXT_HWRM_DEFERRED,
BNXT_HWRM_COMPLETE,
BNXT_HWRM_CANCELLED,
};
enum bnxt_hwrm_chnl { BNXT_HWRM_CHNL_CHIMP, BNXT_HWRM_CHNL_KONG };
struct bnxt_hwrm_wait_token {
struct rcu_head rcu;
struct hlist_node node;
enum bnxt_hwrm_wait_state state;
enum bnxt_hwrm_chnl dst;
u16 seq_id;
};
void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s);
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
#define SHORT_HWRM_CMD_TIMEOUT 20
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_CMD_MAX_TIMEOUT 40000U
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
#define HWRM_COREDUMP_TIMEOUT (bp->hwrm_cmd_max_timeout)
#ifdef BNXT_FPGA
#define HWRM_FPGA_TIMEOUT 5000
#endif
#define BNXT_HWRM_TARGET 0xffff
#define BNXT_HWRM_NO_CMPL_RING -1
#define BNXT_HWRM_REQ_MAX_SIZE 128
#define BNXT_HWRM_DMA_SIZE (2 * PAGE_SIZE) /* space for req+resp */
#define BNXT_HWRM_RESP_RESERVED PAGE_SIZE
#define BNXT_HWRM_RESP_OFFSET (BNXT_HWRM_DMA_SIZE - \
BNXT_HWRM_RESP_RESERVED)
#define BNXT_HWRM_CTX_OFFSET (BNXT_HWRM_RESP_OFFSET - \
sizeof(struct bnxt_hwrm_ctx))
#define BNXT_HWRM_DMA_ALIGN 16
#define BNXT_HWRM_SENTINEL 0xb6e1f68a12e9a7eb /* arbitrary value */
#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
BNXT_HWRM_REQ_MAX_SIZE)
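/* Resulting layout of each request's DMA buffer, derived from the
 * offsets above (a sketch, not an additional definition):
 *
 *	[0, BNXT_HWRM_CTX_OFFSET)		request and DMA slices
 *	[BNXT_HWRM_CTX_OFFSET, PAGE_SIZE)	struct bnxt_hwrm_ctx
 *	[PAGE_SIZE, 2 * PAGE_SIZE)		response
 */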
#define HWRM_SHORT_MIN_TIMEOUT 3
#define HWRM_SHORT_MAX_TIMEOUT 10
#define HWRM_SHORT_TIMEOUT_COUNTER 5
#define HWRM_MIN_TIMEOUT 25
#define HWRM_MAX_TIMEOUT 40
static inline unsigned int hwrm_total_timeout(unsigned int n)
{
return n <= HWRM_SHORT_TIMEOUT_COUNTER ? n * HWRM_SHORT_MIN_TIMEOUT :
HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT +
(n - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT;
}
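/* With the defaults above, hwrm_total_timeout(5) covers the five short
 * polls (5 * 3 = 15 usec), while hwrm_total_timeout(9) adds four
 * standard polls (15 + 4 * 25 = 115 usec).
 */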
#define HWRM_VALID_BIT_DELAY_USEC 50000
static inline bool hwrm_req_type_cfa(u16 req_type)
{
switch (req_type) {
case HWRM_CFA_ENCAP_RECORD_ALLOC:
case HWRM_CFA_ENCAP_RECORD_FREE:
case HWRM_CFA_DECAP_FILTER_ALLOC:
case HWRM_CFA_DECAP_FILTER_FREE:
case HWRM_CFA_EM_FLOW_ALLOC:
case HWRM_CFA_EM_FLOW_FREE:
case HWRM_CFA_EM_FLOW_CFG:
case HWRM_CFA_FLOW_ALLOC:
case HWRM_CFA_FLOW_FREE:
case HWRM_CFA_FLOW_INFO:
case HWRM_CFA_FLOW_FLUSH:
case HWRM_CFA_FLOW_STATS:
case HWRM_CFA_METER_PROFILE_ALLOC:
case HWRM_CFA_METER_PROFILE_FREE:
case HWRM_CFA_METER_PROFILE_CFG:
case HWRM_CFA_METER_INSTANCE_ALLOC:
case HWRM_CFA_METER_INSTANCE_FREE:
return true;
default:
return false;
}
}
static inline bool hwrm_req_kong(struct bnxt *bp, struct input *req)
{
return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL &&
(hwrm_req_type_cfa(le16_to_cpu(req->req_type)) ||
le16_to_cpu(req->target_id) == HWRM_TARGET_ID_KONG));
}
int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len);
#define hwrm_req_init(bp, req, req_type) \
__hwrm_req_init((bp), (void **)&(req), (req_type), sizeof(*(req)))
void *hwrm_req_hold(struct bnxt *bp, void *req);
void hwrm_req_drop(struct bnxt *bp, void *req);
void hwrm_req_flags(struct bnxt *bp, void *req, enum bnxt_hwrm_ctx_flags flags);
void hwrm_req_timeout(struct bnxt *bp, void *req, unsigned int timeout);
int hwrm_req_send(struct bnxt *bp, void *req);
int hwrm_req_send_silent(struct bnxt *bp, void *req);
int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len);
void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t flags);
void *hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma);
/* Older devices can only support a req length of 128 bytes.
 * HWRM_FUNC_CFG requests that don't need the fields starting at
* num_quic_tx_key_ctxs can use this helper to avoid getting -E2BIG.
*/
static inline int
bnxt_hwrm_func_cfg_short_req_init(struct bnxt *bp,
struct hwrm_func_cfg_input **req)
{
u32 req_len;
req_len = min_t(u32, sizeof(**req), bp->hwrm_max_ext_req_len);
return __hwrm_req_init(bp, (void **)req, HWRM_FUNC_CFG, req_len);
}
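/* Usage sketch (assumes a valid bp; fid 0xffff is the conventional
 * "own function" id used elsewhere in the driver):
 *
 *	struct hwrm_func_cfg_input *req;
 *	int rc;
 *
 *	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 *	if (rc)
 *		return rc;
 *	req->fid = cpu_to_le16(0xffff);
 *	return hwrm_req_send(bp, req);
 */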
#endif

File diff suppressed because it is too large

View File

@@ -0,0 +1,267 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2022-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_KTLS_H
#define BNXT_KTLS_H
#include <linux/hashtable.h>
#define BNXT_MAX_TX_CRYPTO_KEYS 16384
#define BNXT_MAX_RX_CRYPTO_KEYS 16384
#define BNXT_TX_CRYPTO_KEY_TYPE FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_TX
#define BNXT_RX_CRYPTO_KEY_TYPE FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_RX
#define BNXT_KID_BATCH_SIZE 128
struct bnxt_kid_info {
struct list_head list;
u32 start_id;
u32 count;
DECLARE_BITMAP(ids, BNXT_KID_BATCH_SIZE);
};
struct bnxt_kctx {
struct list_head list;
/* to serialize update to the linked list and total_alloc */
spinlock_t lock;
u8 type;
u32 total_alloc;
u32 max_ctx;
atomic_t alloc_pending;
#define BNXT_KCTX_ALLOC_PENDING_MAX 8
wait_queue_head_t alloc_pending_wq;
unsigned long *partition_bmap;
unsigned int next;
};
#define BNXT_KCTX_ALLOC_OK(kctx) \
(atomic_read(&((kctx)->alloc_pending)) < BNXT_KCTX_ALLOC_PENDING_MAX)
struct bnxt_kfltr_info {
u32 kid;
__le64 filter_id;
struct hlist_node hash;
struct rcu_head rcu;
};
#define BNXT_MAX_CRYPTO_KEY_TYPE (BNXT_RX_CRYPTO_KEY_TYPE + 1)
struct bnxt_ktls_info {
u16 max_key_ctxs_alloc;
u16 ctxs_per_partition;
u8 partition_mode:1;
struct bnxt_kctx kctx[BNXT_MAX_CRYPTO_KEY_TYPE];
struct kmem_cache *mpc_cache;
atomic_t pending;
DECLARE_HASHTABLE(filter_tbl, 8);
/* to serialize adding to and deleting from the filter_tbl */
spinlock_t filter_lock;
u32 filter_count;
#define BNXT_MAX_KTLS_FILTER 460
#define BNXT_KTLS_TX_ADD 0
#define BNXT_KTLS_TX_DEL 1
#define BNXT_KTLS_TX_HW_PKT 2
#define BNXT_KTLS_TX_SW_PKT 3
#define BNXT_KTLS_TX_OOO 4
#define BNXT_KTLS_TX_RETRANS 5
#define BNXT_KTLS_TX_REPLAY 6
#define BNXT_KTLS_RX_ADD 7
#define BNXT_KTLS_RX_DEL 8
#define BNXT_KTLS_RX_HW_PKT 9
#define BNXT_KTLS_RX_SW_PKT 10
#define BNXT_KTLS_RX_RESYNC_REQ 11
#define BNXT_KTLS_RX_RESYNC_ACK 12
#define BNXT_KTLS_RX_RESYNC_DISCARD 13
#define BNXT_KTLS_RX_RESYNC_NAK 14
#define BNXT_KTLS_MAX_COUNTERS 15
atomic64_t counters[BNXT_KTLS_MAX_COUNTERS];
};
#define tck kctx[BNXT_TX_CRYPTO_KEY_TYPE]
#define rck kctx[BNXT_RX_CRYPTO_KEY_TYPE]
struct bnxt_ktls_offload_ctx_tx {
u32 tcp_seq_no;
u32 kid;
};
struct bnxt_ktls_offload_ctx_rx {
u32 kid;
/* to protect resync state */
spinlock_t resync_lock;
u32 resync_tcp_seq_no;
u32 bytes_since_resync;
unsigned long resync_timestamp;
u8 resync_pending:1;
};
#define BNXT_KTLS_RESYNC_TMO msecs_to_jiffies(2500)
#define BNXT_KTLS_MAX_RESYNC_BYTES 32768
struct ce_add_cmd {
__le32 ver_algo_kid_opcode;
#define CE_ADD_CMD_OPCODE_MASK 0xfUL
#define CE_ADD_CMD_OPCODE_SFT 0
#define CE_ADD_CMD_OPCODE_ADD 0x1UL
#define CE_ADD_CMD_KID_MASK 0xfffff0UL
#define CE_ADD_CMD_KID_SFT 4
#define CE_ADD_CMD_ALGORITHM_MASK 0xf000000UL
#define CE_ADD_CMD_ALGORITHM_SFT 24
#define CE_ADD_CMD_ALGORITHM_AES_GCM_128 0x1000000UL
#define CE_ADD_CMD_ALGORITHM_AES_GCM_256 0x2000000UL
#define CE_ADD_CMD_VERSION_MASK 0xf0000000UL
#define CE_ADD_CMD_VERSION_SFT 28
#define CE_ADD_CMD_VERSION_TLS1_2 (0x0UL << 28)
#define CE_ADD_CMD_VERSION_TLS1_3 (0x1UL << 28)
u8 ctx_kind;
#define CE_ADD_CMD_CTX_KIND_MASK 0x1fUL
#define CE_ADD_CMD_CTX_KIND_SFT 0
#define CE_ADD_CMD_CTX_KIND_CK_TX 0x11UL
#define CE_ADD_CMD_CTX_KIND_CK_RX 0x12UL
u8 unused0[3];
u8 salt[4];
u8 unused1[4];
__le32 pkt_tcp_seq_num;
__le32 tls_header_tcp_seq_num;
u8 record_seq_num[8];
u8 session_key[32];
u8 addl_iv[8];
};
#define record_seq_num_end record_seq_num[7]
struct ce_delete_cmd {
__le32 ctx_kind_kid_opcode;
#define CE_DELETE_CMD_OPCODE_MASK 0xfUL
#define CE_DELETE_CMD_OPCODE_SFT 0
#define CE_DELETE_CMD_OPCODE_DEL 0x2UL
#define CE_DELETE_CMD_KID_MASK 0xfffff0UL
#define CE_DELETE_CMD_KID_SFT 4
#define CE_DELETE_CMD_CTX_KIND_MASK 0x1f000000UL
#define CE_DELETE_CMD_CTX_KIND_SFT 24
#define CE_DELETE_CMD_CTX_KIND_CK_TX (0x11UL << 24)
#define CE_DELETE_CMD_CTX_KIND_CK_RX (0x12UL << 24)
};
struct ce_resync_resp_ack_cmd {
__le32 resync_status_kid_opcode;
#define CE_RESYNC_RESP_ACK_CMD_OPCODE_MASK 0xfUL
#define CE_RESYNC_RESP_ACK_CMD_OPCODE_SFT 0
#define CE_RESYNC_RESP_ACK_CMD_OPCODE_RESYNC 0x3UL
#define CE_RESYNC_RESP_ACK_CMD_KID_MASK 0xfffff0UL
#define CE_RESYNC_RESP_ACK_CMD_KID_SFT 4
#define CE_RESYNC_RESP_ACK_CMD_RESYNC_STATUS 0x1000000UL
#define CE_RESYNC_RESP_ACK_CMD_RESYNC_STATUS_ACK (0x0UL << 24)
#define CE_RESYNC_RESP_ACK_CMD_RESYNC_STATUS_NAK (0x1UL << 24)
__le32 resync_record_tcp_seq_num;
u8 resync_record_seq_num[8];
};
#define resync_record_seq_num_end resync_record_seq_num[7]
#define CE_CMD_KID_MASK 0xfffff0UL
#define CE_CMD_KID_SFT 4
#define CE_CMD_KID(cmd_p) \
((*(u32 *)(cmd_p) & CE_CMD_KID_MASK) >> CE_CMD_KID_SFT)
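/* For example, a command word of 0x00001230 carries
 * KID = (0x00001230 & 0xfffff0) >> 4 = 0x123.
 */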
#define BNXT_KMPC_OPAQUE(client, kid) \
(((client) << 24) | (kid))
#define BNXT_INV_KMPC_OPAQUE 0xffffffff
struct ce_cmpl {
__le16 client_subtype_type;
#define CE_CMPL_TYPE_MASK 0x3fUL
#define CE_CMPL_TYPE_SFT 0
#define CE_CMPL_TYPE_MID_PATH_SHORT 0x1eUL
#define CE_CMPL_SUBTYPE_MASK 0xf00UL
#define CE_CMPL_SUBTYPE_SFT 8
#define CE_CMPL_SUBTYPE_SOLICITED (0x0UL << 8)
#define CE_CMPL_SUBTYPE_ERR (0x1UL << 8)
#define CE_CMPL_SUBTYPE_RESYNC (0x2UL << 8)
#define CE_CMPL_MP_CLIENT_MASK 0xf000UL
#define CE_CMPL_MP_CLIENT_SFT 12
#define CE_CMPL_MP_CLIENT_TCE (0x0UL << 12)
#define CE_CMPL_MP_CLIENT_RCE (0x1UL << 12)
__le16 status;
#define CE_CMPL_STATUS_MASK 0xfUL
#define CE_CMPL_STATUS_SFT 0
#define CE_CMPL_STATUS_OK 0x0UL
#define CE_CMPL_STATUS_CTX_LD_ERR 0x1UL
#define CE_CMPL_STATUS_FID_CHK_ERR 0x2UL
#define CE_CMPL_STATUS_CTX_VER_ERR 0x3UL
#define CE_CMPL_STATUS_DST_ID_ERR 0x4UL
#define CE_CMPL_STATUS_MP_CMD_ERR 0x5UL
u32 opaque;
__le32 v;
#define CE_CMPL_V 0x1UL
__le32 kid;
#define CE_CMPL_KID_MASK 0xfffffUL
#define CE_CMPL_KID_SFT 0
};
#define CE_CMPL_STATUS(ce_cmpl) \
(le16_to_cpu((ce_cmpl)->status) & CE_CMPL_STATUS_MASK)
#define CE_CMPL_KID(ce_cmpl) \
(le32_to_cpu((ce_cmpl)->kid) & CE_CMPL_KID_MASK)
struct crypto_prefix_cmd {
__le32 flags;
#define CRYPTO_PREFIX_CMD_FLAGS_UPDATE_IN_ORDER_VAR 0x1UL
#define CRYPTO_PREFIX_CMD_FLAGS_FULL_REPLAY_RETRAN 0x2UL
__le32 header_tcp_seq_num;
__le32 start_tcp_seq_num;
__le32 end_tcp_seq_num;
u8 explicit_nonce[8];
u8 record_seq_num[8];
};
#define CRYPTO_PREFIX_CMD_SIZE ((u32)sizeof(struct crypto_prefix_cmd))
#define CRYPTO_PREFIX_CMD_BDS (CRYPTO_PREFIX_CMD_SIZE / sizeof(struct tx_bd))
#define CRYPTO_PRESYNC_BDS (CRYPTO_PREFIX_CMD_BDS + 1)
#define CRYPTO_PRESYNC_BD_CMD \
(cpu_to_le32((CRYPTO_PREFIX_CMD_SIZE << TX_BD_LEN_SHIFT) | \
(CRYPTO_PRESYNC_BDS << TX_BD_FLAGS_BD_CNT_SHIFT) | \
TX_BD_TYPE_PRESYNC_TX_BD))
struct bnxt_crypto_cmd_ctx {
struct completion cmp;
struct ce_cmpl ce_cmp;
};
static inline bool bnxt_ktls_busy(struct bnxt *bp)
{
return bp->ktls_info && atomic_read(&bp->ktls_info->pending) > 0;
}
void bnxt_alloc_ktls_info(struct bnxt *bp, struct hwrm_func_qcaps_output *resp);
void bnxt_clear_cfa_tls_filters_tbl(struct bnxt *bp);
void bnxt_free_ktls_info(struct bnxt *bp);
void bnxt_hwrm_reserve_pf_key_ctxs(struct bnxt *bp,
struct hwrm_func_cfg_input *req);
int bnxt_ktls_init(struct bnxt *bp);
void bnxt_ktls_mpc_cmp(struct bnxt *bp, u32 client, unsigned long handle,
struct bnxt_cmpl_entry cmpl[], u32 entries);
struct sk_buff *bnxt_ktls_xmit(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
struct sk_buff *skb, __le32 *lflags, u32 *kid);
void bnxt_ktls_rx(struct bnxt *bp, struct sk_buff *skb, u8 *data_ptr,
unsigned int len, struct rx_cmp *rxcmp,
struct rx_cmp_ext *rxcmp1);
#endif

View File

@@ -0,0 +1,806 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2017-2018 Broadcom Limited
* Copyright (c) 2018-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/rtnetlink.h>
#include "bnxt_compat.h"
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_lfc.h"
#include "bnxt_lfc_ioctl.h"
#ifdef CONFIG_BNXT_LFC
#ifdef HAVE_MODULE_IMPORT_NS_DMA_BUF
MODULE_IMPORT_NS(DMA_BUF);
#endif
#define MAX_LFC_CACHED_NET_DEVICES 32
#define PRIME_1 29
#define PRIME_2 31
static struct bnxt_gloabl_dev blfc_global_dev;
static struct bnxt_lfc_dev_array blfc_array[MAX_LFC_CACHED_NET_DEVICES];
static bool bnxt_lfc_inited;
static bool is_domain_available;
static int domain_no;
static int lfc_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
u32 i;
struct bnxt_lfc_dev *blfc_dev;
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
switch (event) {
case NETDEV_UNREGISTER:
for (i = 0; i < MAX_LFC_CACHED_NET_DEVICES; i++) {
blfc_dev = blfc_array[i].bnxt_lfc_dev;
if (blfc_dev && blfc_dev->ndev == dev) {
dev_put(blfc_dev->ndev);
kfree(blfc_dev);
blfc_array[i].bnxt_lfc_dev = NULL;
blfc_array[i].taken = 0;
break;
}
}
break;
default:
/* do nothing */
break;
}
return 0;
}
struct notifier_block lfc_device_notifier = {
.notifier_call = lfc_device_event
};
static u32 bnxt_lfc_get_hash_key(u32 bus, u32 devfn)
{
return ((bus * PRIME_1 + devfn) * PRIME_2) % MAX_LFC_CACHED_NET_DEVICES;
}
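/* For example, bus 1, devfn 0 hashes to
 * ((1 * 29 + 0) * 31) % 32 = 899 % 32 = 3.
 */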
static int bnxt_lfc_send_hwrm(struct bnxt *bp, struct bnxt_fw_msg *fw_msg)
{
struct output *resp;
struct input *req;
u32 resp_len;
int rc;
if (bp->fw_reset_state)
return -EBUSY;
rc = hwrm_req_init(bp, req, 0 /* don't care */);
if (rc)
return rc;
rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
if (rc)
return rc;
hwrm_req_timeout(bp, req, fw_msg->timeout);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
resp_len = le16_to_cpu(resp->resp_len);
if (resp_len) {
if (fw_msg->resp_max_len < resp_len)
resp_len = fw_msg->resp_max_len;
memcpy(fw_msg->resp, resp, resp_len);
}
hwrm_req_drop(bp, req);
return rc;
}
static bool bnxt_lfc_is_valid_pdev(struct pci_dev *pdev)
{
int32_t idx;
if (!pdev) {
BNXT_LFC_ERR(NULL, "No such PCI device\n");
return false;
}
if (pdev->vendor != PCI_VENDOR_ID_BROADCOM) {
pci_dev_put(pdev);
BNXT_LFC_ERR(NULL, "Not a Broadcom PCI device\n");
return false;
}
if (strncmp(dev_driver_string(&pdev->dev), "bnxt_en", 7)) {
BNXT_LFC_DEBUG(&pdev->dev,
"This device is not owned by bnxt_en, instead owned by %s\n",
dev_driver_string(&pdev->dev));
pci_dev_put(pdev);
return false;
}
for (idx = 0; bnxt_pci_tbl[idx].device != 0; idx++) {
if (pdev->device == bnxt_pci_tbl[idx].device) {
BNXT_LFC_DEBUG(&pdev->dev, "Found valid PCI device\n");
return true;
}
}
pci_dev_put(pdev);
BNXT_LFC_ERR(NULL, "PCI device not supported\n");
return false;
}
static void bnxt_lfc_init_req_hdr(struct input *req_hdr, u16 req_type,
u16 cpr_id, u16 tgt_id)
{
req_hdr->req_type = cpu_to_le16(req_type);
req_hdr->cmpl_ring = cpu_to_le16(cpr_id);
req_hdr->target_id = cpu_to_le16(tgt_id);
}
static void bnxt_lfc_prep_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
int32_t msg_len, void *resp,
int32_t resp_max_len,
int32_t timeout)
{
fw_msg->msg = msg;
fw_msg->msg_len = msg_len;
fw_msg->resp = resp;
fw_msg->resp_max_len = resp_max_len;
fw_msg->timeout = timeout;
}
static int32_t bnxt_lfc_process_nvm_flush(struct bnxt_lfc_dev *blfc_dev)
{
struct bnxt *bp = blfc_dev->bp;
int32_t rc = 0;
struct hwrm_nvm_flush_input req = {0};
struct hwrm_nvm_flush_output resp = {0};
struct bnxt_fw_msg fw_msg;
bnxt_lfc_init_req_hdr((void *)&req,
HWRM_NVM_FLUSH, -1, -1);
bnxt_lfc_prep_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), BNXT_NVM_FLUSH_TIMEOUT);
rc = bnxt_lfc_send_hwrm(bp, &fw_msg);
if (rc)
BNXT_LFC_ERR(&blfc_dev->pdev->dev,
"Failed to send NVM_FLUSH FW msg, rc = 0x%x", rc);
return rc;
}
static int32_t bnxt_lfc_process_nvm_get_var_req(struct bnxt_lfc_dev *blfc_dev,
struct bnxt_lfc_nvm_get_var_req
*nvm_get_var_req)
{
int32_t rc;
uint16_t len_in_bytes;
struct pci_dev *pdev = blfc_dev->pdev;
struct bnxt *bp = blfc_dev->bp;
struct hwrm_nvm_get_variable_input req = {0};
struct hwrm_nvm_get_variable_output resp = {0};
struct bnxt_fw_msg fw_msg;
void *dest_data_addr = NULL;
dma_addr_t dest_data_dma_addr;
if (nvm_get_var_req->len_in_bits == 0) {
BNXT_LFC_ERR(&blfc_dev->pdev->dev, "Invalid Length\n");
return -ENOMEM;
}
len_in_bytes = (nvm_get_var_req->len_in_bits + 7) / 8;
dest_data_addr = dma_alloc_coherent(&pdev->dev,
len_in_bytes,
&dest_data_dma_addr,
GFP_KERNEL);
if (dest_data_addr == NULL) {
BNXT_LFC_ERR(&blfc_dev->pdev->dev,
"Failed to alloc mem for data\n");
return -ENOMEM;
}
bnxt_lfc_init_req_hdr((void *)&req,
HWRM_NVM_GET_VARIABLE, -1, -1);
req.dest_data_addr = cpu_to_le64(dest_data_dma_addr);
req.data_len = cpu_to_le16(nvm_get_var_req->len_in_bits);
req.option_num = cpu_to_le16(nvm_get_var_req->option_num);
req.dimensions = cpu_to_le16(nvm_get_var_req->dimensions);
req.index_0 = cpu_to_le16(nvm_get_var_req->index_0);
req.index_1 = cpu_to_le16(nvm_get_var_req->index_1);
req.index_2 = cpu_to_le16(nvm_get_var_req->index_2);
req.index_3 = cpu_to_le16(nvm_get_var_req->index_3);
bnxt_lfc_prep_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
rc = bnxt_lfc_send_hwrm(bp, &fw_msg);
if (rc) {
BNXT_LFC_ERR(&blfc_dev->pdev->dev,
"Failed to send NVM_GET_VARIABLE FW msg, rc = 0x%x",
rc);
goto done;
}
rc = copy_to_user(nvm_get_var_req->out_val, dest_data_addr,
len_in_bytes);
if (rc != 0) {
BNXT_LFC_ERR(&blfc_dev->pdev->dev,
"Failed to send %d characters to the user\n", rc);
rc = -EFAULT;
}
done:
	dma_free_coherent(&pdev->dev, len_in_bytes,
dest_data_addr,
dest_data_dma_addr);
return rc;
}
static int32_t bnxt_lfc_process_nvm_set_var_req(struct bnxt_lfc_dev *blfc_dev,
struct bnxt_lfc_nvm_set_var_req
*nvm_set_var_req)
{
int32_t rc;
uint16_t len_in_bytes;
struct pci_dev *pdev = blfc_dev->pdev;
struct bnxt *bp = blfc_dev->bp;
struct hwrm_nvm_set_variable_input req = {0};
struct hwrm_nvm_set_variable_output resp = {0};
struct bnxt_fw_msg fw_msg;
void *src_data_addr = NULL;
dma_addr_t src_data_dma_addr;
if (nvm_set_var_req->len_in_bits == 0) {
BNXT_LFC_ERR(&blfc_dev->pdev->dev, "Invalid Length\n");
return -ENOMEM;
}
len_in_bytes = (nvm_set_var_req->len_in_bits + 7) / 8;
src_data_addr = dma_alloc_coherent(&pdev->dev,
len_in_bytes,
&src_data_dma_addr,
GFP_KERNEL);
if (src_data_addr == NULL) {
BNXT_LFC_ERR(&blfc_dev->pdev->dev,
"Failed to alloc mem for data\n");
return -ENOMEM;
}
rc = copy_from_user(src_data_addr,
nvm_set_var_req->in_val,
len_in_bytes);
if (rc != 0) {
BNXT_LFC_ERR(&blfc_dev->pdev->dev,
"Failed to send %d bytes from the user\n", rc);
rc = -EFAULT;
goto done;
}
bnxt_lfc_init_req_hdr((void *)&req,
HWRM_NVM_SET_VARIABLE, -1, -1);
req.src_data_addr = cpu_to_le64(src_data_dma_addr);
req.data_len = cpu_to_le16(nvm_set_var_req->len_in_bits);
req.option_num = cpu_to_le16(nvm_set_var_req->option_num);
req.dimensions = cpu_to_le16(nvm_set_var_req->dimensions);
req.index_0 = cpu_to_le16(nvm_set_var_req->index_0);
req.index_1 = cpu_to_le16(nvm_set_var_req->index_1);
req.index_2 = cpu_to_le16(nvm_set_var_req->index_2);
req.index_3 = cpu_to_le16(nvm_set_var_req->index_3);
bnxt_lfc_prep_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
rc = bnxt_lfc_send_hwrm(bp, &fw_msg);
if (rc)
BNXT_LFC_ERR(&blfc_dev->pdev->dev,
"Failed to send NVM_SET_VARIABLE FW msg, rc = 0x%x", rc);
done:
dma_free_coherent(&pdev->dev, len_in_bytes,
src_data_addr,
src_data_dma_addr);
return rc;
}
static int32_t bnxt_lfc_fill_fw_msg(struct pci_dev *pdev,
struct bnxt_fw_msg *fw_msg,
struct blfc_fw_msg *msg)
{
int32_t rc = 0;
if (copy_from_user(fw_msg->msg,
(void __user *)((unsigned long)msg->usr_req),
msg->len_req)) {
BNXT_LFC_ERR(&pdev->dev, "Failed to copy data from user\n");
return -EFAULT;
}
fw_msg->msg_len = msg->len_req;
fw_msg->resp_max_len = msg->len_resp;
if (!msg->timeout)
fw_msg->timeout = DFLT_HWRM_CMD_TIMEOUT;
else
fw_msg->timeout = msg->timeout;
return rc;
}
static int32_t bnxt_lfc_prepare_dma_operations(struct bnxt_lfc_dev *blfc_dev,
struct blfc_fw_msg *msg,
struct bnxt_fw_msg *fw_msg)
{
int32_t rc = 0;
uint8_t i, num_allocated = 0;
void *dma_ptr;
for (i = 0; i < msg->num_dma_indications; i++) {
if (msg->dma[i].length == 0 ||
msg->dma[i].length > MAX_DMA_MEM_SIZE) {
BNXT_LFC_ERR(&blfc_dev->pdev->dev,
"Invalid DMA memory length\n");
rc = -EINVAL;
goto err;
}
blfc_dev->dma_virt_addr[i] = dma_alloc_coherent(
&blfc_dev->pdev->dev,
msg->dma[i].length,
&blfc_dev->dma_addr[i],
GFP_KERNEL);
if (!blfc_dev->dma_virt_addr[i]) {
BNXT_LFC_ERR(&blfc_dev->pdev->dev,
"Failed to allocate memory for data_addr[%d]\n",
i);
rc = -ENOMEM;
goto err;
}
num_allocated++;
if (!(msg->dma[i].read_or_write)) {
if (copy_from_user(blfc_dev->dma_virt_addr[i],
(void __user *)(
(unsigned long)(msg->dma[i].data)),
msg->dma[i].length)) {
BNXT_LFC_ERR(&blfc_dev->pdev->dev,
"Failed to copy data from user for data_addr[%d]\n",
i);
rc = -EFAULT;
goto err;
}
}
dma_ptr = fw_msg->msg + msg->dma[i].offset;
if ((PTR_ALIGN(dma_ptr, 8) == dma_ptr) &&
(msg->dma[i].offset < msg->len_req)) {
__le64 *dmap = dma_ptr;
*dmap = cpu_to_le64(blfc_dev->dma_addr[i]);
} else {
BNXT_LFC_ERR(&blfc_dev->pdev->dev,
"Wrong input parameter\n");
rc = -EINVAL;
goto err;
}
}
return rc;
err:
for (i = 0; i < num_allocated; i++)
dma_free_coherent(&blfc_dev->pdev->dev,
msg->dma[i].length,
blfc_dev->dma_virt_addr[i],
blfc_dev->dma_addr[i]);
return rc;
}
static int32_t bnxt_lfc_process_hwrm(struct bnxt_lfc_dev *blfc_dev,
struct bnxt_lfc_req *lfc_req)
{
int32_t rc = 0, i, hwrm_err = 0;
struct bnxt *bp = blfc_dev->bp;
struct pci_dev *pdev = blfc_dev->pdev;
struct bnxt_fw_msg fw_msg;
struct blfc_fw_msg msg, *msg2 = NULL;
if (copy_from_user(&msg,
(void __user *)((unsigned long)lfc_req->req.hreq),
sizeof(msg))) {
BNXT_LFC_ERR(&pdev->dev, "Failed to copy data from user\n");
return -EFAULT;
}
if (msg.len_req > blfc_dev->bp->hwrm_max_ext_req_len ||
msg.len_resp > BNXT_LFC_MAX_HWRM_RESP_LENGTH) {
BNXT_LFC_ERR(&pdev->dev,
"Invalid length\n");
return -EINVAL;
}
fw_msg.msg = kmalloc(msg.len_req, GFP_KERNEL);
if (!fw_msg.msg) {
BNXT_LFC_ERR(&pdev->dev,
"Failed to allocate input req memory\n");
return -ENOMEM;
}
fw_msg.resp = kmalloc(msg.len_resp, GFP_KERNEL);
if (!fw_msg.resp) {
BNXT_LFC_ERR(&pdev->dev,
"Failed to allocate resp memory\n");
rc = -ENOMEM;
goto err;
}
rc = bnxt_lfc_fill_fw_msg(pdev, &fw_msg, &msg);
if (rc) {
BNXT_LFC_ERR(&pdev->dev,
"Failed to fill the FW data\n");
goto err;
}
if (msg.num_dma_indications) {
if (msg.num_dma_indications > MAX_NUM_DMA_INDICATIONS) {
BNXT_LFC_ERR(&pdev->dev,
"Invalid DMA indications\n");
rc = -EINVAL;
goto err1;
}
msg2 = kmalloc((sizeof(struct blfc_fw_msg) +
(msg.num_dma_indications * sizeof(struct dma_info))),
GFP_KERNEL);
if (!msg2) {
BNXT_LFC_ERR(&pdev->dev,
"Failed to allocate memory\n");
rc = -ENOMEM;
goto err;
}
if (copy_from_user((void *)msg2,
(void __user *)((unsigned long)lfc_req->req.hreq),
(sizeof(struct blfc_fw_msg) +
(msg.num_dma_indications *
sizeof(struct dma_info))))) {
BNXT_LFC_ERR(&pdev->dev,
"Failed to copy data from user\n");
rc = -EFAULT;
goto err;
}
rc = bnxt_lfc_prepare_dma_operations(blfc_dev, msg2, &fw_msg);
if (rc) {
BNXT_LFC_ERR(&pdev->dev,
"Failed to perform DMA operaions\n");
goto err;
}
}
hwrm_err = bnxt_lfc_send_hwrm(bp, &fw_msg);
if (hwrm_err) {
struct input *req = fw_msg.msg;
BNXT_LFC_DEBUG(&pdev->dev,
"Failed to send FW msg type = 0x%x, error = 0x%x",
req->req_type, hwrm_err);
goto err;
}
for (i = 0; i < msg.num_dma_indications; i++) {
if (msg2->dma[i].read_or_write) {
if (copy_to_user((void __user *)
((unsigned long)msg2->dma[i].data),
blfc_dev->dma_virt_addr[i],
msg2->dma[i].length)) {
BNXT_LFC_ERR(&pdev->dev,
"Failed to copy data to user\n");
rc = -EFAULT;
goto err;
}
}
}
err:
for (i = 0; i < msg.num_dma_indications; i++)
dma_free_coherent(&pdev->dev, msg2->dma[i].length,
blfc_dev->dma_virt_addr[i],
blfc_dev->dma_addr[i]);
if (hwrm_err != -EBUSY && hwrm_err != -E2BIG) {
if (copy_to_user((void __user *)((unsigned long)msg.usr_resp),
fw_msg.resp,
msg.len_resp)) {
BNXT_LFC_ERR(&pdev->dev,
"Failed to copy data to user\n");
rc = -EFAULT;
}
}
err1:
kfree(msg2);
kfree(fw_msg.msg);
kfree(fw_msg.resp);
/* If HWRM command fails, return the response error code */
if (hwrm_err)
return hwrm_err;
return rc;
}
static int32_t bnxt_lfc_process_req(struct bnxt_lfc_dev *blfc_dev,
struct bnxt_lfc_req *lfc_req)
{
int32_t rc;
switch (lfc_req->hdr.req_type) {
case BNXT_LFC_NVM_GET_VAR_REQ:
rc = bnxt_lfc_process_nvm_get_var_req(blfc_dev,
&lfc_req->req.nvm_get_var_req);
break;
case BNXT_LFC_NVM_SET_VAR_REQ:
rc = bnxt_lfc_process_nvm_set_var_req(blfc_dev,
&lfc_req->req.nvm_set_var_req);
break;
case BNXT_LFC_NVM_FLUSH_REQ:
rc = bnxt_lfc_process_nvm_flush(blfc_dev);
break;
case BNXT_LFC_GENERIC_HWRM_REQ:
rc = bnxt_lfc_process_hwrm(blfc_dev, lfc_req);
break;
default:
BNXT_LFC_DEBUG(&blfc_dev->pdev->dev,
"No valid request found\n");
return -EINVAL;
}
return rc;
}
static int32_t bnxt_lfc_open(struct inode *inode, struct file *flip)
{
BNXT_LFC_DEBUG(NULL, "open is called");
return 0;
}
static ssize_t bnxt_lfc_read(struct file *filp, char __user *buff,
size_t length, loff_t *offset)
{
return -EINVAL;
}
static ssize_t bnxt_lfc_write(struct file *filp, const char __user *ubuff,
size_t len, loff_t *offset)
{
struct bnxt_lfc_generic_msg kbuff;
if (len != sizeof(kbuff)) {
BNXT_LFC_ERR(NULL, "Invalid length provided (%zu)\n", len);
return -EINVAL;
}
if (copy_from_user(&kbuff, (void __user *)ubuff, len)) {
BNXT_LFC_ERR(NULL, "Failed to copy data from user application\n");
return -EFAULT;
}
switch (kbuff.key) {
case BNXT_LFC_KEY_DOMAIN_NO:
is_domain_available = true;
domain_no = kbuff.value;
break;
default:
BNXT_LFC_ERR(NULL, "Invalid Key provided (%u)\n", kbuff.key);
return -EINVAL;
}
return len;
}
static loff_t bnxt_lfc_seek(struct file *filp, loff_t offset, int32_t whence)
{
return -EINVAL;
}
static long bnxt_lfc_ioctl(struct file *flip, unsigned int cmd,
unsigned long args)
{
int32_t rc;
struct bnxt_lfc_req lfc_req;
u32 index;
struct bnxt_lfc_dev *blfc_dev = NULL;
rc = copy_from_user(&lfc_req, (void __user *)args, sizeof(lfc_req));
if (rc) {
BNXT_LFC_ERR(NULL,
"Failed to send %d bytes from the user\n", rc);
return -EINVAL;
}
switch (cmd) {
case BNXT_LFC_REQ:
BNXT_LFC_DEBUG(NULL, "BNXT_LFC_REQ called");
mutex_lock(&blfc_global_dev.bnxt_lfc_lock);
index = bnxt_lfc_get_hash_key(lfc_req.hdr.bus, lfc_req.hdr.devfn);
if (blfc_array[index].taken) {
if (lfc_req.hdr.devfn != blfc_array[index].bnxt_lfc_dev->devfn ||
lfc_req.hdr.bus != blfc_array[index].bnxt_lfc_dev->bus ||
domain_no != blfc_array[index].bnxt_lfc_dev->domain) {
			/* We have a false hit. Free the older blfc
			 * device and store the new one.
			 */
rtnl_lock();
dev_put(blfc_array[index].bnxt_lfc_dev->ndev);
kfree(blfc_array[index].bnxt_lfc_dev);
blfc_array[index].bnxt_lfc_dev = NULL;
blfc_array[index].taken = 0;
rtnl_unlock();
goto not_taken;
}
blfc_dev = blfc_array[index].bnxt_lfc_dev;
}
else {
not_taken:
blfc_dev = kzalloc(sizeof(struct bnxt_lfc_dev), GFP_KERNEL);
if (!blfc_dev) {
mutex_unlock(&blfc_global_dev.bnxt_lfc_lock);
return -EINVAL;
}
blfc_dev->pdev =
pci_get_domain_bus_and_slot(
((is_domain_available == true) ?
domain_no : 0), lfc_req.hdr.bus,
lfc_req.hdr.devfn);
if (bnxt_lfc_is_valid_pdev(blfc_dev->pdev) != true) {
mutex_unlock(&blfc_global_dev.bnxt_lfc_lock);
kfree(blfc_dev);
return -EINVAL;
}
rtnl_lock();
blfc_dev->ndev = pci_get_drvdata(blfc_dev->pdev);
if (!blfc_dev->ndev) {
printk("Driver with provided BDF doesn't exist\n");
pci_dev_put(blfc_dev->pdev);
rtnl_unlock();
mutex_unlock(&blfc_global_dev.bnxt_lfc_lock);
kfree(blfc_dev);
return -EINVAL;
}
dev_hold(blfc_dev->ndev);
rtnl_unlock();
if (try_module_get(blfc_dev->pdev->driver->driver.owner)) {
blfc_dev->bp = netdev_priv(blfc_dev->ndev);
if (!blfc_dev->bp)
rc = -EINVAL;
module_put(blfc_dev->pdev->driver->driver.owner);
} else {
rc = -EINVAL;
}
pci_dev_put(blfc_dev->pdev);
if (rc) {
dev_put(blfc_dev->ndev);
kfree(blfc_dev);
is_domain_available = false;
mutex_unlock(&blfc_global_dev.bnxt_lfc_lock);
return -EINVAL;
}
blfc_dev->bus = lfc_req.hdr.bus;
blfc_dev->devfn = lfc_req.hdr.devfn;
blfc_dev->domain = domain_no;
rtnl_lock();
blfc_array[index].bnxt_lfc_dev = blfc_dev;
blfc_array[index].taken = 1;
rtnl_unlock();
}
rc = bnxt_lfc_process_req(blfc_dev, &lfc_req);
mutex_unlock(&blfc_global_dev.bnxt_lfc_lock);
break;
default:
BNXT_LFC_ERR(NULL, "No Valid IOCTL found\n");
return -EINVAL;
}
return rc;
}
static int32_t bnxt_lfc_release(struct inode *inode, struct file *filp)
{
BNXT_LFC_DEBUG(NULL, "release is called");
return 0;
}
int32_t __init bnxt_lfc_init(void)
{
int32_t rc;
rc = alloc_chrdev_region(&blfc_global_dev.d_dev, 0, 1, BNXT_LFC_DEV_NAME);
if (rc < 0) {
BNXT_LFC_ERR(NULL, "Allocation of char dev region is failed\n");
return rc;
}
blfc_global_dev.d_class = class_create(THIS_MODULE, BNXT_LFC_DEV_NAME);
if (IS_ERR(blfc_global_dev.d_class)) {
BNXT_LFC_ERR(NULL, "Class creation is failed\n");
unregister_chrdev_region(blfc_global_dev.d_dev, 1);
return -1;
}
if (IS_ERR(device_create(blfc_global_dev.d_class, NULL, blfc_global_dev.d_dev, NULL,
BNXT_LFC_DEV_NAME))) {
BNXT_LFC_ERR(NULL, "Device creation is failed\n");
class_destroy(blfc_global_dev.d_class);
unregister_chrdev_region(blfc_global_dev.d_dev, 1);
return -1;
}
blfc_global_dev.fops.owner = THIS_MODULE;
blfc_global_dev.fops.open = bnxt_lfc_open;
blfc_global_dev.fops.read = bnxt_lfc_read;
blfc_global_dev.fops.write = bnxt_lfc_write;
blfc_global_dev.fops.llseek = bnxt_lfc_seek;
blfc_global_dev.fops.unlocked_ioctl = bnxt_lfc_ioctl;
blfc_global_dev.fops.release = bnxt_lfc_release;
cdev_init(&blfc_global_dev.c_dev, &blfc_global_dev.fops);
if (cdev_add(&blfc_global_dev.c_dev, blfc_global_dev.d_dev, 1) == -1) {
BNXT_LFC_ERR(NULL, "Char device addition is failed\n");
device_destroy(blfc_global_dev.d_class, blfc_global_dev.d_dev);
class_destroy(blfc_global_dev.d_class);
unregister_chrdev_region(blfc_global_dev.d_dev, 1);
return -1;
}
mutex_init(&blfc_global_dev.bnxt_lfc_lock);
bnxt_lfc_inited = true;
memset(blfc_array, 0, sizeof(struct bnxt_lfc_dev_array)
* MAX_LFC_CACHED_NET_DEVICES);
rc = bnxt_en_register_netdevice_notifier(&lfc_device_notifier);
if (rc) {
BNXT_LFC_ERR(NULL, "Error on register NETDEV event notifier\n");
return -1;
}
return 0;
}
void bnxt_lfc_exit(void)
{
struct bnxt_lfc_dev *blfc_dev;
u32 i;
if (!bnxt_lfc_inited)
return;
rtnl_lock();
for (i = 0; i < MAX_LFC_CACHED_NET_DEVICES; i++) {
blfc_dev = blfc_array[i].bnxt_lfc_dev;
if (blfc_dev) {
blfc_array[i].bnxt_lfc_dev = NULL;
blfc_array[i].taken = 0;
dev_put(blfc_dev->ndev);
kfree(blfc_dev);
}
}
rtnl_unlock();
bnxt_en_unregister_netdevice_notifier(&lfc_device_notifier);
cdev_del(&blfc_global_dev.c_dev);
device_destroy(blfc_global_dev.d_class, blfc_global_dev.d_dev);
class_destroy(blfc_global_dev.d_class);
unregister_chrdev_region(blfc_global_dev.d_dev, 1);
}
#endif

View File

@@ -0,0 +1,98 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2017-2018 Broadcom Limited
* Copyright (c) 2018-2020 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_LFC_H
#define BNXT_LFC_H
#ifdef CONFIG_BNXT_LFC
/* Assuming that no HWRM command requires more than 10 DMA addresses
 * in its input request.
*/
#define MAX_NUM_DMA_INDICATIONS 10
#define MAX_DMA_MEM_SIZE 0x10000 /*64K*/
/* To prevent a mismatch between the bnxtnvm user application and
 * bnxt_lfc, the max. size is kept at 512.
*/
#define BNXT_LFC_MAX_HWRM_REQ_LENGTH HWRM_MAX_REQ_LEN
#define BNXT_LFC_MAX_HWRM_RESP_LENGTH (512)
#define BNXT_NVM_FLUSH_TIMEOUT ((DFLT_HWRM_CMD_TIMEOUT) * 100)
#define BNXT_LFC_DEV_NAME "bnxt_lfc"
#define DRV_NAME BNXT_LFC_DEV_NAME
#define BNXT_LFC_ERR(dev, fmt, arg...)				\
	dev_err(dev, "%s: %s:%d: "fmt "\n",			\
		DRV_NAME, __func__,				\
		__LINE__, ##arg)
#define BNXT_LFC_WARN(dev, fmt, arg...)				\
	dev_warn(dev, "%s: %s:%d: "fmt "\n",			\
		 DRV_NAME, __func__,				\
		 __LINE__, ##arg)
#define BNXT_LFC_INFO(dev, fmt, arg...)				\
	dev_info(dev, "%s: %s:%d: "fmt "\n",			\
		 DRV_NAME, __func__,				\
		 __LINE__, ##arg)
#define BNXT_LFC_DEBUG(dev, fmt, arg...)			\
	dev_dbg(dev, "%s: %s:%d: "fmt "\n",			\
		DRV_NAME, __func__,				\
		__LINE__, ##arg)
struct bnxt_lfc_dev_array {
u32 taken;
struct bnxt_lfc_dev *bnxt_lfc_dev;
};
struct bnxt_lfc_dev {
struct pci_dev *pdev;
struct net_device *ndev;
struct bnxt *bp;
int domain;
u32 bus;
u32 devfn;
/* dma_virt_addr to hold the virtual address
* of the DMA memory.
*/
void *dma_virt_addr[MAX_NUM_DMA_INDICATIONS];
	/* dma_addr to hold the DMA addresses */
dma_addr_t dma_addr[MAX_NUM_DMA_INDICATIONS];
};
struct bnxt_gloabl_dev {
dev_t d_dev;
struct class *d_class;
struct cdev c_dev;
struct file_operations fops;
struct mutex bnxt_lfc_lock;
};
int32_t bnxt_lfc_init(void);
void bnxt_lfc_exit(void);
#else
static inline int32_t bnxt_lfc_init(void)
{
return 0;
}
static inline void bnxt_lfc_exit(void)
{
}
#endif
#endif /*BNXT_LFC_H*/

View File

@ -0,0 +1,111 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2017-2018 Broadcom Limited
* Copyright (c) 2018-2022 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_LFC_IOCTL_H
#define BNXT_LFC_IOCTL_H
#define BNXT_LFC_IOCTL_MAGIC 0x98
#define BNXT_LFC_VER 1
enum bnxt_lfc_req_type {
BNXT_LFC_NVM_GET_VAR_REQ = 1,
BNXT_LFC_NVM_SET_VAR_REQ,
BNXT_LFC_NVM_FLUSH_REQ,
BNXT_LFC_GENERIC_HWRM_REQ,
};
struct bnxt_lfc_req_hdr {
uint32_t ver;
uint32_t bus;
uint32_t devfn;
enum bnxt_lfc_req_type req_type;
};
struct bnxt_lfc_nvm_get_var_req {
uint16_t option_num;
uint16_t dimensions;
uint16_t index_0;
uint16_t index_1;
uint16_t index_2;
uint16_t index_3;
uint16_t len_in_bits;
uint8_t __user *out_val;
};
struct bnxt_lfc_nvm_set_var_req {
uint16_t option_num;
uint16_t dimensions;
uint16_t index_0;
uint16_t index_1;
uint16_t index_2;
uint16_t index_3;
uint16_t len_in_bits;
uint8_t __user *in_val;
};
struct dma_info {
__u64 data;
/* Based on the read_or_write parameter,
 * LFC will either fill or read the
 * data to or from user memory
 */
__u32 length;
/* Length of the data for read/write */
__u16 offset;
/* Offset at which the HWRM input structure needs the DMA address */
__u8 read_or_write;
/* It should be 0 for write and 1 for read */
__u8 unused;
};
struct blfc_fw_msg {
__u64 usr_req;
/* HWRM input structure */
__u64 usr_resp;
/* HWRM output structure */
__u32 len_req;
/* HWRM input structure length */
__u32 len_resp;
/* HWRM output structure length */
__u32 timeout;
/* HWRM command timeout. If 0 then
* LFC will provide default timeout
*/
__u32 num_dma_indications;
/* Number of DMA addresses used in HWRM command */
#ifdef DECLARE_FLEX_ARRAY
DECLARE_FLEX_ARRAY(struct dma_info, dma);
#else
struct dma_info dma[0];
#endif
/* User should allocate it with
* (sizeof(struct dma_info) * num_dma_indications)
*/
};
struct bnxt_lfc_generic_msg {
__u8 key;
#define BNXT_LFC_KEY_DOMAIN_NO 1
__u8 reserved[3];
__u32 value;
};
struct bnxt_lfc_req {
struct bnxt_lfc_req_hdr hdr;
union {
struct bnxt_lfc_nvm_get_var_req nvm_get_var_req;
struct bnxt_lfc_nvm_set_var_req nvm_set_var_req;
__u64 hreq; /* Pointer to "struct blfc_fw_msg" */
} req;
};
#define BNXT_LFC_REQ _IOW(BNXT_LFC_IOCTL_MAGIC, 1, struct bnxt_lfc_req)
#endif /*BNXT_LFC_IOCTL_H*/
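
For reference, a minimal userspace sketch of driving this interface: it fills a bnxt_lfc_req for an NVM variable read and issues BNXT_LFC_REQ on the character device. The /dev/bnxt_lfc node path, the PCI bus/devfn values and the option number below are illustrative assumptions, not defined by this header.

/* Hedged userspace sketch; device path, bus/devfn and option_num are
 * illustrative assumptions, not part of bnxt_lfc_ioctl.h.
 */
#ifndef __user
#define __user          /* kernel annotation, empty in userspace builds */
#endif
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include "bnxt_lfc_ioctl.h"

int main(void)
{
    struct bnxt_lfc_req req;
    uint8_t val[4] = { 0 };
    int fd, rc;

    fd = open("/dev/bnxt_lfc", O_RDWR);     /* assumed node name */
    if (fd < 0)
        return 1;

    memset(&req, 0, sizeof(req));
    req.hdr.ver = BNXT_LFC_VER;
    req.hdr.bus = 0x3b;                     /* assumed PCI bus */
    req.hdr.devfn = 0;                      /* assumed devfn */
    req.hdr.req_type = BNXT_LFC_NVM_GET_VAR_REQ;
    req.req.nvm_get_var_req.option_num = 1; /* assumed NVM option */
    req.req.nvm_get_var_req.len_in_bits = 32;
    req.req.nvm_get_var_req.out_val = val;

    rc = ioctl(fd, BNXT_LFC_REQ, &req);
    if (!rc)
        printf("val = %02x %02x %02x %02x\n",
               val[0], val[1], val[2], val[3]);
    close(fd);
    return rc ? 1 : 0;
}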

View File

@ -0,0 +1,570 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include "bnxt_compat.h"
#include "bnxt.h"
#include "bnxt_log.h"
#include "bnxt_coredump.h"
#define BNXT_LOG_MSG_SIZE 256
#define BNXT_LOG_NUM_BUFFERS(x) ((x) / BNXT_LOG_MSG_SIZE)
int l2_ring_contents_seg_list[] = {
BNXT_SEGMENT_L2_RING_CONTENT
};
/* Creation of the segments listed below
 * will be attempted for the L2 logger
 */
int l2_seg_list[] = {
BNXT_SEGMENT_L2
};
/* Creation of the segments listed below
 * will be attempted for the L2 CTX MEM logger
 */
int l2_ctx_mem_seg_list[] = {
BNXT_SEGMENT_CTX_MEM_QP,
BNXT_SEGMENT_CTX_MEM_SRQ,
BNXT_SEGMENT_CTX_MEM_CQ,
BNXT_SEGMENT_CTX_MEM_VNIC,
BNXT_SEGMENT_CTX_MEM_STAT,
BNXT_SEGMENT_CTX_MEM_SP_TQM_RING,
BNXT_SEGMENT_CTX_MEM_FP_TQM_RING,
BNXT_SEGMENT_CTX_MEM_MRAV,
BNXT_SEGMENT_CTX_MEM_TIM,
BNXT_SEGMENT_CTX_MEM_TX_CK,
BNXT_SEGMENT_CTX_MEM_RX_CK,
BNXT_SEGMENT_CTX_MEM_MP_TQM_RING,
BNXT_SEGMENT_CTX_MEM_SQ_DB_SHADOW,
BNXT_SEGMENT_CTX_MEM_RQ_DB_SHADOW,
BNXT_SEGMENT_CTX_MEM_SRQ_DB_SHADOW,
BNXT_SEGMENT_CTX_MEM_CQ_DB_SHADOW
};
/* Creation of the segments listed below
 * will be attempted for the RoCE logger
 */
int roce_seg_list[] = {
BNXT_SEGMENT_QP_CTX,
BNXT_SEGMENT_CQ_CTX,
BNXT_SEGMENT_MR_CTX,
BNXT_SEGMENT_SRQ_CTX,
/* Try to fit the fixed-size segment first. */
BNXT_SEGMENT_ROCE
};
struct bnxt_logger {
struct list_head list;
u16 logger_id;
u32 buffer_size;
u16 head;
u16 tail;
bool valid;
void *msgs;
u32 live_max_size;
void *live_msgs;
u32 max_live_buff_size;
u32 live_msgs_len;
void (*log_live_op)(void *dev, u32 seg_id);
u32 total_segs;
int *seg_list;
};
int bnxt_register_logger(struct bnxt *bp, u16 logger_id, u32 num_buffs,
void (*log_live)(void *, u32), u32 live_max_size)
{
struct bnxt_logger *logger;
void *data;
if (logger_id == BNXT_LOGGER_L2_CTX_MEM ||
logger_id == BNXT_LOGGER_L2_RING_CONTENTS)
goto register_logger;
if (!log_live || !live_max_size)
return -EINVAL;
if (!is_power_of_2(num_buffs))
return -EINVAL;
register_logger:
logger = kzalloc(sizeof(*logger), GFP_KERNEL);
if (!logger)
return -ENOMEM;
logger->logger_id = logger_id;
logger->buffer_size = num_buffs * BNXT_LOG_MSG_SIZE;
logger->log_live_op = log_live;
logger->max_live_buff_size = live_max_size;
switch (logger_id) {
case BNXT_LOGGER_L2:
logger->total_segs = sizeof(l2_seg_list) / sizeof(int);
logger->seg_list = &l2_seg_list[0];
break;
case BNXT_LOGGER_ROCE:
logger->total_segs = sizeof(roce_seg_list) / sizeof(int);
logger->seg_list = &roce_seg_list[0];
break;
case BNXT_LOGGER_L2_CTX_MEM:
logger->total_segs = sizeof(l2_ctx_mem_seg_list) / sizeof(int);
logger->seg_list = &l2_ctx_mem_seg_list[0];
break;
case BNXT_LOGGER_L2_RING_CONTENTS:
logger->total_segs = sizeof(l2_ring_contents_seg_list) / sizeof(int);
logger->seg_list = &l2_ring_contents_seg_list[0];
break;
default:
logger->total_segs = 1;
break;
}
if (logger->buffer_size) {
data = vmalloc(logger->buffer_size);
if (!data) {
kfree(logger);
return -ENOMEM;
}
logger->msgs = data;
}
INIT_LIST_HEAD(&logger->list);
mutex_lock(&bp->log_lock);
list_add_tail(&logger->list, &bp->loggers_list);
mutex_unlock(&bp->log_lock);
return 0;
}
void bnxt_unregister_logger(struct bnxt *bp, u16 logger_id)
{
struct bnxt_logger *l = NULL, *tmp;
mutex_lock(&bp->log_lock);
list_for_each_entry_safe(l, tmp, &bp->loggers_list, list) {
if (l->logger_id == logger_id) {
list_del(&l->list);
break;
}
}
mutex_unlock(&bp->log_lock);
if (!l || l->logger_id != logger_id) {
netdev_err(bp->dev, "logger id %d not registered\n", logger_id);
return;
}
vfree(l->msgs);
kfree(l);
}
int bnxt_log_ring_contents(struct bnxt *bp)
{
struct list_head *list_head, *pos, *lg;
struct bnxt_logger *logger = NULL;
size_t size = 0;
u32 offset = 0;
u8 *data;
int i, len;
mutex_lock(&bp->log_lock);
list_head = &bp->loggers_list;
list_for_each_safe(pos, lg, list_head) {
logger = list_entry(pos, struct bnxt_logger, list);
if (logger->logger_id == BNXT_LOGGER_L2_RING_CONTENTS)
break;
}
if (!logger || logger->logger_id != BNXT_LOGGER_L2_RING_CONTENTS) {
mutex_unlock(&bp->log_lock);
return -EINVAL;
}
/* Include two extra u16 values per ring to store the producer & consumer indices */
size = bp->tx_nr_rings * (2 * sizeof(u16) + (bp->tx_nr_pages * HW_TXBD_RING_SIZE));
if (!logger->msgs || logger->buffer_size < size) {
if (logger->msgs)
vfree(logger->msgs);
logger->msgs = vmalloc(size);
if (!logger->msgs) {
mutex_unlock(&bp->log_lock);
return -ENOMEM;
}
logger->buffer_size = size;
}
data = logger->msgs;
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
u16 prod_id = RING_TX(bp, txr->tx_prod);
u16 cons_id = RING_TX(bp, txr->tx_cons);
struct bnxt_ring_struct *ring;
ring = &txr->tx_ring_struct;
data[offset++] = prod_id && 0xff;
data[offset++] = (prod_id && 0xff00) >> 8;
data[offset++] = cons_id && 0xff;
data[offset++] = (cons_id && 0xff00) >> 8;
len = bnxt_copy_ring(bp, &ring->ring_mem, data, offset);
offset += len;
}
mutex_unlock(&bp->log_lock);
return 0;
}
static int bnxt_log_info(char *buf, size_t max_len, const char *format, va_list args)
{
static char textbuf[BNXT_LOG_MSG_SIZE];
char *text = textbuf;
size_t text_len;
char *next;
text_len = vscnprintf(text, sizeof(textbuf), format, args);
next = memchr(text, '\n', text_len);
if (next)
text_len = next - text;
else if (text[text_len] == '\0')
text[text_len] = '\n';
if (text_len > max_len) {
/* Truncate */
text_len = max_len;
text[text_len] = '\n';
}
memcpy(buf, text, text_len + 1);
return text_len + 1;
}
void bnxt_log_add_msg(struct bnxt *bp, u16 logger_id, const char *format, ...)
{
struct list_head *list_head, *pos, *lg;
struct bnxt_logger *logger = NULL;
u16 start, tail;
va_list args;
void *buf;
u32 mask;
mutex_lock(&bp->log_lock);
list_head = &bp->loggers_list;
list_for_each_safe(pos, lg, list_head) {
logger = list_entry(pos, struct bnxt_logger, list);
if (logger->logger_id == logger_id)
break;
}
if (!logger) {
mutex_unlock(&bp->log_lock);
return;
}
mask = BNXT_LOG_NUM_BUFFERS(logger->buffer_size) - 1;
tail = logger->tail;
start = logger->head;
if (logger->valid && start == tail)
logger->head = ++start & mask;
buf = logger->msgs + BNXT_LOG_MSG_SIZE * logger->tail;
logger->tail = ++tail & mask;
if (!logger->valid)
logger->valid = true;
va_start(args, format);
bnxt_log_info(buf, BNXT_LOG_MSG_SIZE, format, args);
va_end(args);
mutex_unlock(&bp->log_lock);
}
void bnxt_log_raw(struct bnxt *bp, u16 logger_id, void *data, int len)
{
struct list_head *head, *pos, *lg;
struct bnxt_logger *logger = NULL;
bool match_found = false;
head = &bp->loggers_list;
list_for_each_safe(pos, lg, head) {
logger = list_entry(pos, struct bnxt_logger, list);
if ((logger->logger_id == logger_id) && logger->live_msgs) {
match_found = true;
break;
}
}
if (!match_found)
return;
if ((logger->max_live_buff_size - logger->live_msgs_len) >= len) {
memcpy(logger->live_msgs, data, len);
logger->live_msgs_len += len;
logger->live_msgs += len;
}
}
void bnxt_log_live(struct bnxt *bp, u16 logger_id, const char *format, ...)
{
struct list_head *head, *pos, *lg;
struct bnxt_logger *logger = NULL;
va_list args;
int len;
head = &bp->loggers_list;
list_for_each_safe(pos, lg, head) {
logger = list_entry(pos, struct bnxt_logger, list);
if (logger->logger_id == logger_id)
break;
}
if (!logger || !logger->live_msgs || (logger->live_msgs_len >= logger->max_live_buff_size))
return;
va_start(args, format);
len = bnxt_log_info(logger->live_msgs + logger->live_msgs_len,
logger->max_live_buff_size - logger->live_msgs_len,
format, args);
va_end(args);
logger->live_msgs_len += len;
}
static size_t bnxt_get_data_len(char *buf)
{
size_t count = 0;
while (*buf++ != '\n')
count++;
return count + 1;
}
static size_t bnxt_collect_logs_buffer(struct bnxt_logger *logger, char *dest)
{
u32 mask = BNXT_LOG_NUM_BUFFERS(logger->buffer_size) - 1;
u16 head = logger->head;
u16 tail = logger->tail;
size_t total_len = 0;
int count;
if (!logger->valid)
return 0;
count = (tail > head) ? (tail - head) : (tail - head + mask + 1);
while (count--) {
void *src = logger->msgs + BNXT_LOG_MSG_SIZE * (head & mask);
size_t len;
len = bnxt_get_data_len(src);
memcpy(dest + total_len, src, len);
total_len += len;
head++;
}
return total_len;
}
static int bnxt_get_ctx_mem_length(struct bnxt *bp, u32 total_segments)
{
u32 seg_hdr_len = sizeof(struct bnxt_coredump_segment_hdr);
struct bnxt_ctx_mem_info *ctx = bp->ctx;
size_t seg_len;
size_t length = 0;
int i;
if (!ctx)
return 0;
for (i = 0; i < total_segments; i++) {
int type = l2_ctx_mem_seg_list[i] - BNXT_LOG_CTX_MEM_SEG_ID_START;
struct bnxt_ctx_mem_type *ctxm;
ctxm = &ctx->ctx_arr[type];
if (!ctxm)
continue;
seg_len = bnxt_copy_ctx_mem(bp, ctxm, NULL, 0);
length += (seg_hdr_len + seg_len);
}
return length;
}
size_t bnxt_get_loggers_coredump_size(struct bnxt *bp, u16 dump_type)
{
struct list_head *head, *pos, *lg;
struct bnxt_logger *logger;
size_t len = 0;
mutex_lock(&bp->log_lock);
head = &bp->loggers_list;
list_for_each_safe(pos, lg, head) {
logger = list_entry(pos, struct bnxt_logger, list);
if (logger->logger_id == BNXT_LOGGER_L2_CTX_MEM) {
if (dump_type != BNXT_DUMP_DRIVER_WITH_CTX_MEM)
continue;
len += bnxt_get_ctx_mem_length(bp, logger->total_segs);
continue;
}
len += sizeof(struct bnxt_coredump_segment_hdr) +
logger->max_live_buff_size + logger->buffer_size;
}
mutex_unlock(&bp->log_lock);
return len;
}
void bnxt_start_logging_coredump(struct bnxt *bp, char *dest_buf, u32 *dump_len, u16 dump_type)
{
u32 null_seg_len, requested_buf_len, total_segs_per_logger;
u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
u32 offset, seg_hdr_len, total_seg_count;
struct bnxt_coredump_segment_hdr seg_hdr;
u32 prev_live_msgs_len, seg_id_in_hdr;
struct list_head *head, *pos, *lg;
struct bnxt_time start_time;
struct bnxt_logger *logger;
void *seg_hdr_dest;
s16 start_utc;
size_t seg_len;
int i;
seg_hdr_len = sizeof(seg_hdr);
total_seg_count = 0;
offset = 0;
requested_buf_len = *dump_len;
start_time = bnxt_get_current_time(bp);
start_utc = sys_tz.tz_minuteswest;
mutex_lock(&bp->log_lock);
/* First segment should be hwrm_ver_get response.
* For hwrm_ver_get response Component id = 2 and Segment id = 0
*/
bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
0, 0, 0, 2, 0);
memcpy(dest_buf + offset, &seg_hdr, seg_hdr_len);
offset += seg_hdr_len;
memcpy(dest_buf + offset, &bp->ver_resp, ver_get_resp_len);
offset += ver_get_resp_len;
*dump_len = seg_hdr_len + ver_get_resp_len;
head = &bp->loggers_list;
list_for_each_safe(pos, lg, head) {
seg_hdr_dest = NULL;
seg_len = 0;
logger = list_entry(pos, struct bnxt_logger, list);
total_segs_per_logger = logger->total_segs;
logger->live_msgs_len = 0;
prev_live_msgs_len = 0;
if (logger->logger_id == BNXT_LOGGER_L2_CTX_MEM) {
if (dump_type != BNXT_DUMP_DRIVER_WITH_CTX_MEM || !bp->ctx)
continue;
}
netdev_dbg(bp->dev, "logger id %d -> total seg %d\n",
logger->logger_id, total_segs_per_logger);
for (i = 0; i < total_segs_per_logger; i++) {
seg_hdr_dest = dest_buf + offset;
offset += seg_hdr_len;
seg_len = 0;
if (logger->logger_id == BNXT_LOGGER_L2_CTX_MEM) {
struct bnxt_ctx_mem_info *ctx = bp->ctx;
struct bnxt_ctx_mem_type *ctxm;
u16 type;
type = l2_ctx_mem_seg_list[i] - BNXT_LOG_CTX_MEM_SEG_ID_START;
ctxm = &ctx->ctx_arr[type];
seg_len = bnxt_copy_ctx_mem(bp, ctxm, dest_buf, offset);
offset += seg_len;
seg_id_in_hdr = logger->seg_list ?
logger->seg_list[i] : total_seg_count;
} else if (logger->logger_id == BNXT_LOGGER_L2_RING_CONTENTS) {
if (logger->msgs) {
memcpy(dest_buf + offset, logger->msgs,
logger->buffer_size);
seg_len = logger->buffer_size;
offset += seg_len;
}
seg_id_in_hdr = logger->seg_list ?
logger->seg_list[i] : total_seg_count;
} else {
/* First collect logs from buffer */
seg_len = bnxt_collect_logs_buffer(logger, dest_buf + offset);
offset += seg_len;
/* Let the logger collect live messages */
logger->live_msgs = dest_buf + offset;
prev_live_msgs_len = logger->live_msgs_len;
seg_id_in_hdr = logger->seg_list ?
logger->seg_list[i] : total_seg_count;
logger->log_live_op(bp, logger->seg_list ?
logger->seg_list[i] : total_seg_count);
seg_len += (logger->live_msgs_len - prev_live_msgs_len);
offset += seg_len;
}
bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, seg_len,
0, 0, 0, 13, 0);
seg_hdr.segment_id = cpu_to_le32(seg_id_in_hdr);
memcpy(seg_hdr_dest, &seg_hdr, sizeof(seg_hdr));
total_seg_count++;
*dump_len += (seg_hdr_len + seg_len);
netdev_dbg(bp->dev, "seg 0x%x seg_len (%d + %d) offset %d len %d\n",
seg_id_in_hdr, seg_hdr_len, (unsigned int)seg_len,
offset, *dump_len);
}
}
null_seg_len = BNXT_COREDUMP_BUF_LEN(requested_buf_len) - *dump_len;
offset = *dump_len;
bnxt_fill_empty_seg(bp, dest_buf + offset, null_seg_len);
/* Place the coredump record in the last 1024 bytes */
offset = requested_buf_len - sizeof(struct bnxt_coredump_record);
netdev_dbg(bp->dev, "From %s %d offset %d buf len %d\n",
__func__, __LINE__, offset, requested_buf_len);
bnxt_fill_coredump_record(bp, (void *)dest_buf + offset,
start_time, start_utc,
total_seg_count + 2, 0);
*dump_len = *dump_len + null_seg_len +
sizeof(struct bnxt_coredump_record) +
sizeof(struct bnxt_coredump_segment_hdr);
mutex_unlock(&bp->log_lock);
}
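/* Resulting buffer layout (illustrative summary of the function above):
 *
 *   [seg_hdr][hwrm_ver_get response]   - component 2, segment 0
 *   [seg_hdr][logger segment] ...      - component 13, one per segment
 *   [seg_hdr][empty segment padding]   - fills the remaining space
 *   [bnxt_coredump_record]             - last 1024 bytes of the buffer
 */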
void bnxt_reset_loggers(struct bnxt *bp)
{
struct list_head *head, *pos, *lg;
struct bnxt_logger *logger;
mutex_lock(&bp->log_lock);
head = &bp->loggers_list;
list_for_each_safe(pos, lg, head) {
logger = list_entry(pos, struct bnxt_logger, list);
logger->head = 0;
logger->tail = 0;
logger->valid = false;
}
mutex_unlock(&bp->log_lock);
}
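
The log buffer is a power-of-two ring (enforced by the is_power_of_2() check in bnxt_register_logger above), so head and tail can be advanced and wrapped with a simple mask. A minimal standalone sketch of the same indexing, with illustrative names:

/* Minimal sketch of the masked ring indexing used by bnxt_log_add_msg()
 * and bnxt_collect_logs_buffer(); names here are illustrative.
 */
#define MINI_SLOTS 8    /* must be a power of two */

struct mini_log {
    unsigned short head, tail;
    int valid;          /* set after the first message is stored */
};

/* Returns the slot index to write; drops the oldest entry when full. */
static unsigned int mini_log_advance(struct mini_log *l)
{
    unsigned int mask = MINI_SLOTS - 1;
    unsigned int slot = l->tail;

    if (l->valid && l->head == l->tail)
        l->head = (l->head + 1) & mask; /* overwrite the oldest entry */
    l->tail = (l->tail + 1) & mask;
    l->valid = 1;
    return slot;
}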

View File

@ -0,0 +1,55 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_LOG_H
#define BNXT_LOG_H
#define BNXT_LOGGER_L2 1
#define BNXT_LOGGER_ROCE 2
#define BNXT_LOGGER_L2_CTX_MEM 3
#define BNXT_LOGGER_L2_RING_CONTENTS 4
#define BNXT_SEGMENT_L2 0
#define BNXT_SEGMENT_ROCE 255
#define BNXT_SEGMENT_QP_CTX 256
#define BNXT_SEGMENT_SRQ_CTX 257
#define BNXT_SEGMENT_CQ_CTX 258
#define BNXT_SEGMENT_MR_CTX 270
#define BNXT_LOG_CTX_MEM_SEG_ID_START 0x100
#define BNXT_SEGMENT_L2_RING_CONTENT 0x200
#define BNXT_SEGMENT_CTX_MEM_QP (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_QP)
#define BNXT_SEGMENT_CTX_MEM_SRQ (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_SRQ)
#define BNXT_SEGMENT_CTX_MEM_CQ (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_CQ)
#define BNXT_SEGMENT_CTX_MEM_VNIC (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_VNIC)
#define BNXT_SEGMENT_CTX_MEM_STAT (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_STAT)
#define BNXT_SEGMENT_CTX_MEM_SP_TQM_RING (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_STQM)
#define BNXT_SEGMENT_CTX_MEM_FP_TQM_RING (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_FTQM)
#define BNXT_SEGMENT_CTX_MEM_MRAV (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_MRAV)
#define BNXT_SEGMENT_CTX_MEM_TIM (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_TIM)
#define BNXT_SEGMENT_CTX_MEM_TX_CK (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_TCK)
#define BNXT_SEGMENT_CTX_MEM_RX_CK (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_RCK)
#define BNXT_SEGMENT_CTX_MEM_MP_TQM_RING (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_MTQM)
#define BNXT_SEGMENT_CTX_MEM_SQ_DB_SHADOW (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_SQDBS)
#define BNXT_SEGMENT_CTX_MEM_RQ_DB_SHADOW (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_RQDBS)
#define BNXT_SEGMENT_CTX_MEM_SRQ_DB_SHADOW (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_SRQDBS)
#define BNXT_SEGMENT_CTX_MEM_CQ_DB_SHADOW (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_CQDBS)
int bnxt_register_logger(struct bnxt *bp, u16 logger_id, u32 num_buffers,
void (*log_live)(void *, u32), u32 live_size);
void bnxt_unregister_logger(struct bnxt *bp, u16 logger_id);
void bnxt_log_add_msg(struct bnxt *bp, u16 logger_id, const char *format, ...);
void bnxt_log_live(struct bnxt *bp, u16 logger_id, const char *format, ...);
void bnxt_log_raw(struct bnxt *bp, u16 logger_id, void *data, int len);
void bnxt_reset_loggers(struct bnxt *bp);
size_t bnxt_get_loggers_coredump_size(struct bnxt *bp, u16 dump_type);
void bnxt_start_logging_coredump(struct bnxt *bp, char *dest_buf, u32 *dump_len, u16 dump_type);
int bnxt_log_ring_contents(struct bnxt *bp);
#endif

View File

@ -0,0 +1,84 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include "bnxt_compat.h"
#include "bnxt.h"
#include "bnxt_coredump.h"
#include "bnxt_log.h"
#include "bnxt_log_data.h"
static void bnxt_log_drv_version(struct bnxt *bp)
{
bnxt_log_live(bp, BNXT_LOGGER_L2, "\n");
bnxt_log_live(bp, BNXT_LOGGER_L2, "Interface: %s driver version: %s\n",
bp->dev->name, DRV_MODULE_VERSION);
}
static void bnxt_log_tx_sw_state(struct bnxt_napi *bnapi)
{
struct bnxt_tx_ring_info *txr;
struct bnxt *bp = bnapi->bp;
int i = bnapi->index, j;
bnxt_for_each_napi_tx(j, bnapi, txr)
bnxt_log_live(bp, BNXT_LOGGER_L2, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
txr->tx_cons);
}
static void bnxt_log_rx_sw_state(struct bnxt_napi *bnapi)
{
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
struct bnxt *bp = bnapi->bp;
int i = bnapi->index;
if (!rxr)
return;
bnxt_log_live(bp, BNXT_LOGGER_L2, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
rxr->rx_sw_agg_prod);
}
static void bnxt_log_cp_sw_state(struct bnxt_napi *bnapi)
{
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring, *cpr2;
struct bnxt *bp = bnapi->bp;
int i = bnapi->index, j;
bnxt_log_live(bp, BNXT_LOGGER_L2, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
for (j = 0; j < cpr->cp_ring_count; j++) {
cpr2 = &cpr->cp_ring_arr[j];
if (!cpr2->bnapi)
continue;
bnxt_log_live(bp, BNXT_LOGGER_L2, "[%d.%d]: cp{fw_ring: %d raw_cons: %x}\n",
i, j, cpr2->cp_ring_struct.fw_ring_id, cpr2->cp_raw_cons);
}
}
void bnxt_log_ring_states(struct bnxt *bp)
{
struct bnxt_napi *bnapi;
int i;
bnxt_log_drv_version(bp);
if (!netif_running(bp->dev))
return;
for (i = 0; i < bp->cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
bnxt_log_tx_sw_state(bnapi);
bnxt_log_rx_sw_state(bnapi);
bnxt_log_cp_sw_state(bnapi);
}
}

View File

@ -0,0 +1,17 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_LOG_DATA_H
#define BNXT_LOG_DATA_H
#define BNXT_L2_MAX_LOG_BUFFERS 1024
#define BNXT_L2_MAX_LIVE_LOG_SIZE (4 << 20)
void bnxt_log_ring_states(struct bnxt *bp);
#endif
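
Taken together with bnxt_log.h above, a hedged sketch of how an L2 logger registration wired to bnxt_log_ring_states() could look; the wrapper and the call site are assumptions, as the actual registration is not part of this listing.

/* Hedged sketch: the wrapper and call site are illustrative assumptions. */
static void bnxt_l2_log_live(void *dev, u32 seg_id)
{
    bnxt_log_ring_states((struct bnxt *)dev); /* seg_id unused here */
}

static int bnxt_l2_logger_setup(struct bnxt *bp)
{
    /* BNXT_L2_MAX_LOG_BUFFERS (1024) satisfies the power-of-two check */
    return bnxt_register_logger(bp, BNXT_LOGGER_L2,
                                BNXT_L2_MAX_LOG_BUFFERS,
                                bnxt_l2_log_live,
                                BNXT_L2_MAX_LIVE_LOG_SIZE);
}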

View File

@ -0,0 +1,543 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2022-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "bnxt_compat.h"
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_mpc.h"
#include "bnxt_ktls.h"
#include "bnxt_tfc.h"
void bnxt_alloc_mpc_info(struct bnxt *bp, u8 mpc_chnls_cap)
{
if (mpc_chnls_cap) {
if (!bp->mpc_info)
bp->mpc_info = kzalloc(sizeof(*bp->mpc_info),
GFP_KERNEL);
} else {
bnxt_free_mpc_info(bp);
}
if (bp->mpc_info)
bp->mpc_info->mpc_chnls_cap = mpc_chnls_cap;
}
void bnxt_free_mpc_info(struct bnxt *bp)
{
kfree(bp->mpc_info);
bp->mpc_info = NULL;
}
int bnxt_mpc_tx_rings_in_use(struct bnxt *bp)
{
struct bnxt_mpc_info *mpc = bp->mpc_info;
int i, mpc_tx = 0;
if (!mpc)
return 0;
for (i = 0; i < BNXT_MPC_TYPE_MAX; i++)
mpc_tx += mpc->mpc_ring_count[i];
return mpc_tx;
}
int bnxt_mpc_cp_rings_in_use(struct bnxt *bp)
{
struct bnxt_mpc_info *mpc = bp->mpc_info;
if (!mpc)
return 0;
return mpc->mpc_cp_rings;
}
bool bnxt_napi_has_mpc(struct bnxt *bp, int i)
{
struct bnxt_mpc_info *mpc = bp->mpc_info;
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_tx_ring_info *txr;
if (!mpc)
return false;
txr = bnapi->tx_ring[0];
if (txr && !(bnapi->flags & BNXT_NAPI_FLAG_XDP))
return txr->txq_index < mpc->mpc_cp_rings;
return false;
}
void bnxt_set_mpc_cp_ring(struct bnxt *bp, int bnapi_idx,
struct bnxt_cp_ring_info *cpr)
{
struct bnxt_mpc_info *mpc = bp->mpc_info;
struct bnxt_napi *bnapi;
int i, j;
bnapi = bp->bnapi[bnapi_idx];
for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) {
int num = mpc->mpc_ring_count[i];
for (j = 0; j < num; j++) {
struct bnxt_tx_ring_info *txr = &mpc->mpc_rings[i][j];
if (txr->bnapi == bnapi) {
txr->tx_cpr = cpr;
txr->tx_napi_idx = i;
bnapi->tx_mpc_ring[i] = txr;
break;
}
}
}
cpr->cp_ring_type = BNXT_NQ_HDL_TYPE_MP;
}
void bnxt_trim_mpc_rings(struct bnxt *bp)
{
struct bnxt_mpc_info *mpc = bp->mpc_info;
int max = bp->tx_nr_rings_per_tc;
u8 max_cp = 0;
int i;
if (!mpc)
return;
for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) {
mpc->mpc_ring_count[i] = min_t(u8, mpc->mpc_ring_count[i], max);
max_cp = max(max_cp, mpc->mpc_ring_count[i]);
}
mpc->mpc_cp_rings = max_cp;
}
enum bnxt_mpc_type {
BNXT_MPC_CRYPTO,
BNXT_MPC_CFA,
};
static void __bnxt_set_dflt_mpc_rings(struct bnxt *bp, enum bnxt_mpc_type type,
int *avail, int avail_cp)
{
struct bnxt_mpc_info *mpc = bp->mpc_info;
int dflt1, dflt2;
int idx1, idx2;
int min1, min2;
int val1, val2;
if (type == BNXT_MPC_CRYPTO) {
min1 = BNXT_MIN_MPC_TCE;
min2 = BNXT_MIN_MPC_RCE;
dflt1 = BNXT_DFLT_MPC_TCE;
dflt2 = BNXT_DFLT_MPC_RCE;
idx1 = BNXT_MPC_TCE_TYPE;
idx2 = BNXT_MPC_RCE_TYPE;
} else {
min1 = BNXT_MIN_MPC_TE_CFA;
min2 = BNXT_MIN_MPC_RE_CFA;
dflt1 = BNXT_DFLT_MPC_TE_CFA;
dflt2 = BNXT_DFLT_MPC_RE_CFA;
idx1 = BNXT_MPC_TE_CFA_TYPE;
idx2 = BNXT_MPC_RE_CFA_TYPE;
}
if (*avail < (min1 + min2))
return;
val1 = min_t(int, *avail / 2, bp->tx_nr_rings_per_tc);
val2 = val1;
val1 = min_t(int, val1, dflt1);
val2 = min_t(int, val2, dflt2);
if (avail_cp < min1 || avail_cp < min2)
return;
val1 = min(val1, avail_cp);
val2 = min(val2, avail_cp);
mpc->mpc_ring_count[idx1] = val1;
mpc->mpc_ring_count[idx2] = val2;
*avail = *avail - val1 - val2;
}
void bnxt_set_dflt_mpc_rings(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
struct bnxt_mpc_info *mpc = bp->mpc_info;
int avail, mpc_cp, i;
int avail_cp;
if (!mpc)
return;
for (i = 0; i < BNXT_MPC_TYPE_MAX; i++)
mpc->mpc_ring_count[i] = 0;
mpc->mpc_cp_rings = 0;
avail = hw_resc->max_tx_rings - bp->tx_nr_rings;
avail_cp = hw_resc->max_cp_rings - bp->tx_nr_rings -
bp->rx_nr_rings;
if (BNXT_MPC_CRYPTO_CAPABLE(bp))
__bnxt_set_dflt_mpc_rings(bp, BNXT_MPC_CRYPTO, &avail, avail_cp);
if (BNXT_MPC_CFA_CAPABLE(bp))
__bnxt_set_dflt_mpc_rings(bp, BNXT_MPC_CFA, &avail, avail_cp);
for (i = 0, mpc_cp = 0; i < BNXT_MPC_TYPE_MAX; i++) {
if (mpc_cp < mpc->mpc_ring_count[i])
mpc_cp = mpc->mpc_ring_count[i];
}
mpc->mpc_cp_rings = mpc_cp;
}
void bnxt_init_mpc_ring_struct(struct bnxt *bp)
{
struct bnxt_mpc_info *mpc = bp->mpc_info;
int i, j;
if (!BNXT_MPC_CRYPTO_CAPABLE(bp) && !BNXT_MPC_CFA_CAPABLE(bp))
return;
for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) {
int num = mpc->mpc_ring_count[i];
struct bnxt_tx_ring_info *txr;
txr = mpc->mpc_rings[i];
if (!txr)
continue;
for (j = 0; j < num; j++) {
struct bnxt_ring_mem_info *rmem;
struct bnxt_ring_struct *ring;
txr = &mpc->mpc_rings[i][j];
txr->tx_ring_struct.ring_mem.flags =
BNXT_RMEM_RING_PTE_FLAG;
txr->bnapi = bp->tx_ring[bp->tx_ring_map[j]].bnapi;
ring = &txr->tx_ring_struct;
rmem = &ring->ring_mem;
rmem->nr_pages = bp->tx_nr_pages;
rmem->page_size = HW_TXBD_RING_SIZE;
rmem->pg_arr = (void **)txr->tx_desc_ring;
rmem->dma_arr = txr->tx_desc_mapping;
rmem->vmem_size = SW_MPC_TXBD_RING_SIZE *
bp->tx_nr_pages;
rmem->vmem = (void **)&txr->tx_buf_ring;
}
}
}
int bnxt_alloc_mpcs(struct bnxt *bp)
{
struct bnxt_mpc_info *mpc = bp->mpc_info;
int i;
if (!BNXT_MPC_CRYPTO_CAPABLE(bp) && !BNXT_MPC_CFA_CAPABLE(bp))
return 0;
for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) {
int num = mpc->mpc_ring_count[i];
struct bnxt_tx_ring_info *txr;
if (!num)
continue;
txr = kcalloc(num, sizeof(*txr), GFP_KERNEL);
if (!txr)
return -ENOMEM;
mpc->mpc_rings[i] = txr;
}
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
if (!bnxt_napi_has_mpc(bp, i))
continue;
bnapi->tx_mpc_ring = kcalloc(BNXT_MPC_TYPE_MAX,
sizeof(*bnapi->tx_mpc_ring),
GFP_KERNEL);
if (!bnapi->tx_mpc_ring)
return -ENOMEM;
}
return 0;
}
void bnxt_free_mpcs(struct bnxt *bp)
{
struct bnxt_mpc_info *mpc = bp->mpc_info;
int i;
if (!mpc)
return;
for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) {
kfree(mpc->mpc_rings[i]);
mpc->mpc_rings[i] = NULL;
}
if (!bp->bnapi)
return;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
kfree(bnapi->tx_mpc_ring);
bnapi->tx_mpc_ring = NULL;
}
}
int bnxt_alloc_mpc_rings(struct bnxt *bp)
{
struct bnxt_mpc_info *mpc = bp->mpc_info;
int i, j;
if (!mpc)
return 0;
for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) {
int num = mpc->mpc_ring_count[i], rc;
for (j = 0; j < num; j++) {
struct bnxt_tx_ring_info *txr = &mpc->mpc_rings[i][j];
struct bnxt_ring_struct *ring;
ring = &txr->tx_ring_struct;
rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
return rc;
ring->queue_id = BNXT_MPC_QUEUE_ID;
ring->mpc_chnl_type = i;
/* for stats context */
ring->grp_idx = txr->bnapi->index;
spin_lock_init(&txr->tx_lock);
}
}
return 0;
}
void bnxt_free_mpc_rings(struct bnxt *bp)
{
struct bnxt_mpc_info *mpc = bp->mpc_info;
int i, j;
if (!mpc)
return;
for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) {
int num = mpc->mpc_ring_count[i];
if (!mpc->mpc_rings[i])
continue;
for (j = 0; j < num; j++) {
struct bnxt_tx_ring_info *txr = &mpc->mpc_rings[i][j];
struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
bnxt_free_ring(bp, &ring->ring_mem);
}
}
}
void bnxt_init_mpc_rings(struct bnxt *bp)
{
struct bnxt_mpc_info *mpc = bp->mpc_info;
int i, j;
if (!mpc)
return;
mpc->mpc_tx_start_idx = bp->tx_nr_rings;
for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) {
int num = mpc->mpc_ring_count[i];
for (j = 0; j < num; j++) {
struct bnxt_tx_ring_info *txr = &mpc->mpc_rings[i][j];
struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
txr->tx_prod = 0;
txr->tx_cons = 0;
txr->tx_hw_cons = 0;
ring->fw_ring_id = INVALID_HW_RING_ID;
}
}
}
int bnxt_hwrm_mpc_ring_alloc(struct bnxt *bp)
{
struct bnxt_mpc_info *mpc = bp->mpc_info;
int i, j, rc;
u32 tx_idx;
if (!mpc)
return 0;
tx_idx = mpc->mpc_tx_start_idx;
for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) {
int num = mpc->mpc_ring_count[i];
for (j = 0; j < num; j++) {
struct bnxt_tx_ring_info *txr = &mpc->mpc_rings[i][j];
struct bnxt_cp_ring_info *cpr = txr->tx_cpr;
struct bnxt_ring_struct *ring;
ring = &cpr->cp_ring_struct;
if (ring->fw_ring_id == INVALID_HW_RING_ID) {
rc = bnxt_hwrm_cp_ring_alloc_p5(bp, cpr);
if (rc)
return rc;
}
rc = bnxt_hwrm_tx_ring_alloc(bp, txr, tx_idx++);
if (rc)
return rc;
}
}
return 0;
}
void bnxt_hwrm_mpc_ring_free(struct bnxt *bp, bool close_path)
{
struct bnxt_mpc_info *mpc = bp->mpc_info;
int i, j;
if (!mpc)
return;
for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) {
int num = mpc->mpc_ring_count[i];
if (!mpc->mpc_rings[i])
continue;
for (j = 0; j < num; j++)
bnxt_hwrm_tx_ring_free(bp, &mpc->mpc_rings[i][j],
close_path);
}
}
int bnxt_start_xmit_mpc(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
void *data, uint len, unsigned long handle)
{
u32 bds, total_bds, bd_space, free_size;
struct bnxt_sw_mpc_tx_bd *tx_buf;
struct tx_bd *txbd;
u16 prod;
bds = DIV_ROUND_UP(len, sizeof(*txbd));
total_bds = bds + 1;
free_size = bnxt_tx_avail(bp, txr);
if (free_size < total_bds)
return -EBUSY;
prod = txr->tx_prod;
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
tx_buf = &txr->tx_mpc_buf_ring[RING_TX(bp, prod)];
tx_buf->handle = handle;
tx_buf->inline_bds = total_bds;
txbd->tx_bd_len_flags_type =
cpu_to_le32((len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_MPC_TX_BD |
(total_bds << TX_BD_FLAGS_BD_CNT_SHIFT));
txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, total_bds);
prod = NEXT_TX(prod);
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
bd_space = TX_DESC_CNT - TX_IDX(prod);
if (bd_space < bds) {
uint len0 = bd_space * sizeof(*txbd);
memcpy(txbd, data, len0);
prod += bd_space;
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
bds -= bd_space;
len -= len0;
data += len0;
}
memcpy(txbd, data, len);
prod += bds;
txr->tx_prod = prod;
/* Sync BD data before updating doorbell */
wmb();
bnxt_db_write(bp, &txr->tx_db, prod);
return 0;
}
static bool bnxt_mpc_unsolicit(struct mpc_cmp *mpcmp)
{
u32 client = MPC_CMP_CLIENT_TYPE(mpcmp);
if (client != MPC_CMP_CLIENT_TCE && client != MPC_CMP_CLIENT_RCE &&
client != MPC_CMP_CLIENT_TE_CFA && client != MPC_CMP_CLIENT_RE_CFA)
return false;
return MPC_CMP_UNSOLICIT_SUBTYPE(mpcmp);
}
int bnxt_mpc_cmp(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, u32 *raw_cons)
{
struct bnxt_cmpl_entry cmpl_entry_arr[2];
struct bnxt_napi *bnapi = cpr->bnapi;
u16 cons = RING_CMP(*raw_cons);
struct mpc_cmp *mpcmp, *mpcmp1;
u32 tmp_raw_cons = *raw_cons;
unsigned long handle = 0;
u32 client, cmpl_num;
u8 type;
mpcmp = (struct mpc_cmp *)
&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
type = MPC_CMP_CMP_TYPE(mpcmp);
cmpl_entry_arr[0].cmpl = mpcmp;
cmpl_entry_arr[0].len = sizeof(*mpcmp);
cmpl_num = 1;
if (type == MPC_CMP_TYPE_MID_PATH_LONG) {
tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
cons = RING_CMP(tmp_raw_cons);
mpcmp1 = (struct mpc_cmp *)
&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
if (!MPC_CMP_VALID(bp, mpcmp1, tmp_raw_cons))
return -EBUSY;
/* The valid test of the entry must be done first before
* reading any further.
*/
dma_rmb();
if (mpcmp1 == mpcmp + 1) {
cmpl_entry_arr[cmpl_num - 1].len += sizeof(*mpcmp1);
} else {
cmpl_entry_arr[cmpl_num].cmpl = mpcmp1;
cmpl_entry_arr[cmpl_num].len = sizeof(*mpcmp1);
cmpl_num++;
}
}
client = MPC_CMP_CLIENT_TYPE(mpcmp) >> MPC_CMP_CLIENT_SFT;
if (!bnxt_mpc_unsolicit(mpcmp)) {
struct bnxt_sw_mpc_tx_bd *mpc_buf;
struct bnxt_tx_ring_info *txr;
u16 tx_cons;
u32 opaque;
opaque = mpcmp->mpc_cmp_opaque;
txr = bnapi->tx_mpc_ring[client];
tx_cons = txr->tx_cons;
if (TX_OPAQUE_RING(opaque) != txr->tx_napi_idx)
netdev_warn(bp->dev, "Wrong opaque %x, expected ring %x, idx %x\n",
opaque, txr->tx_napi_idx, txr->tx_cons);
mpc_buf = &txr->tx_mpc_buf_ring[RING_TX(bp, tx_cons)];
handle = mpc_buf->handle;
tx_cons += mpc_buf->inline_bds;
txr->tx_cons = tx_cons;
txr->tx_hw_cons = RING_TX(bp, tx_cons);
}
if (client == BNXT_MPC_TCE_TYPE || client == BNXT_MPC_RCE_TYPE)
bnxt_ktls_mpc_cmp(bp, client, handle, cmpl_entry_arr, cmpl_num);
else if (client == BNXT_MPC_TE_CFA_TYPE || client == BNXT_MPC_RE_CFA_TYPE)
bnxt_tfc_mpc_cmp(bp, client, handle, cmpl_entry_arr, cmpl_num);
*raw_cons = tmp_raw_cons;
return 0;
}
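
A hedged sketch of a caller for bnxt_start_xmit_mpc(); the handle choice and locking discipline are illustrative (the ring's tx_lock is initialized in bnxt_alloc_mpc_rings() above), not a call site shown in this diff.

/* Hedged sketch of sending one inline MPC message; handle and locking
 * here are illustrative assumptions.
 */
static int mpc_send_sketch(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
                           void *msg, uint len)
{
    unsigned long handle = (unsigned long)msg; /* echoed by the completion */
    int rc;

    spin_lock(&txr->tx_lock);
    /* Fails with -EBUSY unless DIV_ROUND_UP(len, sizeof(struct tx_bd)) + 1
     * BDs are free in the ring.
     */
    rc = bnxt_start_xmit_mpc(bp, txr, msg, len, handle);
    spin_unlock(&txr->tx_lock);
    return rc;
}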

View File

@ -0,0 +1,143 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2022-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_MPC_H
#define BNXT_MPC_H
#define BNXT_MPC_TCE_TYPE RING_ALLOC_REQ_MPC_CHNLS_TYPE_TCE
#define BNXT_MPC_RCE_TYPE RING_ALLOC_REQ_MPC_CHNLS_TYPE_RCE
#define BNXT_MPC_TE_CFA_TYPE RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA
#define BNXT_MPC_RE_CFA_TYPE RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA
#define BNXT_MPC_TYPE_MAX (BNXT_MPC_RE_CFA_TYPE + 1)
#define BNXT_MAX_MPC 8
#define BNXT_MIN_MPC_TCE 1
#define BNXT_MIN_MPC_RCE 1
#define BNXT_DFLT_MPC_TCE BNXT_MAX_MPC
#define BNXT_DFLT_MPC_RCE BNXT_MAX_MPC
#define BNXT_MIN_MPC_TE_CFA 1
#define BNXT_MIN_MPC_RE_CFA 1
#define BNXT_DFLT_MPC_TE_CFA BNXT_MAX_MPC
#define BNXT_DFLT_MPC_RE_CFA BNXT_MAX_MPC
/* Defines the number of msgs there are in an MPC msg completion event.
* Used to pass an opaque value into the MPC msg xmit function. The
* completion processing uses this value to ring the doorbell correctly to
* signal "completion event processing complete" to the hardware.
*/
#define BNXT_MPC_COMP_MSG_COUNT 1
#define BNXT_MPC_TMO_MSECS 1000
struct bnxt_mpc_info {
u8 mpc_chnls_cap;
u8 mpc_cp_rings;
u8 mpc_ring_count[BNXT_MPC_TYPE_MAX];
u16 mpc_tx_start_idx;
struct bnxt_tx_ring_info *mpc_rings[BNXT_MPC_TYPE_MAX];
};
enum bnxt_mpc_chnl {
BNXT_MPC_CHNL_TCE = 0,
BNXT_MPC_CHNL_RCE = 1,
BNXT_MPC_CHNL_TE_CFA = 2,
BNXT_MPC_CHNL_RE_CFA = 3,
BNXT_MPC_CHNL_PRIMATE = 4,
BNXT_MPC_CHNL_MAX = 5,
};
struct bnxt_sw_mpc_tx_bd {
u8 inline_bds;
unsigned long handle;
};
#define SW_MPC_TXBD_RING_SIZE (sizeof(struct bnxt_sw_mpc_tx_bd) * TX_DESC_CNT)
struct bnxt_cmpl_entry {
void *cmpl;
u32 len;
};
struct mpc_cmp {
__le32 mpc_cmp_client_subtype_type;
#define MPC_CMP_TYPE (0x3f << 0)
#define MPC_CMP_TYPE_MID_PATH_SHORT 0x1e
#define MPC_CMP_TYPE_MID_PATH_LONG 0x1f
#define MPC_CMP_SUBTYPE 0xf00
#define MPC_CMP_SUBTYPE_SFT 8
#define MPC_CMP_SUBTYPE_SOLICITED (0x0 << 8)
#define MPC_CMP_SUBTYPE_ERR (0x1 << 8)
#define MPC_CMP_SUBTYPE_RESYNC (0x2 << 8)
#define MPC_CMP_CLIENT (0xf << 12)
#define MPC_CMP_CLIENT_SFT 12
#define MPC_CMP_CLIENT_TCE (0x0 << 12)
#define MPC_CMP_CLIENT_RCE (0x1 << 12)
#define MPC_CMP_CLIENT_TE_CFA (0x2 << 12)
#define MPC_CMP_CLIENT_RE_CFA (0x3 << 12)
u32 mpc_cmp_opaque;
__le32 mpc_cmp_v;
#define MPC_CMP_V (1 << 0)
__le32 mpc_cmp_filler;
};
#define MPC_CMP_CMP_TYPE(mpcmp) \
(le32_to_cpu((mpcmp)->mpc_cmp_client_subtype_type) & MPC_CMP_TYPE)
#define MPC_CMP_CLIENT_TYPE(mpcmp) \
(le32_to_cpu((mpcmp)->mpc_cmp_client_subtype_type) & MPC_CMP_CLIENT)
#define MPC_CMP_UNSOLICIT_SUBTYPE(mpcmp) \
((le32_to_cpu((mpcmp)->mpc_cmp_client_subtype_type) & \
MPC_CMP_SUBTYPE) == MPC_CMP_SUBTYPE_ERR)
#define MPC_CMP_VALID(bp, mpcmp, raw_cons) \
(!!((mpcmp)->mpc_cmp_v & cpu_to_le32(MPC_CMP_V)) == \
!((raw_cons) & (bp)->cp_bit))
#define BNXT_MPC_CRYPTO_CAP \
(FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE | FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RCE)
#define BNXT_MPC_CRYPTO_CAPABLE(bp) \
((bp)->mpc_info ? \
((bp)->mpc_info->mpc_chnls_cap & BNXT_MPC_CRYPTO_CAP) == \
BNXT_MPC_CRYPTO_CAP : false)
#define BNXT_MPC_CFA_CAP \
(FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA | FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA)
#define BNXT_MPC_CFA_CAPABLE(bp) \
((bp)->mpc_info ? \
((bp)->mpc_info->mpc_chnls_cap & BNXT_MPC_CFA_CAP) == \
BNXT_MPC_CFA_CAP : false)
void bnxt_alloc_mpc_info(struct bnxt *bp, u8 mpc_chnls_cap);
void bnxt_free_mpc_info(struct bnxt *bp);
int bnxt_mpc_tx_rings_in_use(struct bnxt *bp);
int bnxt_mpc_cp_rings_in_use(struct bnxt *bp);
bool bnxt_napi_has_mpc(struct bnxt *bp, int i);
void bnxt_set_mpc_cp_ring(struct bnxt *bp, int bnapi_idx,
struct bnxt_cp_ring_info *cpr);
void bnxt_trim_mpc_rings(struct bnxt *bp);
void bnxt_set_dflt_mpc_rings(struct bnxt *bp);
void bnxt_init_mpc_ring_struct(struct bnxt *bp);
int bnxt_alloc_mpcs(struct bnxt *bp);
void bnxt_free_mpcs(struct bnxt *bp);
int bnxt_alloc_mpc_rings(struct bnxt *bp);
void bnxt_free_mpc_rings(struct bnxt *bp);
void bnxt_init_mpc_rings(struct bnxt *bp);
int bnxt_hwrm_mpc_ring_alloc(struct bnxt *bp);
void bnxt_hwrm_mpc_ring_free(struct bnxt *bp, bool close_path);
int bnxt_start_xmit_mpc(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
void *data, uint len, unsigned long handle);
int bnxt_mpc_cmp(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, u32 *raw_cons);
#endif
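
The MPC_CMP_VALID() macro encodes the usual completion-ring phase convention: the producer flips the V bit on every lap of the ring, so an entry is valid when its V bit is the complement of the lap-parity bit carried in the consumer's raw index. A hedged consumer-side polling sketch using the names above:

/* Hedged sketch of a consumer polling loop; doorbell writes and error
 * handling are omitted.
 */
static void mpc_poll_sketch(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
    u32 raw_cons = cpr->cp_raw_cons;

    for (;;) {
        u16 cons = RING_CMP(raw_cons);
        struct mpc_cmp *mpcmp = (struct mpc_cmp *)
            &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

        /* Valid when the V bit is the complement of the lap-parity
         * bit in raw_cons (raw_cons & bp->cp_bit).
         */
        if (!MPC_CMP_VALID(bp, mpcmp, raw_cons))
            break;
        dma_rmb();  /* valid test must precede payload reads */
        if (bnxt_mpc_cmp(bp, cpr, &raw_cons))
            break;  /* second half of a long completion not ready */
        raw_cons = NEXT_RAW_CMP(raw_cons);
    }
    cpr->cp_raw_cons = raw_cons;
}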

View File

@ -0,0 +1,985 @@
/*
* netmap support for Broadcom bnxt Ethernet driver on Linux
*
* Copyright (C) 2015-2018 British Broadcasting Corporation. All rights reserved.
*
* Author: Stuart Grace, BBC Research & Development
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Some portions are:
*
* Copyright (C) 2012-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Some portions are:
*
* Copyright (c) 2018-2023 Broadcom Inc.
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __BNXT_NETMAP_LINUX_H__
#define __BNXT_NETMAP_LINUX_H__
#include <bsd_glue.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#ifdef NETMAP_BNXT_MAIN
#define NM_BNXT_ADAPTER bnxt
/* No. of shadow AGG rings; for now stick to 1 ==> same size as normal ring */
#define AGG_NM_RINGS 1
/*
* Register/unregister. We are already under netmap lock.
* Only called on the first register or the last unregister.
*/
int bnxt_netmap_reg(struct netmap_adapter *na, int onoff)
{
struct ifnet *ifp = na->ifp;
struct NM_BNXT_ADAPTER *bp = netdev_priv(ifp);
int err = 0;
nm_prinf("bnxt switching %s native netmap mode", onoff ? "into" : "out of");
if (netif_running(ifp))
bnxt_close_nic(bp, true, false);
/* enable or disable flags and callbacks in na and ifp */
if (onoff) {
nm_set_native_flags(na);
if (!(bp->flags & BNXT_FLAG_JUMBO)) {
bp->flags &= ~BNXT_FLAG_AGG_RINGS;
bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
if (bp->flags & BNXT_FLAG_LRO) {
bp->dev->hw_features &= ~NETIF_F_LRO;
bp->dev->features &= ~NETIF_F_LRO;
netdev_update_features(bp->dev);
}
}
bp->flags |= BNXT_FLAG_DIM;
} else {
bp->flags |= BNXT_FLAG_AGG_RINGS;
bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
if (bp->flags & BNXT_FLAG_LRO) {
bp->dev->hw_features |= NETIF_F_LRO;
bp->dev->features |= NETIF_F_LRO;
netdev_update_features(bp->dev);
}
bp->flags &= ~(BNXT_FLAG_DIM);
nm_clear_native_flags(na);
}
if (netif_running(ifp))
return bnxt_open_nic(bp, true, false);
return err;
}
void bnxt_netmap_txflush(struct bnxt_tx_ring_info *txr)
{
struct bnxt *bp = txr->bnapi->bp;
struct bnxt_cp_ring_info *cpr2;
struct bnxt_db_info *db;
u32 raw_cons, tgl = 0;
struct tx_cmp *txcmp;
u16 cons;
cpr2 = txr->tx_cpr;
raw_cons = cpr2->cp_raw_cons;
while (1) {
u8 cmp_type;
cons = RING_CMP(raw_cons);
txcmp = &cpr2->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
if (!TX_CMP_VALID(txcmp, raw_cons))
break;
/* The valid test of the entry must be done first before
* reading any further.
*/
dma_rmb();
cmp_type = TX_CMP_TYPE(txcmp);
if (cmp_type == CMP_TYPE_TX_L2_CMP ||
cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
u32 opaque = txcmp->tx_cmp_opaque;
if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
else
txr->tx_hw_cons = TX_OPAQUE_IDX(opaque);
raw_cons = NEXT_RAW_CMP(raw_cons);
}
}
if (raw_cons != cpr2->cp_raw_cons) {
tgl = cpr2->toggle;
db = &cpr2->cp_db;
cpr2->cp_raw_cons = raw_cons;
/* barrier - before arming the cq */
wmb();
bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL | DB_TOGGLE(tgl) |
DB_RING_IDX(db, cpr2->cp_raw_cons), db->doorbell);
}
}
/*
* Reconcile kernel and user view of the transmit ring.
*
* Userspace wants to send packets up to the one before ring->head,
* kernel knows kring->nr_hwcur is the first unsent packet.
*
* Here we push packets out (as many as possible), and possibly
* reclaim buffers from previously completed transmission.
*
* ring->tail is updated on return.
* ring->head is never used here.
*
* The caller (netmap) guarantees that there is only one instance
* running at any time. Any interference with other driver
* methods should be handled by the individual drivers.
*/
int bnxt_netmap_txsync(struct netmap_kring *kring, int flags)
{
u_int const lim = kring->nkr_num_slots - 1;
struct netmap_ring *ring = kring->ring;
struct netmap_adapter *na = kring->na;
struct bnxt_cp_ring_info *cpr2;
u_int const head = kring->rhead;
struct ifnet *ifp = na->ifp;
u_int nm_i; /* index into the netmap ring */
u_int n;
/*
* interrupts on every tx packet are expensive so request
* them every half ring, or where NS_REPORT is set
*/
u_int tosync;
/* device-specific */
struct NM_BNXT_ADAPTER *bp = netdev_priv(ifp);
u16 prod = 0, cons, hw_cons, nr_frags = 0;
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_tx_bd *tx_buf;
struct tx_bd *txbd, *txbd0;
u32 raw_cons, tgl = 0;
struct tx_cmp *txcmp;
struct bnxt_db_info *db;
u16 prod0;
if (!netif_carrier_ok(ifp) || !netif_running(ifp))
return 0;
txr = &bp->tx_ring[bp->tx_ring_map[kring->ring_id]];
if (unlikely(!txr)) {
nm_prlim(1, "ring %s is missing (txr=%p)", kring->name, txr);
return -ENXIO;
}
/*
* First part: process new packets to send.
* nm_i is the current index in the netmap ring,
*
* If we have packets to send (kring->nr_hwcur != kring->rhead)
* iterate over the netmap ring, fetch length and update
* the corresponding slot in the NIC ring. Some drivers also
* need to update the buffer's physical address in the NIC slot
* even NS_BUF_CHANGED is not set (PNMB computes the addresses).
*
* The netmap_reload_map() calls is especially expensive,
* even when (as in this case) the tag is 0, so do only
* when the buffer has actually changed.
*
* If possible do not set the report/intr bit on all slots,
* but only a few times per ring or when NS_REPORT is set.
*
* Finally, on 10G and faster drivers, it might be useful
* to prefetch the next slot and txr entry.
*/
nm_i = kring->nr_hwcur;
if (nm_i != head) { /* we have new packets to send */
nm_prdis("new pkts to send nm_i: %d head: %d\n", nm_i, head);
__builtin_prefetch(&ring->slot[nm_i]);
for (n = 0; nm_i != head; n++) {
struct netmap_slot *slot = &ring->slot[nm_i];
u_int len = slot->len, bd0_len;
uint64_t paddr;
uint64_t offset = nm_get_offset(kring, slot);
/* device-specific */
if (bnxt_tx_avail(bp, txr) < 1) {
nm_prinf("NO TX AVAIL!\n");
break;
}
prod = txr->tx_prod; /* producer index */
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
/* prefetch for next round */
__builtin_prefetch(&ring->slot[nm_i + 1]);
__builtin_prefetch(&txr->tx_desc_ring[TX_RING(bp, prod + 1)][TX_IDX(prod + 1)]);
PNMB(na, slot, &paddr);
NM_CHECK_ADDR_LEN_OFF(na, len, offset);
/* Fill the slot in the NIC ring. */
txbd->tx_bd_haddr = cpu_to_le64(paddr + offset);
netmap_sync_map_dev(na, (bus_dma_tag_t)na->pdev, &paddr, len, NR_TX);
flags = (len << TX_BD_LEN_SHIFT) |
((nr_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
bnxt_lhint_arr[len >> 9];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd0 = txbd;
prod0 = prod;
bd0_len = len;
if (slot->flags & NS_MOREFRAG) {
nr_frags++;
for (;;) {
nm_i = nm_next(nm_i, lim);
/* remember that we have to ask for a
* report each time we move past half a
* ring
*/
if (nm_i == head) {
/* XXX should we accept incomplete packets? */
return -EINVAL;
}
slot = &ring->slot[nm_i];
len = slot->len;
PNMB(na, slot, &paddr);
offset = nm_get_offset(kring, slot);
NM_CHECK_ADDR_LEN_OFF(na, len, offset);
prod = NEXT_TX(prod);
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
txbd->tx_bd_haddr = cpu_to_le64(paddr + offset);
flags = len << TX_BD_LEN_SHIFT;
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
netmap_sync_map_dev(na, (bus_dma_tag_t)na->pdev,
&paddr, len, NR_TX);
if (!(slot->flags & NS_MOREFRAG))
break;
nr_frags++;
}
tx_buf->nr_frags = nr_frags;
nr_frags = 0;
flags = (bd0_len << TX_BD_LEN_SHIFT) |
((tx_buf->nr_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
bnxt_lhint_arr[bd0_len >> 9];
txbd0->tx_bd_len_flags_type = cpu_to_le32(flags);
}
slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED | NS_MOREFRAG);
flags &= ~TX_BD_LEN;
txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) |
flags | TX_BD_FLAGS_PACKET_END);
prod = NEXT_TX(prod);
txbd0->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod0,
tx_buf->nr_frags);
txr->tx_prod = prod;
nm_i = nm_next(nm_i, lim);
}
kring->nr_hwcur = head;
/* synchronize the NIC ring */
nm_prdis("calling bnxt_txr_db_kick with prod:%d cons: %d nr_hwtail: %d\n",
prod, txr->tx_cons, kring->nr_hwtail);
bnxt_txr_db_kick(bp, txr, prod);
}
/*
* Second part: reclaim buffers for completed transmissions.
*/
cpr2 = txr->tx_cpr;
raw_cons = cpr2->cp_raw_cons;
while (1) {
u8 cmp_type;
cons = RING_CMP(raw_cons);
txcmp = &cpr2->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
if (!TX_CMP_VALID(txcmp, raw_cons))
break;
/* The valid test of the entry must be done first before
* reading any further.
*/
dma_rmb();
cmp_type = TX_CMP_TYPE(txcmp);
if (cmp_type == CMP_TYPE_TX_L2_CMP ||
cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
u32 opaque = txcmp->tx_cmp_opaque;
if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
else
txr->tx_hw_cons = TX_OPAQUE_IDX(opaque);
raw_cons = NEXT_RAW_CMP(raw_cons);
}
}
if (raw_cons != cpr2->cp_raw_cons) {
tgl = cpr2->toggle;
db = &cpr2->cp_db;
cpr2->cp_raw_cons = raw_cons;
/* barrier - before arming the cq */
wmb();
bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL | DB_TOGGLE(tgl) |
DB_RING_IDX(db, cpr2->cp_raw_cons),
db->doorbell);
}
tosync = nm_next(kring->nr_hwtail, lim);
hw_cons = txr->tx_hw_cons;
cons = txr->tx_cons;
n = 0;
while (RING_TX(bp, cons) != hw_cons) {
/* some tx completed, increment avail */
/* sync all buffers that we are returning to userspace */
struct netmap_slot *slot = &ring->slot[tosync];
struct bnxt_sw_tx_bd *tx_buf;
uint64_t paddr;
int j, last;
(void)PNMB_O(kring, slot, &paddr);
tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
netmap_sync_map_cpu(na, (bus_dma_tag_t)na->pdev, &paddr, slot->len, NR_TX);
tosync = nm_next(tosync, lim);
kring->nr_hwtail = nm_prev(tosync, lim);
last = tx_buf->nr_frags;
for (j = 0; j < last; j++) {
slot = &ring->slot[tosync];
(void)PNMB_O(kring, slot, &paddr);
cons = NEXT_TX(cons);
netmap_sync_map_cpu(na, (bus_dma_tag_t)na->pdev, &paddr, slot->len, NR_TX);
tosync = nm_next(tosync, lim);
kring->nr_hwtail = nm_prev(tosync, lim);
}
cons = NEXT_TX(cons);
n++;
}
if (n) {
nm_prdis("tx_completed [%d] kring->nr_hwtail: %d\n", n, kring->nr_hwtail);
txr->tx_cons = cons;
}
return 0;
}
int __bnxt_netmap_rxsync(struct netmap_kring *kring, int flags)
{
u_int const lim = kring->nkr_num_slots - 1;
struct netmap_adapter *na = kring->na;
struct netmap_ring *ring = kring->ring;
u_int const head = kring->rhead;
u_int stop_i = nm_prev(head, lim); /* stop reclaiming here */
u_int ring_nr = kring->ring_id;
struct ifnet *ifp = na->ifp;
uint16_t slot_flags = 0;
u_int nm_i = 0; /* index into the netmap ring */
/* device-specific */
struct NM_BNXT_ADAPTER *bp = netdev_priv(ifp);
u32 cp_cons, tmp_raw_cons = 0, real_cons = 0;
struct bnxt_rx_ring_info *rxr;
struct bnxt_cp_ring_info *cpr;
u32 lflags, work_done = 0;
struct rx_cmp_ext *rxcmp1;
struct bnxt_db_info *db;
struct rx_cmp *rxcmp;
u32 tgl = 0, len;
uint64_t paddr;
rxr = &bp->rx_ring[kring->ring_id];
cpr = rxr->rx_cpr;
/*
* First part: reclaim buffers that userspace has released:
* (from kring->nr_hwcur to second last [*] slot before ring->head)
* and make the buffers available for reception.
* As usual nm_i is the index in the netmap ring.
* [*] IMPORTANT: we must leave one free slot in the ring
* to avoid ring empty/full confusion in userspace.
*/
nm_i = kring->nr_hwcur;
stop_i = nm_prev(kring->rhead, lim);
if (nm_i != stop_i) {
struct netmap_slot *slot;
u32 prod = rxr->rx_prod;
struct rx_bd *rxbd;
uint64_t offset;
void *addr;
while (nm_i != stop_i) {
slot = &ring->slot[nm_i];
offset = nm_get_offset(kring, slot);
addr = PNMB(na, slot, &paddr); /* find phys address */
if (unlikely(addr == NETMAP_BUF_BASE(na))) { /* bad buf */
nm_prinf("Resetting RX ring %u\n", ring_nr);
goto ring_reset;
}
if (slot->flags & NS_BUF_CHANGED)
slot->flags &= ~NS_BUF_CHANGED;
rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
netmap_sync_map_dev(na, (bus_dma_tag_t)na->pdev, &paddr,
NETMAP_BUF_SIZE(na), NR_RX);
rxbd->rx_bd_haddr = cpu_to_le64(paddr + offset);
prod = NEXT_RX(prod);
nm_i = nm_next(nm_i, lim);
}
rxr->rx_prod = prod;
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
kring->nr_hwcur = nm_i;
}
/*
* Second part: import newly received packets.
* We are told about received packets by CQEs in the CQ.
*
* nm_i is the index of the next free slot in the netmap ring:
*/
rmb();
real_cons = cpr->cp_raw_cons;
cp_cons = RING_CMP(real_cons);
nm_i = kring->nr_hwtail;
stop_i = nm_prev(kring->nr_hwcur, lim);
while (nm_i != stop_i) {
rxcmp = (struct rx_cmp *)&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
tmp_raw_cons = NEXT_RAW_CMP(real_cons);
cp_cons = RING_CMP(tmp_raw_cons);
rxcmp1 = (struct rx_cmp_ext *)
&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
break;
dma_rmb();
lflags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
len = lflags >> RX_CMP_LEN_SHIFT;
ring->slot[nm_i].len = len;
ring->slot[nm_i].flags = slot_flags;
PNMB_O(kring, &ring->slot[nm_i], &paddr);
netmap_sync_map_cpu(na, (bus_dma_tag_t)na->pdev,
&paddr, len, NR_RX);
nm_i = nm_next(nm_i, lim);
tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
cp_cons = RING_CMP(tmp_raw_cons);
real_cons = tmp_raw_cons;
work_done++;
}
if (work_done) {
kring->nr_hwtail = nm_i;
cpr->cp_raw_cons = real_cons;
tgl = cpr->toggle;
db = &cpr->cp_db;
/* barrier - TBD revisit? */
wmb();
bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL | DB_TOGGLE(tgl) |
DB_RING_IDX(db, cpr->cp_raw_cons),
db->doorbell);
kring->nr_kflags &= ~NKR_PENDINTR;
}
return 0;
ring_reset:
return netmap_ring_reinit(kring);
}
#define SLOT_SWAP(s1, s2) do { \
u32 tmp; \
tmp = (s1)->buf_idx; \
(s1)->buf_idx = (s2)->buf_idx; \
(s2)->buf_idx = tmp; \
(s1)->flags |= NS_BUF_CHANGED; \
(s2)->flags |= NS_BUF_CHANGED; \
} while (0)
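/* SLOT_SWAP exchanges only netmap buffer indices between the user-visible
 * ring and a shadow ring, handing received payloads to userspace without a
 * copy; NS_BUF_CHANGED makes netmap recompute the physical address on the
 * next sync.
 */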
int bnxt_netmap_rxsync_jumbo(struct netmap_kring *kring, int flags)
{
u_int const lim = kring->nkr_num_slots - 1;
struct netmap_adapter *na = kring->na;
struct netmap_ring *ring = kring->ring;
struct netmap_kring *base_kring;
struct netmap_ring *base_nmring;
struct netmap_kring *agg_kring;
struct netmap_ring *agg_nmring;
u_int const head = kring->rhead;
u_int stop_i = nm_prev(head, lim); /* stop reclaiming here */
struct ifnet *ifp = na->ifp;
uint16_t slot_flags = 0;
uint32_t rx_ring_id = 0;
u_int nm_i = 0; /* index into the netmap ring */
/* device-specific */
struct NM_BNXT_ADAPTER *bp = netdev_priv(ifp);
u32 cp_cons, tmp_raw_cons = 0, real_cons = 0;
struct bnxt_rx_ring_info *rxr;
struct bnxt_cp_ring_info *cpr;
u32 lflags, work_done = 0;
struct rx_cmp_ext *rxcmp1;
struct bnxt_db_info *db;
/* jumbo specific */
u32 tgl = 0, len, misc, total_frag_len = 0;
u16 rx_prod, rx_agg_prod, rx_sw_agg_prod;
struct rx_cmp *rxcmp;
struct rx_bd *rxbd;
uint64_t paddr;
u8 agg_bufs;
int i;
/* 0,3,6,N... are the actual rings that will be used by app/userspace
* while [1,2, 4,5, N+1,N+2...] are the shadow rings that map to the base HW
* ring and AGG rings respectively
*/
if ((kring->ring_id % (2 + AGG_NM_RINGS)) != 0)
return 0;
rx_ring_id = kring->ring_id / (2 + AGG_NM_RINGS);
rxr = &bp->rx_ring[rx_ring_id];
cpr = rxr->rx_cpr;
base_kring = na->rx_rings[kring->ring_id + 1];
base_nmring = base_kring->ring;
agg_kring = na->rx_rings[kring->ring_id + 2];
agg_nmring = agg_kring->ring;
if (unlikely(kring->nr_mode == NKR_NETMAP_OFF) ||
base_kring->nr_mode == NKR_NETMAP_OFF || agg_kring->nr_mode == NKR_NETMAP_OFF)
return 0;
/*
* First part: reclaim buffers that userspace has released:
* (from kring->nr_hwcur to second last [*] slot before ring->head)
* and make the buffers available for reception.
* For ring N+0 nothing to be done for the buffers that userspace has released.
* Those are not to be published to the hardware RX ring because the buffer refill
* has happened at slot swap time. So a simple kring->nr_hwcur = kring->rhead
* should be enough. Also, since tail, head and cur are frozen for rings N+1 and N+2,
* rxsync would be a NOP for those.
* In the end, all real work happens in the "import newly received packets" part of the
* rxsync for ring N+0.
*/
kring->nr_hwcur = kring->rhead;
/*
* Second part: import newly received packets.
* We are told about received packets by CQEs in the CQ.
*
* nm_i is the index of the next free slot in the netmap ring:
*/
rmb();
real_cons = cpr->cp_raw_cons;
cp_cons = RING_CMP(real_cons);
nm_i = kring->nr_hwtail;
stop_i = nm_prev(kring->nr_hwcur, lim);
while (nm_i != stop_i) {
rx_agg_prod = rxr->rx_agg_prod;
rx_sw_agg_prod = rxr->rx_sw_agg_prod;
rx_prod = rxr->rx_prod;
rxcmp = (struct rx_cmp *)&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
tmp_raw_cons = NEXT_RAW_CMP(real_cons);
cp_cons = RING_CMP(tmp_raw_cons);
rxcmp1 = (struct rx_cmp_ext *)
&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
break;
dma_rmb();
lflags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
len = lflags >> RX_CMP_LEN_SHIFT;
misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
if (agg_bufs) {
int space = stop_i - nm_i;
if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
break;
if (space < 0)
space += kring->nkr_num_slots;
if (space < agg_bufs) {
nm_prinf(" Not enough space!! space_rem: %d agg_bufs: %d\n",
space, agg_bufs);
break;
}
slot_flags |= NS_MOREFRAG;
}
BUG_ON(rxcmp->rx_cmp_opaque > lim);
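/* rx_cmp_opaque carries the producer index stored in rx_bd_opaque when
 * the base ring BD was refilled, i.e. the slot in the shadow base ring
 * that holds this packet's buffer.
 */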
SLOT_SWAP(&ring->slot[nm_i], &base_nmring->slot[rxcmp->rx_cmp_opaque]);
/* Now that the SLOT SWAP is done, refill the base HW ring BD
* with the new address got from the application ring
*/
rxbd = &rxr->rx_desc_ring[RX_RING(bp, rx_prod)][RX_IDX(rx_prod)];
PNMB_O(base_kring, &base_nmring->slot[rxcmp->rx_cmp_opaque], &paddr);
rxbd->rx_bd_haddr = cpu_to_le64(paddr);
rxbd->rx_bd_opaque = RING_RX(bp, rx_prod);
ring->slot[nm_i].len = len;
ring->slot[nm_i].flags = slot_flags;
PNMB_O(kring, &ring->slot[nm_i], &paddr);
netmap_sync_map_cpu(na, (bus_dma_tag_t)na->pdev,
&paddr, len, NR_RX);
nm_prdis("BEG kring->nr_hwtail: %d slot[%d].len: %d flags: %d agg_bufs: %d rx_cmp_opaque: %d\n",
kring->nr_hwtail, nm_i, len, ring->slot[nm_i].flags, agg_bufs, rxcmp->rx_cmp_opaque);
nm_i = nm_next(nm_i, lim);
if (agg_bufs) {
cp_cons = NEXT_CMP(cp_cons);
for (i = 0; i < agg_bufs; i++) {
u16 cons, frag_len;
struct rx_agg_cmp *agg;
agg = bnxt_get_agg(bp, cpr, cp_cons, i);
cons = agg->rx_agg_cmp_opaque;
frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
agg_nmring = agg_kring->ring;
BUG_ON(cons > lim);
SLOT_SWAP(&ring->slot[nm_i], &agg_nmring->slot[cons]);
/* Now that the SLOT SWAP is done, refill the AGG HW ring BD
* with the new address got from the application ring
*/
rxbd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, rx_agg_prod)][RX_IDX(rx_agg_prod)];
PNMB_O(agg_kring, &agg_nmring->slot[cons], &paddr);
rxbd->rx_bd_haddr = cpu_to_le64(paddr);
rxbd->rx_bd_opaque = rx_sw_agg_prod;
slot_flags = (i < (agg_bufs - 1)) ? NS_MOREFRAG : 0;
ring->slot[nm_i].len = frag_len;
ring->slot[nm_i].flags = slot_flags;
PNMB_O(kring, &ring->slot[nm_i], &paddr);
netmap_sync_map_cpu(na, (bus_dma_tag_t)na->pdev,
&paddr, frag_len, NR_RX);
total_frag_len += frag_len;
nm_prdis("slot[%d].len: %d flags: %d agg_ring_cons: %d bd_opaque: %d rx_agg_prod: %d\n",
nm_i, ring->slot[nm_i].len, ring->slot[nm_i].flags, cons, rxbd->rx_bd_opaque, rx_agg_prod);
nm_i = nm_next(nm_i, lim);
rx_agg_prod = NEXT_RX_AGG(rx_agg_prod);
rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(rx_sw_agg_prod));
}
rxr->rx_agg_prod = rx_agg_prod;
rxr->rx_sw_agg_prod = rx_sw_agg_prod;
}
tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
cp_cons = RING_CMP(tmp_raw_cons);
real_cons = tmp_raw_cons;
rxr->rx_prod = NEXT_RX(rx_prod);
work_done++;
}
if (work_done) {
kring->nr_hwtail = nm_i;
cpr->cp_raw_cons = real_cons;
tgl = cpr->toggle;
db = &cpr->cp_db;
/* barrier - TBD revisit? */
wmb();
bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL | DB_TOGGLE(tgl) |
DB_RING_IDX(db, cpr->cp_raw_cons), db->doorbell);
kring->nr_kflags &= ~NKR_PENDINTR;
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
nm_prdis("END cp_raw_cons: %d kring->nr_hwtail : %d rx_prod: %d rx_agg_prod: %d\n",
cpr->cp_raw_cons, kring->nr_hwtail, rxr->rx_prod, rxr->rx_agg_prod);
}
return 0;
}
/*
* Reconcile kernel and user view of the receive ring.
* Same as for the txsync, this routine must be efficient.
* The caller guarantees a single invocation, but races against
* the rest of the driver should be handled here.
*
* When called, userspace has released buffers up to ring->head
* (last one excluded).
*
* If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
* of whether or not we received an interrupt.
*/
int bnxt_netmap_rxsync(struct netmap_kring *kring, int flags)
{
u_int const lim = kring->nkr_num_slots - 1;
struct netmap_adapter *na = kring->na;
u_int const head = kring->rhead;
struct ifnet *ifp = na->ifp;
/* device-specific */
struct NM_BNXT_ADAPTER *bp = netdev_priv(ifp);
if (!netif_carrier_ok(ifp) || !netif_running(ifp))
return 0;
if (unlikely(head > lim))
return netmap_ring_reinit(kring);
if (!(bp->flags & BNXT_FLAG_JUMBO))
return __bnxt_netmap_rxsync(kring, flags);
return bnxt_netmap_rxsync_jumbo(kring, flags);
}
/*
* if in netmap mode, attach the netmap buffers to the ring and return true.
* Otherwise return false.
*/
int bnxt_netmap_configure_tx_ring(struct NM_BNXT_ADAPTER *adapter,
int ring_nr)
{
struct netmap_adapter *na = NA(adapter->dev);
struct bnxt_tx_ring_info *txr;
struct netmap_slot *slot;
slot = netmap_reset(na, NR_TX, ring_nr, 0);
if (!slot)
return 0; /* not in native netmap mode */
txr = &adapter->tx_ring[adapter->tx_ring_map[ring_nr]];
txr->tx_cpr->netmapped = 1;
txr->bnapi->cp_ring.netmapped = 1;
/*
* On some cards we would set up the slot addresses now.
* But on bnxt, the address will be written to the WQ when
* each packet arrives in bnxt_netmap_txsync
*/
return 1;
}
int bnxt_netmap_configure_rx_ring(struct NM_BNXT_ADAPTER *adapter, struct bnxt_rx_ring_info *rxr)
{
/*
* In netmap mode, we must preserve the buffers made
* available to userspace before the if_init()
* (this is true by default on the TX side, because
* init makes all buffers available to userspace).
*/
struct netmap_adapter *na = NA(adapter->dev);
struct netmap_slot *slot;
int count = 0, i;
int lim, ring_nr = rxr->netmap_idx;
struct rx_bd *rxbd;
u32 prod;
struct ifnet *ifp = na->ifp;
struct NM_BNXT_ADAPTER *bp = netdev_priv(ifp);
slot = netmap_reset(na, NR_RX, ring_nr, 0);
if (!slot)
return 0; /* not in native netmap mode */
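/* Leave the slots netmap reserves (nm_kr_rxspace()) unpublished so the
 * HW ring is never completely filled.
 */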
lim = na->num_rx_desc - 1 - nm_kr_rxspace(na->rx_rings[ring_nr]);
rxr->rx_prod = 0;
prod = rxr->rx_prod;
/* Set this up front so that even if the NM ring reset fails,
 * the netmapped flag is set and ring_free will not time out
 * during teardown
 */
rxr->rx_cpr->netmapped = 1;
if (bp->flags & BNXT_FLAG_JUMBO) {
slot = netmap_reset(na, NR_RX, ring_nr + 1, 0);
if (!slot)
return 0; /* not in native netmap mode */
while (count < lim) {
uint64_t paddr;
rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
PNMB_O(na->rx_rings[ring_nr + 1], &slot[count], &paddr);
rxbd->rx_bd_haddr = cpu_to_le64(paddr);
rxbd->rx_bd_opaque = prod;
prod = NEXT_RX(prod);
count++;
}
nm_prdis("populated %d Rx bufs in ring %d rxr: %p lim = %d",
count, ring_nr + 1, rxr, lim);
rxr->rx_prod = prod;
rxr->rx_next_cons = 0;
rxr->rx_agg_prod = 0;
prod = rxr->rx_agg_prod;
for (i = 0; i < AGG_NM_RINGS; i++) {
slot = netmap_reset(na, NR_RX, ring_nr + 2 + i, 0);
if (!slot)
return 0; /* not in native netmap mode */
count = 0;
while (count < lim) {
uint64_t paddr;
rxbd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
PNMB_O(na->rx_rings[ring_nr + 2 + i], &slot[count], &paddr);
rxbd->rx_bd_haddr = cpu_to_le64(paddr);
rxbd->rx_bd_opaque = prod;
prod = NEXT_RX_AGG(prod);
count++;
}
nm_prdis("populated %d Rx AGG bufs in ring %d prod = %d",
count, ring_nr + 2 + i, prod);
}
rxr->rx_agg_prod = prod;
rxr->rx_sw_agg_prod = prod;
} else {
while (count < lim) {
uint64_t paddr;
rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
PNMB_O(na->rx_rings[ring_nr], slot + count, &paddr);
rxbd->rx_bd_haddr = cpu_to_le64(paddr);
rxbd->rx_bd_opaque = prod;
prod = NEXT_RX(prod);
count++;
}
nm_prdis("populated %d Rx bufs in ring %d lim = %d", count, ring_nr, lim);
}
/* ensure wqes are visible to device before updating doorbell record */
wmb();
if (bp->flags & BNXT_FLAG_JUMBO)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
return 1;
}
int bnxt_netmap_config(struct netmap_adapter *na, struct nm_config_info *info)
{
struct ifnet *ifp = na->ifp;
struct NM_BNXT_ADAPTER *bp;
bp = netdev_priv(ifp);
info->num_tx_rings = bp->tx_nr_rings_per_tc;
info->num_rx_rings = bp->rx_nr_rings;
if (bp->dev->mtu > NETMAP_BUF_SIZE(na) || bp->flags & BNXT_FLAG_JUMBO) {
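/* Each HW RX ring is exposed as one user-visible ring plus a shadow
 * base ring and AGG_NM_RINGS shadow aggregation rings; assuming
 * AGG_NM_RINGS == 1, for example, 8 HW rings show up as 24 netmap
 * RX rings.
 */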
info->num_rx_rings = 2 * info->num_rx_rings + info->num_rx_rings * AGG_NM_RINGS;
info->rx_buf_maxsize = BNXT_RX_PAGE_SIZE;
} else {
info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);
}
info->num_tx_descs = bp->tx_ring_size + 1;
info->num_rx_descs = bp->rx_ring_size + 1;
return 0;
}
/*
* The attach routine, called at the end of bnxt_create_netdev(),
* fills the parameters for netmap_attach() and calls it.
* It cannot fail, in the worst case (such as no memory)
* netmap mode will be disabled and the driver will only
* operate in standard mode.
*/
void bnxt_netmap_attach(struct NM_BNXT_ADAPTER *adapter)
{
struct netmap_adapter na;
bzero(&na, sizeof(na));
na.ifp = adapter->dev;
na.pdev = &adapter->pdev->dev;
na.na_flags = NAF_MOREFRAG;
na.num_tx_desc = adapter->tx_ring_size + 1;
na.num_rx_desc = adapter->rx_ring_size + 1;
na.nm_txsync = bnxt_netmap_txsync;
na.nm_rxsync = bnxt_netmap_rxsync;
na.nm_register = bnxt_netmap_reg;
na.nm_config = bnxt_netmap_config;
/* each channel has 1 rx ring and a tx for each tc */
na.num_tx_rings = adapter->tx_nr_rings_per_tc;
na.num_rx_rings = adapter->rx_nr_rings;
na.rx_buf_maxsize = 1500; /* will be overwritten by nm_config */
netmap_attach(&na);
}
#endif /* NETMAP_BNXT_MAIN */
#endif /* __BNXT_NETMAP_LINUX_H__ */
/* end of file */


@@ -0,0 +1,313 @@
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright(c) 2024 Broadcom
* All rights reserved.
*/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "bnxt_compat.h"
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp_flow.h"
#include "bnxt_nic_flow.h"
#include "ulp_nic_flow.h"
#include "bnxt_vfr.h"
#include "tfc.h"
#if defined(CONFIG_BNXT_FLOWER_OFFLOAD)
/* Max number of filters per PF */
#define NIC_FLOW_FILTER_MAX 2
/* Per L2 filter RoCE flow data */
struct nic_flow_roce {
__le64 l2_filter_id;
u8 mac_addr[ETH_ALEN];
u32 l2_ctxt_id;
u32 prof_func;
u32 flow_id;
u64 flow_cnt_hndl;
u32 cnp_flow_id;
u64 cnp_flow_cnt_hndl;
bool in_use;
};
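/* NIC flows are only set up on a PF that advertises both the RX NIC
 * flow capability and UDCC support.
 */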
#define NIC_FLOW_SUPPORTED(bp) \
(BNXT_PF(bp) && BNXT_TF_RX_NIC_FLOW_CAP(bp) && BNXT_UDCC_CAP(bp))
/* NIC flow database */
struct nic_flow_db {
struct nic_flow_roce roce[NIC_FLOW_FILTER_MAX];
};
static int bnxt_hwrm_l2_filter_cfg(struct bnxt *bp, __le64 l2_filter_id,
u32 l2_ctxt_id, u32 prof_func)
{
struct hwrm_cfa_l2_filter_cfg_input *req;
u32 flags;
int rc;
rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_CFG);
if (rc)
return rc;
req->target_id = cpu_to_le16(0xffff);
flags = CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX |
CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP;
req->flags = cpu_to_le32(flags);
req->enables = cpu_to_le32(CFA_L2_FILTER_CFG_REQ_ENABLES_L2_CONTEXT_ID |
CFA_L2_FILTER_CFG_REQ_ENABLES_PROF_FUNC);
req->l2_filter_id = l2_filter_id;
req->l2_context_id = l2_ctxt_id;
req->prof_func = prof_func;
return hwrm_req_send(bp, req);
}
/* This function initializes the NIC Flow feature which allows
* TF to insert NIC flows into the CFA.
*/
int bnxt_nic_flows_init(struct bnxt *bp)
{
struct nic_flow_db *nfdb;
u16 sid = 0;
int rc = 0;
if (!NIC_FLOW_SUPPORTED(bp))
return 0;
nfdb = kzalloc(sizeof(*nfdb), GFP_ATOMIC);
if (!nfdb)
return -ENOMEM;
bp->nic_flow_info = nfdb;
/* Set the session id in TF core to the AFM session */
rc = tfc_session_id_set(bp->tfp, sid);
return rc;
}
void bnxt_nic_flows_deinit(struct bnxt *bp)
{
if (!NIC_FLOW_SUPPORTED(bp))
return;
kfree(bp->nic_flow_info);
bp->nic_flow_info = NULL;
}
int bnxt_nic_flows_open(struct bnxt *bp)
{
int rc = 0;
if (!NIC_FLOW_SUPPORTED(bp))
return rc;
rc = bnxt_tf_port_init(bp, BNXT_TF_FLAG_NICFLOW);
if (rc)
return rc;
rc = bnxt_nic_flows_roce_add(bp);
return rc;
}
void bnxt_nic_flows_close(struct bnxt *bp)
{
if (!NIC_FLOW_SUPPORTED(bp))
return;
bnxt_nic_flows_deinit(bp);
bnxt_tf_port_deinit(bp, BNXT_TF_FLAG_NICFLOW);
}
int bnxt_nic_flows_filter_add(struct bnxt *bp, __le64 l2_filter_id, const u8 *mac_addr)
{
struct nic_flow_db *nfdb = bp->nic_flow_info;
struct nic_flow_roce *nfr;
int i;
if (!NIC_FLOW_SUPPORTED(bp))
return 0;
for (i = 0; i < NIC_FLOW_FILTER_MAX; i++) {
nfr = &nfdb->roce[i];
if (nfr->in_use)
continue;
nfr->l2_filter_id = l2_filter_id;
ether_addr_copy(nfr->mac_addr, mac_addr);
nfr->in_use = true;
netdev_dbg(bp->dev, "%s: filter_id(%llx) mac(%pM)\n", __func__,
l2_filter_id, mac_addr);
return 0;
}
netdev_dbg(bp->dev, "%s: no free NIC flow l2 filter entry\n", __func__);
return -EINVAL;
}
int bnxt_nic_flows_roce_add(struct bnxt *bp)
{
struct nic_flow_db *nfdb = bp->nic_flow_info;
struct nic_flow_roce *nfr;
int rc = 0;
u8 i;
if (!NIC_FLOW_SUPPORTED(bp))
return rc;
/* Bail out if the flow database has not been initialized yet */
if (!bp->nic_flow_info) {
netdev_dbg(bp->dev, "%s: Attempt to add RoCE but db not init\n",
__func__);
return -EINVAL;
}
for (i = 0; i < NIC_FLOW_FILTER_MAX; i++) {
nfr = &nfdb->roce[i];
if (!nfr->in_use)
continue;
rc = bnxt_ulp_nic_flows_roce_add(bp, nfr->l2_filter_id, &nfr->l2_ctxt_id,
&nfr->prof_func, &nfr->flow_id,
&nfr->flow_cnt_hndl, &nfr->cnp_flow_id,
&nfr->cnp_flow_cnt_hndl);
if (rc) {
netdev_dbg(bp->dev, "%s: RoCE NIC flow creation failure(%d)\n",
__func__, rc);
goto error;
}
rc = bnxt_hwrm_l2_filter_cfg(bp, nfr->l2_filter_id, nfr->l2_ctxt_id,
nfr->prof_func);
if (rc) {
netdev_dbg(bp->dev, "%s: L2 filter cfg error(%d)\n",
__func__, rc);
goto error;
}
}
return rc;
error:
/* Best-effort cleanup; preserve the original failure code */
bnxt_nic_flows_roce_rem(bp, nfr->l2_filter_id);
return rc;
}
int bnxt_nic_flows_roce_rem(struct bnxt *bp, __le64 l2_filter_id)
{
struct nic_flow_db *nfdb = bp->nic_flow_info;
struct nic_flow_roce *nfr;
int rc = 0;
u8 i;
if (!NIC_FLOW_SUPPORTED(bp))
return 0;
/* Bail out if the flow database has not been initialized yet */
if (!bp->nic_flow_info)
return 0;
for (i = 0; i < NIC_FLOW_FILTER_MAX; i++) {
nfr = &nfdb->roce[i];
if ((nfr->in_use) && (nfr->l2_filter_id == l2_filter_id)) {
rc = bnxt_ulp_nic_flows_roce_del(bp, l2_filter_id, nfr->l2_ctxt_id,
nfr->prof_func, nfr->flow_id,
nfr->cnp_flow_id);
if (rc)
netdev_dbg(bp->dev, "%s: delete l2_filter_id(%llx) failed rc(%d)\n",
__func__, l2_filter_id, rc);
nfr->l2_filter_id = 0;
nfr->in_use = false;
}
}
return rc;
}
int bnxt_nic_flows_filter_info_get(struct bnxt *bp, __le64 l2_filter_id,
u32 *l2_ctxt_id, u32 *prof_func)
{
struct nic_flow_db *nfdb = bp->nic_flow_info;
struct nic_flow_roce *nfr;
u8 i;
if (!NIC_FLOW_SUPPORTED(bp))
return 0;
if (!bp->nic_flow_info)
return -EINVAL;
for (i = 0; i < NIC_FLOW_FILTER_MAX; i++) {
nfr = &nfdb->roce[i];
if ((nfr->in_use) && (nfr->l2_filter_id == l2_filter_id)) {
*l2_ctxt_id = nfr->l2_ctxt_id;
*prof_func = nfr->prof_func;
return 0;
}
}
netdev_dbg(bp->dev, "%s: l2_filter_id(%llx) not found\n",
__func__, l2_filter_id);
return -ENOENT;
}
int bnxt_nic_flow_dmac_filter_get(struct bnxt *bp, u8 *dmac, __le64 *filter_id)
{
struct nic_flow_db *nfdb = bp->nic_flow_info;
struct nic_flow_roce *nfr;
u8 i;
if (!NIC_FLOW_SUPPORTED(bp))
return 0;
if (!bp->nic_flow_info)
return -EINVAL;
for (i = 0; i < NIC_FLOW_FILTER_MAX; i++) {
nfr = &nfdb->roce[i];
if (!nfr->in_use)
continue;
if (ether_addr_equal(nfr->mac_addr, dmac)) {
*filter_id = nfr->l2_filter_id;
netdev_dbg(bp->dev, "%s: %pM filter=%llx\n", __func__, dmac,
*filter_id);
return 0;
}
}
netdev_dbg(bp->dev, "%s: No matching filter for dmac%pM\n", __func__, dmac);
return -ENOENT;
}
#else /* if defined(CONFIG_BNXT_FLOWER_OFFLOAD) */
int bnxt_nic_flows_init(struct bnxt *bp)
{
return 0;
}
void bnxt_nic_flows_deinit(struct bnxt *bp)
{
}
int bnxt_nic_flows_open(struct bnxt *bp)
{
return 0;
}
void bnxt_nic_flows_close(struct bnxt *bp)
{
}
int bnxt_nic_flows_filter_add(struct bnxt *bp, __le64 filter_id, const u8 *mac_addr)
{
return 0;
}
int bnxt_nic_flows_roce_add(struct bnxt *bp)
{
return 0;
}
int bnxt_nic_flows_roce_rem(struct bnxt *bp, __le64 filter_id)
{
return 0;
}
int bnxt_nic_flows_filter_info_get(struct bnxt *bp, __le64 filter_id,
u32 *l2_ctxt_id, u32 *prof_func)
{
return 0;
}
#endif /* if defined(CONFIG_BNXT_FLOWER_OFFLOAD) */


@@ -0,0 +1,18 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2024 Broadcom
* All rights reserved.
*/
#ifndef BNXT_NIC_FLOW_H
#define BNXT_NIC_FLOW_H
int bnxt_nic_flows_init(struct bnxt *bp);
void bnxt_nic_flows_deinit(struct bnxt *bp);
int bnxt_nic_flows_open(struct bnxt *bp);
void bnxt_nic_flows_close(struct bnxt *bp);
int bnxt_nic_flows_filter_add(struct bnxt *bp, __le64 filter_id, const u8 *mac_addr);
int bnxt_nic_flows_roce_add(struct bnxt *bp);
int bnxt_nic_flows_roce_rem(struct bnxt *bp, __le64 filter_id);
int bnxt_nic_flows_filter_info_get(struct bnxt *bp, __le64 filter_id,
u32 *l2_ctxt_id, u32 *prof_func);
int bnxt_nic_flow_dmac_filter_get(struct bnxt *bp, u8 *dmac, __le64 *filter_id);
#endif /* BNXT_NIC_FLOW_H */


@@ -0,0 +1,73 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef _BNXT_NVM_DEFS_H_
#define _BNXT_NVM_DEFS_H_
enum bnxt_nvm_directory_type {
BNX_DIR_TYPE_UNUSED = 0,
BNX_DIR_TYPE_PKG_LOG = 1,
BNX_DIR_TYPE_UPDATE = 2,
BNX_DIR_TYPE_CHIMP_PATCH = 3,
BNX_DIR_TYPE_BOOTCODE = 4,
BNX_DIR_TYPE_VPD = 5,
BNX_DIR_TYPE_EXP_ROM_MBA = 6,
BNX_DIR_TYPE_AVS = 7,
BNX_DIR_TYPE_PCIE = 8,
BNX_DIR_TYPE_PORT_MACRO = 9,
BNX_DIR_TYPE_APE_FW = 10,
BNX_DIR_TYPE_APE_PATCH = 11,
BNX_DIR_TYPE_KONG_FW = 12,
BNX_DIR_TYPE_KONG_PATCH = 13,
BNX_DIR_TYPE_BONO_FW = 14,
BNX_DIR_TYPE_BONO_PATCH = 15,
BNX_DIR_TYPE_TANG_FW = 16,
BNX_DIR_TYPE_TANG_PATCH = 17,
BNX_DIR_TYPE_BOOTCODE_2 = 18,
BNX_DIR_TYPE_CCM = 19,
BNX_DIR_TYPE_PCI_CFG = 20,
BNX_DIR_TYPE_TSCF_UCODE = 21,
BNX_DIR_TYPE_ISCSI_BOOT = 22,
BNX_DIR_TYPE_ISCSI_BOOT_IPV6 = 24,
BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6 = 25,
BNX_DIR_TYPE_ISCSI_BOOT_CFG6 = 26,
BNX_DIR_TYPE_EXT_PHY = 27,
BNX_DIR_TYPE_SHARED_CFG = 40,
BNX_DIR_TYPE_PORT_CFG = 41,
BNX_DIR_TYPE_FUNC_CFG = 42,
BNX_DIR_TYPE_MGMT_CFG = 48,
BNX_DIR_TYPE_MGMT_DATA = 49,
BNX_DIR_TYPE_MGMT_WEB_DATA = 50,
BNX_DIR_TYPE_MGMT_WEB_META = 51,
BNX_DIR_TYPE_MGMT_EVENT_LOG = 52,
BNX_DIR_TYPE_MGMT_AUDIT_LOG = 53
};
#define BNX_DIR_ORDINAL_FIRST 0
#define BNX_DIR_EXT_NONE 0
#define BNX_DIR_EXT_INACTIVE (1 << 0)
#define BNX_DIR_EXT_UPDATE (1 << 1)
#define BNX_DIR_ATTR_NONE 0
#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0)
#define BNX_DIR_ATTR_PROP_STREAM (1 << 1)
enum bnxnvm_pkglog_field_index {
BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0,
BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1,
BNX_PKG_LOG_FIELD_IDX_PKG_VERSION = 2,
BNX_PKG_LOG_FIELD_IDX_PKG_TIMESTAMP = 3,
BNX_PKG_LOG_FIELD_IDX_PKG_CHECKSUM = 4,
BNX_PKG_LOG_FIELD_IDX_INSTALLED_ITEMS = 5,
BNX_PKG_LOG_FIELD_IDX_INSTALLED_MASK = 6
};
#endif /* Don't add anything after this line */

File diff suppressed because it is too large


@@ -0,0 +1,201 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2017-2018 Broadcom Limited
* Copyright (c) 2018-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_PTP_H
#define BNXT_PTP_H
#ifdef HAVE_IEEE1588_SUPPORT
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#endif
#define BNXT_PTP_GRC_WIN 6
#define BNXT_PTP_GRC_WIN_BASE 0x6000
#define BNXT_PTP_GRC_WIN_VF 1
#define BNXT_PTP_GRC_WIN_BASE_VF 0x1000
#define BNXT_MAX_PHC_DRIFT 31000000
#define BNXT_CYCLES_SHIFT 23
#define BNXT_DEVCLK_FREQ 1000000
#define BNXT_LO_TIMER_MASK 0x0000ffffffffUL
#define BNXT_HI_TIMER_MASK 0xffff00000000UL
#define BNXT_HI_TIMER_MASK64 0xffff000000000000UL
#define BNXT_PTP_DFLT_TX_TMO 1000 /* ms */
#define BNXT_PTP_QTS_TIMEOUT(bp) (((bp)->flags & BNXT_FLAG_CHIP_P5_PLUS) ? 1000 : 62000)
#define BNXT_PTP_QTS_MAX_TMO_US 65535
#define BNXT_PTP_QTS_TX_ENABLES (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID | \
PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT | \
PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET)
#define BNXT_PTP_QTS_RX_ENABLES (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID)
struct pps_pin {
u8 event;
u8 usage;
u8 state;
};
#define TSIO_PIN_VALID(pin) ((pin) >= 0 && (pin) < (BNXT_MAX_TSIO_PINS))
#define EVENT_DATA2_PPS_EVENT_TYPE(data2) \
((data2) & ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE)
#define EVENT_DATA2_PPS_PIN_NUM(data2) \
(((data2) & \
ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK) >>\
ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT)
#define BNXT_DATA2_UPPER_MSK \
ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK
#define BNXT_DATA2_UPPER_SFT \
(32 - \
ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT)
#define BNXT_DATA1_LOWER_MSK \
ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK
#define BNXT_DATA1_LOWER_SFT \
ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT
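/* Reassembles the 64-bit PPS timestamp from the upper bits carried in
 * event data2 and the lower bits carried in event data1.
 */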
#define EVENT_PPS_TS(data2, data1) \
(((u64)((data2) & BNXT_DATA2_UPPER_MSK) << BNXT_DATA2_UPPER_SFT) |\
(((data1) & BNXT_DATA1_LOWER_MSK) >> BNXT_DATA1_LOWER_SFT))
#define BNXT_PPS_PIN_DISABLE 0
#define BNXT_PPS_PIN_ENABLE 1
#define BNXT_PPS_PIN_NONE 0
#define BNXT_PPS_PIN_PPS_IN 1
#define BNXT_PPS_PIN_PPS_OUT 2
#define BNXT_PPS_PIN_SYNC_IN 3
#define BNXT_PPS_PIN_SYNC_OUT 4
#define BNXT_PPS_EVENT_INTERNAL 1
#define BNXT_PPS_EVENT_EXTERNAL 2
struct bnxt_pps {
u8 num_pins;
#define BNXT_MAX_TSIO_PINS 4
struct pps_pin pins[BNXT_MAX_TSIO_PINS];
};
#define BNXT_MAX_TX_TS 4
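/* Ring-style increment; relies on BNXT_MAX_TX_TS being a power of two
 * so that the AND implements the wrap-around.
 */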
#define NEXT_TXTS(idx) (((idx) + 1) & (BNXT_MAX_TX_TS - 1))
struct bnxt_ptp_tx_req {
struct sk_buff *tx_skb;
u16 tx_seqid;
u16 tx_hdr_off;
unsigned long abs_txts_tmo;
};
struct bnxt_ptp_cfg {
#ifdef HAVE_IEEE1588_SUPPORT
struct ptp_clock_info ptp_info;
struct ptp_clock *ptp_clock;
struct cyclecounter cc;
struct timecounter tc;
struct bnxt_pps pps_info;
/* serialize timecounter access */
spinlock_t ptp_lock;
/* serialize ts tx request queuing */
spinlock_t ptp_tx_lock;
struct sk_buff *rx_skb;
struct bnxt_napi *bnapi;
u32 vlan;
u64 current_time;
u64 old_time;
u64 skb_pre_xmit_ts;
u64 save_ts;
#if !defined HAVE_PTP_DO_AUX_WORK
struct work_struct ptp_ts_task;
#else
unsigned long next_period;
#endif
unsigned long next_overflow_check;
u32 cmult;
/* a 23b shift cyclecounter will overflow in ~36 mins. Check overflow every 18 mins. */
#define BNXT_PHC_OVERFLOW_PERIOD (18 * 60 * HZ)
struct bnxt_ptp_tx_req txts_req[BNXT_MAX_TX_TS];
u16 rx_seqid;
#endif
struct bnxt *bp;
u8 tx_avail;
u16 rxctl;
#define BNXT_PTP_MSG_SYNC (1 << 0)
#define BNXT_PTP_MSG_DELAY_REQ (1 << 1)
#define BNXT_PTP_MSG_PDELAY_REQ (1 << 2)
#define BNXT_PTP_MSG_PDELAY_RESP (1 << 3)
#define BNXT_PTP_MSG_FOLLOW_UP (1 << 8)
#define BNXT_PTP_MSG_DELAY_RESP (1 << 9)
#define BNXT_PTP_MSG_PDELAY_RESP_FOLLOW_UP (1 << 10)
#define BNXT_PTP_MSG_ANNOUNCE (1 << 11)
#define BNXT_PTP_MSG_SIGNALING (1 << 12)
#define BNXT_PTP_MSG_MANAGEMENT (1 << 13)
#define BNXT_PTP_MSG_EVENTS (BNXT_PTP_MSG_SYNC | \
BNXT_PTP_MSG_DELAY_REQ | \
BNXT_PTP_MSG_PDELAY_REQ | \
BNXT_PTP_MSG_PDELAY_RESP)
u8 tx_tstamp_en:1;
int rx_filter;
u32 tstamp_filters;
u32 refclk_regs[2];
u32 refclk_mapped_regs[2];
u32 txts_tmo;
u16 txts_prod;
u16 txts_cons;
};
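/* On 32-bit hosts a 64-bit load is not atomic, so the copy is done
 * under the timecounter lock; on 64-bit hosts READ_ONCE() suffices.
 */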
#if BITS_PER_LONG == 32
#define BNXT_READ_TIME64(ptp, dst, src) \
do { \
spin_lock_bh(&(ptp)->ptp_lock); \
(dst) = (src); \
spin_unlock_bh(&(ptp)->ptp_lock); \
} while (0)
#else
#define BNXT_READ_TIME64(ptp, dst, src) \
((dst) = READ_ONCE(src))
#endif
#define BNXT_PTP_INC_TX_AVAIL(ptp) \
do { \
spin_lock_bh(&(ptp)->ptp_tx_lock); \
(ptp)->tx_avail++; \
spin_unlock_bh(&(ptp)->ptp_tx_lock); \
} while (0)
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
void bnxt_ptp_reapply_phc(struct bnxt *bp);
#ifndef HAVE_PTP_DO_AUX_WORK
void bnxt_ptp_timer(struct bnxt *bp);
#endif
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
int bnxt_get_rx_ts(struct bnxt *bp, struct bnxt_napi *bnapi, u32 vlan, struct sk_buff *skb);
int bnxt_get_tx_ts(struct bnxt *bp, struct sk_buff *skb, u16 prod);
void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
struct tx_ts_cmp *tscmp);
int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg);
void bnxt_ptp_clear(struct bnxt *bp);
void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns);
int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg);
void bnxt_ptp_update_current_time(struct bnxt *bp);
void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
void bnxt_ptp_get_skb_pre_xmit_ts(struct bnxt *bp);
void bnxt_save_pre_reset_ts(struct bnxt *bp);
int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod);
#endif

File diff suppressed because it is too large


@@ -0,0 +1,82 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2016-2018 Broadcom Limited
* Copyright (c) 2018-2021 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_SRIOV_H
#define BNXT_SRIOV_H
#define BNXT_FWD_RESP_SIZE_ERR(n) \
((offsetof(struct hwrm_fwd_resp_input, encap_resp) + n) > \
sizeof(struct hwrm_fwd_resp_input))
#define BNXT_EXEC_FWD_RESP_SIZE_ERR(n) \
((offsetof(struct hwrm_exec_fwd_resp_input, encap_request) + n) >\
offsetof(struct hwrm_exec_fwd_resp_input, encap_resp_target_id))
#define BNXT_VF_MIN_RSS_CTX 1
#define BNXT_VF_MAX_RSS_CTX 1
#define BNXT_VF_MIN_L2_CTX 1
#define BNXT_VF_MAX_L2_CTX 4
#ifdef CONFIG_BNXT_SRIOV
#define BNXT_SUPPORTS_SRIOV(pdev) ((pdev)->sriov)
#else
#define BNXT_SUPPORTS_SRIOV(pdev) 0
#endif
#ifdef HAVE_NDO_GET_VF_CONFIG
int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *);
int bnxt_set_vf_mac(struct net_device *, int, u8 *);
#ifdef NEW_NDO_SET_VF_VLAN
int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
#else
int bnxt_set_vf_vlan(struct net_device *, int, u16, u8);
#endif
#ifdef HAVE_IFLA_TX_RATE
int bnxt_set_vf_bw(struct net_device *, int, int, int);
#else
int bnxt_set_vf_bw(struct net_device *, int, int);
#endif
#ifdef HAVE_NDO_SET_VF_LINK_STATE
int bnxt_set_vf_link_state(struct net_device *, int, int);
#endif
#ifdef HAVE_VF_SPOOFCHK
int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
#endif
#ifdef HAVE_NDO_SET_VF_TRUST
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust);
#endif
#ifdef HAVE_NDO_SET_VF_QUEUES
int bnxt_set_vf_queues(struct net_device *dev, int vf_id, int min_txq,
int max_txq, int min_rxq, int max_rxq);
#endif
#endif
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
#ifndef PCIE_SRIOV_CONFIGURE
void bnxt_start_sriov(struct bnxt *, int);
void bnxt_sriov_init(unsigned int);
void bnxt_sriov_exit(void);
#endif
int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset);
void bnxt_sriov_disable(struct bnxt *bp);
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp);
void bnxt_update_vf_mac(struct bnxt *bp);
int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict);
void bnxt_update_vf_vnic(struct bnxt *bp, u32 vf_idx, u32 state);
void bnxt_commit_vf_vnic(struct bnxt *bp, u32 vf_idx);
bool bnxt_vf_vnic_state_is_up(struct bnxt *bp, u32 vf_idx);
bool bnxt_vf_cfg_change(struct bnxt *bp, u16 vf_id, u32 data1);
void bnxt_update_vf_cfg(struct bnxt *bp);
bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf);
int bnxt_alloc_vf_stats_mem(struct bnxt *bp);
void bnxt_free_vf_stats_mem(struct bnxt *bp);
void bnxt_reset_vf_stats(struct bnxt *bp);
int bnxt_hwrm_tf_oem_cmd(struct bnxt *bp, u32 *in, u16 in_len, u32 *out, u16 out_len);
#endif


@@ -0,0 +1,266 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014, Mellanox Technologies inc. All rights reserved.
* Copyright (c) 2023 Broadcom Inc.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/sysfs.h>
#include <linux/module.h>
#include "bnxt_hsi.h"
#include "bnxt_compat.h"
#include "bnxt.h"
#include "bnxt_sriov_sysfs.h"
struct vf_attributes {
struct attribute attr;
ssize_t (*show)(struct bnxt_vf_sysfs_obj *vf_so, struct vf_attributes *vfa,
char *buf);
ssize_t (*store)(struct bnxt_vf_sysfs_obj *vf_so, struct vf_attributes *vfa,
const char *buf, size_t count);
};
static ssize_t vf_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct vf_attributes *ga =
container_of(attr, struct vf_attributes, attr);
struct bnxt_vf_sysfs_obj *g = container_of(kobj, struct bnxt_vf_sysfs_obj, kobj);
if (!ga->show)
return -EIO;
return ga->show(g, ga, buf);
}
static ssize_t vf_attr_store(struct kobject *kobj,
struct attribute *attr,
const char *buf, size_t size)
{
struct vf_attributes *ga =
container_of(attr, struct vf_attributes, attr);
struct bnxt_vf_sysfs_obj *g = container_of(kobj, struct bnxt_vf_sysfs_obj, kobj);
if (!ga->store)
return -EIO;
return ga->store(g, ga, buf, size);
}
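/* Appends to a sysfs page without ever writing past PAGE_SIZE; emits
 * nothing once the page is full.
 */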
#define _sprintf(p, buf, format, arg...) \
((PAGE_SIZE - (int)((p) - (buf))) <= 0 ? 0 : \
scnprintf((p), PAGE_SIZE - (int)((p) - (buf)), format, ## arg))
static ssize_t stats_show(struct bnxt_vf_sysfs_obj *g, struct vf_attributes *oa,
char *buf)
{
struct bnxt_stats_mem *stats = &g->stats;
struct bnxt *bp = g->parent_pf_bp;
struct ctx_hw_stats *hw_stats;
u64 rx_dropped, tx_dropped;
u64 rx_packets, rx_bytes;
u64 tx_packets, tx_bytes;
char *p = buf;
int rc;
memset(stats->hw_stats, 0, stats->len);
mutex_lock(&bp->sriov_lock);
rc = bnxt_hwrm_func_qstats(bp, stats,
cpu_to_le16(g->fw_fid), 0);
if (rc) {
mutex_unlock(&bp->sriov_lock);
return rc;
}
hw_stats = stats->hw_stats;
rx_packets = hw_stats->rx_ucast_pkts + hw_stats->rx_mcast_pkts + hw_stats->rx_bcast_pkts;
rx_bytes = hw_stats->rx_ucast_bytes + hw_stats->rx_mcast_bytes + hw_stats->rx_bcast_bytes;
tx_packets = hw_stats->tx_ucast_pkts + hw_stats->tx_mcast_pkts + hw_stats->tx_bcast_pkts;
tx_bytes = hw_stats->tx_ucast_bytes + hw_stats->tx_mcast_bytes + hw_stats->tx_bcast_bytes;
rx_dropped = hw_stats->rx_error_pkts;
tx_dropped = hw_stats->tx_error_pkts;
p += _sprintf(p, buf, "tx_packets : %llu\n", tx_packets);
p += _sprintf(p, buf, "tx_bytes : %llu\n", tx_bytes);
p += _sprintf(p, buf, "tx_dropped : %llu\n", tx_dropped);
p += _sprintf(p, buf, "rx_packets : %llu\n", rx_packets);
p += _sprintf(p, buf, "rx_bytes : %llu\n", rx_bytes);
p += _sprintf(p, buf, "rx_dropped : %llu\n", rx_dropped);
p += _sprintf(p, buf, "rx_multicast : %llu\n", hw_stats->rx_mcast_pkts);
p += _sprintf(p, buf, "rx_broadcast : %llu\n", hw_stats->rx_bcast_pkts);
p += _sprintf(p, buf, "tx_broadcast : %llu\n", hw_stats->tx_bcast_pkts);
p += _sprintf(p, buf, "tx_multicast : %llu\n", hw_stats->tx_mcast_pkts);
mutex_unlock(&bp->sriov_lock);
return (ssize_t)(p - buf);
}
#define VF_ATTR(_name) struct vf_attributes vf_attr_##_name = \
__ATTR(_name, 0444, _name##_show, NULL)
VF_ATTR(stats);
static struct attribute *vf_eth_attrs[] = {
&vf_attr_stats.attr,
NULL
};
#ifdef HAVE_KOBJ_DEFAULT_GROUPS
ATTRIBUTE_GROUPS(vf_eth);
#endif
static const struct sysfs_ops vf_sysfs_ops = {
.show = vf_attr_show,
.store = vf_attr_store,
};
static struct kobj_type vf_type_eth = {
.sysfs_ops = &vf_sysfs_ops,
#ifdef HAVE_KOBJ_DEFAULT_GROUPS
.default_groups = vf_eth_groups
#else
.default_attrs = vf_eth_attrs
#endif
};
int bnxt_sriov_sysfs_init(struct bnxt *bp)
{
struct device *dev = &bp->pdev->dev;
bp->sriov_sysfs_config = kobject_create_and_add("sriov", &dev->kobj);
if (!bp->sriov_sysfs_config)
return -ENOMEM;
return 0;
}
void bnxt_sriov_sysfs_exit(struct bnxt *bp)
{
kobject_put(bp->sriov_sysfs_config);
bp->sriov_sysfs_config = NULL;
}
int bnxt_create_vfs_sysfs(struct bnxt *bp)
{
struct bnxt_vf_sysfs_obj *vf_obj;
static struct kobj_type *sysfs;
struct bnxt_vf_info *vfs, *tmp;
struct bnxt_stats_mem *stats;
int err;
int vf;
sysfs = &vf_type_eth;
bp->vf_sysfs_objs = kcalloc(bp->pf.active_vfs, sizeof(struct bnxt_vf_sysfs_obj),
GFP_KERNEL);
if (!bp->vf_sysfs_objs)
return -ENOMEM;
mutex_lock(&bp->sriov_lock);
vfs = rcu_dereference_protected(bp->pf.vf,
lockdep_is_held(&bp->sriov_lock));
if (!vfs) {
netdev_warn(bp->dev, "create_vfs_sysfs: VF array is NULL\n");
mutex_unlock(&bp->sriov_lock);
kfree(bp->vf_sysfs_objs);
return -EINVAL;
}
for (vf = 0; vf < bp->pf.active_vfs; vf++) {
tmp = &vfs[vf];
vf_obj = &bp->vf_sysfs_objs[vf];
vf_obj->parent_pf_bp = bp;
vf_obj->fw_fid = tmp->fw_fid;
stats = &vf_obj->stats;
stats->len = bp->hw_ring_stats_size;
stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
&stats->hw_stats_map, GFP_KERNEL);
if (!stats->hw_stats)
goto err_vf_obj;
err = kobject_init_and_add(&vf_obj->kobj, sysfs, bp->sriov_sysfs_config,
"%d", vf);
if (err)
goto err_vf_obj;
kobject_uevent(&vf_obj->kobj, KOBJ_ADD);
}
mutex_unlock(&bp->sriov_lock);
return 0;
err_vf_obj:
for (; vf >= 0; vf--) {
vf_obj = &bp->vf_sysfs_objs[vf];
stats = &vf_obj->stats;
if (stats->hw_stats)
dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
stats->hw_stats_map);
if (vf_obj->kobj.state_initialized)
kobject_put(&vf_obj->kobj);
}
kfree(bp->vf_sysfs_objs);
mutex_unlock(&bp->sriov_lock);
return -ENOMEM;
}
void bnxt_destroy_vfs_sysfs(struct bnxt *bp)
{
struct bnxt_vf_sysfs_obj *vf_obj;
struct bnxt_stats_mem *stats;
int vf;
mutex_lock(&bp->sriov_lock);
for (vf = 0; vf < bp->pf.active_vfs; vf++) {
vf_obj = &bp->vf_sysfs_objs[vf];
stats = &vf_obj->stats;
if (stats->hw_stats)
dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
stats->hw_stats_map);
kobject_put(&vf_obj->kobj);
}
kfree(bp->vf_sysfs_objs);
mutex_unlock(&bp->sriov_lock);
}


@@ -0,0 +1,20 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_SRIOV_SYSFS_H
#define BNXT_SRIOV_SYSFS_H
#include "bnxt_hsi.h"
#include "bnxt.h"
int bnxt_sriov_sysfs_init(struct bnxt *bp);
void bnxt_sriov_sysfs_exit(struct bnxt *bp);
int bnxt_create_vfs_sysfs(struct bnxt *bp);
void bnxt_destroy_vfs_sysfs(struct bnxt *bp);
#endif

File diff suppressed because it is too large


@@ -0,0 +1,384 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2017-2018 Broadcom Limited
* Copyright (c) 2018-2022 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_TC_H
#define BNXT_TC_H
#ifdef CONFIG_BNXT_FLOWER_OFFLOAD
#include <net/ip_tunnels.h>
/* Structs used for storing the filter/actions of the TC cmd.
*/
struct bnxt_tc_l2_key {
u16 src_fid;
u8 dmac[ETH_ALEN];
u8 smac[ETH_ALEN];
__be16 inner_vlan_tpid;
__be16 inner_vlan_tci;
__be16 ether_type;
u8 num_vlans;
u8 dir;
#define BNXT_DIR_RX 1
#define BNXT_DIR_TX 0
};
struct bnxt_tc_l3_key {
union {
struct {
struct in_addr daddr;
struct in_addr saddr;
} ipv4;
struct {
struct in6_addr daddr;
struct in6_addr saddr;
} ipv6;
};
};
struct bnxt_tc_l4_key {
u8 ip_proto;
union {
struct {
__be16 sport;
__be16 dport;
} ports;
struct {
u8 type;
u8 code;
} icmp;
};
};
struct bnxt_tc_tunnel_key {
struct bnxt_tc_l2_key l2;
struct bnxt_tc_l3_key l3;
struct bnxt_tc_l4_key l4;
__be32 id;
};
#define bnxt_eth_addr_key_mask_invalid(eth_addr, eth_addr_mask) \
((is_wildcard(&(eth_addr)[0], ETH_ALEN) && \
is_wildcard(&(eth_addr)[ETH_ALEN / 2], ETH_ALEN)) || \
(is_wildcard(&(eth_addr_mask)[0], ETH_ALEN) && \
is_wildcard(&(eth_addr_mask)[ETH_ALEN / 2], ETH_ALEN)))
struct bnxt_tc_actions {
u32 flags;
#define BNXT_TC_ACTION_FLAG_FWD BIT(0)
#define BNXT_TC_ACTION_FLAG_FWD_VXLAN BIT(1)
#define BNXT_TC_ACTION_FLAG_PUSH_VLAN BIT(3)
#define BNXT_TC_ACTION_FLAG_POP_VLAN BIT(4)
#define BNXT_TC_ACTION_FLAG_DROP BIT(5)
#define BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP BIT(6)
#define BNXT_TC_ACTION_FLAG_TUNNEL_DECAP BIT(7)
#define BNXT_TC_ACTION_FLAG_L2_REWRITE BIT(8)
#define BNXT_TC_ACTION_FLAG_NAT_XLATE BIT(9)
#define BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP_IPV4 BIT(10)
#define BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP_IPV6 BIT(11)
u16 dst_fid;
struct net_device *dst_dev;
__be16 push_vlan_tpid;
__be16 push_vlan_tci;
/* tunnel encap */
struct ip_tunnel_key tun_encap_key;
#define PEDIT_OFFSET_SMAC_LAST_4_BYTES 0x8
__be16 l2_rewrite_dmac[3];
__be16 l2_rewrite_smac[3];
struct {
bool src_xlate; /* true => translate src,
* false => translate dst
* Mutually exclusive, i.e cannot set both
*/
bool l3_is_ipv4; /* false means L3 is ipv6 */
struct bnxt_tc_l3_key l3;
struct bnxt_tc_l4_key l4;
} nat;
};
struct bnxt_tc_flow {
u32 flags;
#define BNXT_TC_FLOW_FLAGS_ETH_ADDRS BIT(1)
#define BNXT_TC_FLOW_FLAGS_IPV4_ADDRS BIT(2)
#define BNXT_TC_FLOW_FLAGS_IPV6_ADDRS BIT(3)
#define BNXT_TC_FLOW_FLAGS_PORTS BIT(4)
#define BNXT_TC_FLOW_FLAGS_ICMP BIT(5)
#define BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS BIT(6)
#define BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS BIT(7)
#define BNXT_TC_FLOW_FLAGS_TUNL_IPV6_ADDRS BIT(8)
#define BNXT_TC_FLOW_FLAGS_TUNL_PORTS BIT(9)
#define BNXT_TC_FLOW_FLAGS_TUNL_ID BIT(10)
#define BNXT_TC_FLOW_FLAGS_TUNNEL (BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS | \
BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS | \
BNXT_TC_FLOW_FLAGS_TUNL_IPV6_ADDRS |\
BNXT_TC_FLOW_FLAGS_TUNL_PORTS |\
BNXT_TC_FLOW_FLAGS_TUNL_ID)
/* flow applicable to pkts ingressing on this fid */
u16 src_fid;
struct bnxt_tc_l2_key l2_key;
struct bnxt_tc_l2_key l2_mask;
struct bnxt_tc_l3_key l3_key;
struct bnxt_tc_l3_key l3_mask;
struct bnxt_tc_l4_key l4_key;
struct bnxt_tc_l4_key l4_mask;
struct ip_tunnel_key tun_key;
struct ip_tunnel_key tun_mask;
struct bnxt_tc_actions actions;
/* updated stats accounting for hw-counter wrap-around */
struct bnxt_tc_flow_stats stats;
/* previous snap-shot of stats */
struct bnxt_tc_flow_stats prev_stats;
unsigned long lastused; /* jiffies */
/* for calculating delta from prev_stats and
* updating prev_stats atomically.
*/
spinlock_t stats_lock;
};
enum bnxt_tc_tunnel_node_type {
BNXT_TC_TUNNEL_NODE_TYPE_NONE,
BNXT_TC_TUNNEL_NODE_TYPE_ENCAP,
BNXT_TC_TUNNEL_NODE_TYPE_DECAP
};
/*
* Tunnel encap/decap hash table
* This table is used to maintain a list of flows that use
* the same tunnel encap/decap params (ip_daddrs, vni, udp_dport)
* and the FW returned handle.
* A separate table is maintained for encap and decap
*/
struct bnxt_tc_tunnel_node {
struct ip_tunnel_key key;
struct rhash_head node;
enum bnxt_tc_tunnel_node_type tunnel_node_type;
/* tunnel l2 info */
struct bnxt_tc_l2_key l2_info;
#define INVALID_TUNNEL_HANDLE cpu_to_le32(0xffffffff)
/* tunnel handle returned by FW */
__le32 tunnel_handle;
u32 refcount;
/* For the shared encap list maintained in neigh node */
struct list_head encap_list_node;
/* A list of flows that share the encap tunnel node */
struct list_head common_encap_flows;
struct bnxt_tc_neigh_node *neigh_node;
struct rcu_head rcu;
};
/*
* L2 hash table
* The same data-struct is used for L2-flow table and L2-tunnel table.
* The L2 part of a flow or tunnel is stored in a hash table.
* A flow that shares the same L2 key/mask with an
* already existing flow/tunnel must refer to its flow handle or
* decap_filter_id respectively.
*/
struct bnxt_tc_l2_node {
/* hash key: first 16b of key */
#define BNXT_TC_L2_KEY_LEN 18
struct bnxt_tc_l2_key key;
struct rhash_head node;
/* a linked list of flows that share the same l2 key */
struct list_head common_l2_flows;
/* number of flows/tunnels sharing the l2 key */
u16 refcount;
struct rcu_head rcu;
};
/* Track if the TC offload API is invoked on an ingress or egress device. */
enum {
BNXT_TC_DEV_INGRESS = 1,
BNXT_TC_DEV_EGRESS = 2
};
/* Use TC provided cookie along with the src_fid of the device on which
* the offload request is received . This is done to handle shared block
* filters for 2 VFs of the same PF, since they would come with the same
* cookie
*/
struct bnxt_tc_flow_node_key {
/* hash key: provided by TC */
unsigned long cookie;
u32 src_fid;
};
struct bnxt_tc_flow_node {
struct bnxt_tc_flow_node_key key;
struct rhash_head node;
struct bnxt_tc_flow flow;
__le64 ext_flow_handle;
__le16 flow_handle;
__le32 flow_id;
int tc_dev_dir;
/* L2 node in l2 hashtable that shares flow's l2 key */
struct bnxt_tc_l2_node *l2_node;
/* for the shared_flows list maintained in l2_node */
struct list_head l2_list_node;
/* tunnel encap related */
struct bnxt_tc_tunnel_node *encap_node;
/* tunnel decap related */
struct bnxt_tc_tunnel_node *decap_node;
/* L2 node in tunnel-l2 hashtable that shares flow's tunnel l2 key */
struct bnxt_tc_l2_node *decap_l2_node;
/* for the shared_flows list maintained in tunnel decap l2_node */
struct list_head decap_l2_list_node;
/* For the shared flows list maintained in tunnel encap node */
struct list_head encap_flow_list_node;
/* For the list of flows whose re-add failed during a neigh event */
struct list_head failed_add_flow_node;
struct rcu_head rcu;
};
struct bnxt_tc_neigh_key {
struct net_device *dev;
union {
struct in_addr v4;
struct in6_addr v6;
} dst_ip;
int family;
};
struct bnxt_tc_neigh_node {
struct bnxt_tc_neigh_key key;
struct rhash_head node;
/* An encap tunnel list which use the same neigh node */
struct list_head common_encap_list;
u32 refcount;
u8 dmac[ETH_ALEN];
struct rcu_head rcu;
};
struct bnxt_tf_flow_node {
struct bnxt_tc_flow_node_key key;
struct rhash_head node;
u32 flow_id;
#ifdef HAVE_TC_CB_EGDEV
int tc_dev_dir;
#endif
u16 ulp_src_fid;
bool dscp_remap;
/* The below fields are used if there is a tunnel encap
* action associated with the flow. These members are used to
* manage neighbour update events on the tunnel neighbour.
*/
struct bnxt_tc_tunnel_node *encap_node;
/* For the shared flows list maintained in tunnel encap node */
struct list_head encap_flow_list_node;
/* For the shared flows list when re-add fails during neigh event */
struct list_head failed_add_flow_node;
void *mparms;
struct rcu_head rcu;
};
#ifdef HAVE_TC_CB_EGDEV
int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
struct flow_cls_offload *cls_flower,
int tc_dev_dir);
#else
int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
struct flow_cls_offload *cls_flower);
#endif
int bnxt_init_tc(struct bnxt *bp);
void bnxt_shutdown_tc(struct bnxt *bp);
void bnxt_tc_flow_stats_work(struct bnxt *bp);
void bnxt_tc_flush_flows(struct bnxt *bp);
#if defined(HAVE_TC_MATCHALL_FLOW_RULE) && defined(HAVE_FLOW_ACTION_POLICE)
int bnxt_tc_setup_matchall(struct bnxt *bp, u16 src_fid,
struct tc_cls_matchall_offload *cls_matchall);
#endif
void bnxt_tc_update_neigh_work(struct work_struct *work);
u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev);
int bnxt_tc_resolve_ipv4_tunnel_hdrs(struct bnxt *bp,
struct bnxt_tc_flow_node *flow_node,
struct ip_tunnel_key *tun_key,
struct bnxt_tc_l2_key *l2_info,
struct bnxt_tc_neigh_key *neigh_key);
int bnxt_tc_resolve_ipv6_tunnel_hdrs(struct bnxt *bp,
struct bnxt_tc_flow_node *flow_node,
struct ip_tunnel_key *tun_key,
struct bnxt_tc_l2_key *l2_info,
struct bnxt_tc_neigh_key *neigh_key);
static inline bool bnxt_tc_flower_enabled(struct bnxt *bp)
{
return bp->tc_info && bp->tc_info->enabled;
}
static inline void bnxt_disable_tc_flower(struct bnxt *bp)
{
mutex_lock(&bp->tc_info->lock);
bp->tc_info->enabled = false;
mutex_unlock(&bp->tc_info->lock);
}
static inline void bnxt_enable_tc_flower(struct bnxt *bp)
{
mutex_lock(&bp->tc_info->lock);
bp->tc_info->enabled = true;
mutex_unlock(&bp->tc_info->lock);
}
#else /* CONFIG_BNXT_FLOWER_OFFLOAD */
static inline int bnxt_init_tc(struct bnxt *bp)
{
return 0;
}
static inline void bnxt_shutdown_tc(struct bnxt *bp)
{
}
static inline void bnxt_tc_flow_stats_work(struct bnxt *bp)
{
}
static inline void bnxt_tc_flush_flows(struct bnxt *bp)
{
}
static inline bool bnxt_tc_flower_enabled(struct bnxt *bp)
{
return false;
}
static inline void bnxt_disable_tc_flower(struct bnxt *bp)
{
}
static inline void bnxt_enable_tc_flower(struct bnxt *bp)
{
}
#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */
#endif /* BNXT_TC_H */


@@ -0,0 +1,310 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2020-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include "bnxt.h"
#ifdef CONFIG_BNXT_FLOWER_OFFLOAD
#ifdef HAVE_FLOW_OFFLOAD_H
#ifdef HAVE_FLOW_STATS_UPDATE
#if !defined(HAVE_FLOW_STATS_DROPS) && defined(HAVE_FLOW_ACTION_BASIC_HW_STATS_CHECK)
#define flow_stats_update(flow_stats, bytes, pkts, drops, last_used, used_hw_stats) \
flow_stats_update(flow_stats, bytes, pkts, last_used, used_hw_stats)
#elif !defined(HAVE_FLOW_ACTION_BASIC_HW_STATS_CHECK)
#define flow_stats_update(flow_stats, bytes, pkts, drops, last_used, used_hw_stats) \
flow_stats_update(flow_stats, bytes, pkts, last_used)
#endif
#endif
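/* These wrappers keep all call sites on the newest flow_stats_update()
 * argument list; on older kernels the arguments the in-kernel API does
 * not know about are silently dropped.
 */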
#ifndef HAVE_FLOW_ACTION_BASIC_HW_STATS_CHECK
static inline bool
flow_action_basic_hw_stats_check(const struct flow_action *action,
struct netlink_ext_ack *extack)
{
return true;
}
#endif /* HAVE_FLOW_ACTION_BASIC_HW_STATS_CHECK */
#ifndef HAVE_FLOW_INDR_BLOCK_CLEANUP
#ifdef HAVE_FLOW_INDR_BLOCK_CB_QDISC
#define bnxt_tc_setup_indr_block(netdev, sch, bp, f, data, cleanup) \
bnxt_tc_setup_indr_block(netdev, bp, f)
#else
#define bnxt_tc_setup_indr_block(netdev, bp, f, data, cleanup) \
bnxt_tc_setup_indr_block(netdev, bp, f)
#endif
#ifdef HAVE_FLOW_INDR_BLOCK_CB_QDISC
#define flow_indr_block_cb_alloc(cb, cb_ident, cb_priv, bnxt_tc_setup_indr_rel, \
f, netdev, sch, data, bp, cleanup) \
flow_block_cb_alloc(cb, cb_ident, cb_priv, bnxt_tc_setup_indr_rel)
#else
#define flow_indr_block_cb_alloc(cb, cb_ident, cb_priv, bnxt_tc_setup_indr_rel, \
f, netdev, data, bp, cleanup) \
flow_block_cb_alloc(cb, cb_ident, cb_priv, bnxt_tc_setup_indr_rel)
#endif
#define flow_indr_block_cb_remove(block_cb, f) \
flow_block_cb_remove(block_cb, f)
#ifdef HAVE_FLOW_INDR_BLOCK_CB_QDISC
#define bnxt_tc_setup_indr_cb(netdev, sch, cb_priv, type, type_data, data, cleanup) \
bnxt_tc_setup_indr_cb(netdev, cb_priv, type, type_data)
#else
#define bnxt_tc_setup_indr_cb(netdev, cb_priv, type, type_data, data, cleanup) \
bnxt_tc_setup_indr_cb(netdev, cb_priv, type, type_data)
#endif
#endif /* HAVE_FLOW_INDR_BLOCK_CLEANUP */
#endif /* HAVE_FLOW_OFFLOAD_H */
#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) && defined(HAVE_FLOW_INDR_BLOCK_CB)
#if !defined(HAVE_FLOW_INDR_DEV_RGTR)
int bnxt_tc_indr_block_event(struct notifier_block *nb, unsigned long event,
void *ptr);
static inline int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb,
void *cb_priv)
{
struct bnxt *bp = cb_priv;
bp->tc_netdev_nb.notifier_call = bnxt_tc_indr_block_event;
return register_netdevice_notifier(&bp->tc_netdev_nb);
}
static inline void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb,
void *cb_priv,
void (*release)(void *cb_priv))
{
struct bnxt *bp = cb_priv;
unregister_netdevice_notifier(&bp->tc_netdev_nb);
}
#endif /* !HAVE_FLOW_INDR_DEV_RGTR */
#ifdef HAVE_OLD_FLOW_INDR_DEV_UNRGTR
#define flow_indr_dev_unregister(cb, bp, rel) \
flow_indr_dev_unregister(cb, bp, bnxt_tc_setup_indr_block_cb)
#endif /* HAVE_OLD_FLOW_INDR_DEV_UNRGTR */
#endif /* CONFIG_BNXT_FLOWER_OFFLOAD && HAVE_FLOW_INDR_BLOCK_CB */
#ifndef HAVE_FLOW_OFFLOAD_H
struct flow_match_basic {
struct flow_dissector_key_basic *key, *mask;
};
struct flow_match_control {
struct flow_dissector_key_control *key, *mask;
};
struct flow_match_eth_addrs {
struct flow_dissector_key_eth_addrs *key, *mask;
};
struct flow_match_vlan {
struct flow_dissector_key_vlan *key, *mask;
};
struct flow_match_ipv4_addrs {
struct flow_dissector_key_ipv4_addrs *key, *mask;
};
struct flow_match_ipv6_addrs {
struct flow_dissector_key_ipv6_addrs *key, *mask;
};
struct flow_match_ip {
struct flow_dissector_key_ip *key, *mask;
};
struct flow_match_ports {
struct flow_dissector_key_ports *key, *mask;
};
struct flow_match_icmp {
struct flow_dissector_key_icmp *key, *mask;
};
struct flow_match_tcp {
struct flow_dissector_key_tcp *key, *mask;
};
struct flow_match_enc_keyid {
struct flow_dissector_key_keyid *key, *mask;
};
struct flow_match {
struct flow_dissector *dissector;
void *mask;
void *key;
};
struct flow_rule {
struct flow_match match;
};
#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \
const struct flow_match *__m = &(__rule)->match; \
struct flow_dissector *__d = (__m)->dissector; \
\
(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \
(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask)
static inline bool flow_rule_match_key(const struct flow_rule *rule,
enum flow_dissector_key_id key)
{
return dissector_uses_key(rule->match.dissector, key);
}
static inline void flow_rule_match_basic(const struct flow_rule *rule,
struct flow_match_basic *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
static inline void flow_rule_match_control(const struct flow_rule *rule,
struct flow_match_control *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
static inline void flow_rule_match_eth_addrs(const struct flow_rule *rule,
struct flow_match_eth_addrs *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
static inline void flow_rule_match_vlan(const struct flow_rule *rule,
struct flow_match_vlan *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
static inline void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
struct flow_match_ipv4_addrs *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
static inline void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
struct flow_match_ipv6_addrs *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
static inline void flow_rule_match_ip(const struct flow_rule *rule,
struct flow_match_ip *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
static inline void flow_rule_match_tcp(const struct flow_rule *rule,
struct flow_match_tcp *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
static inline void flow_rule_match_ports(const struct flow_rule *rule,
struct flow_match_ports *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
static inline void flow_rule_match_icmp(const struct flow_rule *rule,
struct flow_match_icmp *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
static inline void flow_rule_match_enc_control(const struct flow_rule *rule,
struct flow_match_control *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
static inline void
flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
struct flow_match_ipv4_addrs *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
static inline void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
struct flow_match_ipv6_addrs *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
static inline void flow_rule_match_enc_ip(const struct flow_rule *rule,
struct flow_match_ip *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
static inline void flow_rule_match_enc_ports(const struct flow_rule *rule,
struct flow_match_ports *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
static inline void flow_rule_match_enc_keyid(const struct flow_rule *rule,
struct flow_match_enc_keyid *out)
{
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
#ifdef flow_cls_offload_flow_rule
#undef flow_cls_offload_flow_rule
#endif
#define flow_cls_offload_flow_rule(cmd) \
(&(struct flow_rule) { \
.match = { \
.dissector = (cmd)->dissector, \
.mask = (cmd)->mask, \
.key = (cmd)->key, \
} \
})
enum flow_action_id {
FLOW_ACTION_ACCEPT = 0,
FLOW_ACTION_DROP,
FLOW_ACTION_TRAP,
FLOW_ACTION_GOTO,
FLOW_ACTION_REDIRECT,
FLOW_ACTION_MIRRED,
FLOW_ACTION_REDIRECT_INGRESS,
FLOW_ACTION_MIRRED_INGRESS,
FLOW_ACTION_VLAN_PUSH,
FLOW_ACTION_VLAN_POP,
FLOW_ACTION_VLAN_MANGLE,
FLOW_ACTION_TUNNEL_ENCAP,
FLOW_ACTION_TUNNEL_DECAP,
FLOW_ACTION_MANGLE,
FLOW_ACTION_ADD,
FLOW_ACTION_CSUM,
FLOW_ACTION_MARK,
FLOW_ACTION_PTYPE,
FLOW_ACTION_PRIORITY,
FLOW_ACTION_WAKE,
FLOW_ACTION_QUEUE,
FLOW_ACTION_SAMPLE,
FLOW_ACTION_POLICE,
FLOW_ACTION_CT,
FLOW_ACTION_CT_METADATA,
FLOW_ACTION_MPLS_PUSH,
FLOW_ACTION_MPLS_POP,
FLOW_ACTION_MPLS_MANGLE,
FLOW_ACTION_GATE,
FLOW_ACTION_PPPOE_PUSH,
FLOW_ACTION_INVALID = NUM_FLOW_ACTIONS
};
#endif /* !HAVE_FLOW_OFFLOAD_H */
#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */


@@ -0,0 +1,267 @@
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright (c) 2022-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/hashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include "bnxt_compat.h"
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_mpc.h"
#include "bnxt_tfc.h"
#define BNXT_MPC_RX_US_SLEEP 10000
#define BNXT_MPC_RX_RETRY 10
#define BNXT_MPC_TIMEOUT (BNXT_MPC_RX_US_SLEEP * BNXT_MPC_RX_RETRY)
#define BNXT_TFC_MPC_TX_RETRIES 150
#define BNXT_TFC_MPC_TX_RETRY_DELAY_MIN_US 500
#define BNXT_TFC_MPC_TX_RETRY_DELAY_MAX_US 1000
#define BNXT_TFC_DISP_BUF_SIZE 128
#define BNXT_TFC_PR_W_1BYTES 1
#define BNXT_TFC_PR_W_2BYTES 2
#define BNXT_TFC_PR_W_4BYTES 4
/*
 * bnxt_tfc_buf_dump: Pretty-prints a buffer
 *
 * Parameters:
 * hdr - A header that is printed as-is
 * msg - Pointer to the uint8_t buffer to be dumped
 * msglen - Length of the buffer, in bytes
 * prtwidth - Width of the items to be printed, in bytes;
 * allowed values are 1, 2 and 4.
 * Falls back to 1 (16 items per line) if any other value
 * is given or if the buffer length is not a multiple of
 * the width
 * linewidth - Length of the printed lines, in items
 */
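/* Example (hypothetical call): dump a 64-byte request as 32-bit words,
 * four per line:
 *
 * bnxt_tfc_buf_dump(bp, "mpc req:", (uint8_t *)req, 64,
 * BNXT_TFC_PR_W_4BYTES, 4);
 */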
void bnxt_tfc_buf_dump(struct bnxt *bp, char *hdr,
uint8_t *msg, int msglen,
int prtwidth, int linewidth)
{
char msg_line[BNXT_TFC_DISP_BUF_SIZE] = ""; /* avoid printing an uninitialized line when msglen is 0 */
int msg_i = 0, i;
uint16_t *sw_msg = (uint16_t *)msg;
uint32_t *lw_msg = (uint32_t *)msg;
if (hdr)
netdev_dbg(bp->dev, "%s", hdr);
if (msglen % prtwidth) {
netdev_dbg(bp->dev, "msglen[%u] not aligned on width[%u]\n",
msglen, prtwidth);
prtwidth = 1;
linewidth = 16;
}
for (i = 0; i < msglen / prtwidth; i++) {
if ((i % linewidth == 0) && i)
netdev_dbg(bp->dev, "%s\n", msg_line);
if (i % linewidth == 0) {
msg_i = 0;
msg_i += snprintf(&msg_line[msg_i], (sizeof(msg_line) - msg_i),
"%04x: ", i * prtwidth);
}
switch (prtwidth) {
case BNXT_TFC_PR_W_2BYTES:
msg_i += snprintf(&msg_line[msg_i], (sizeof(msg_line) - msg_i),
"%04x ", sw_msg[i]);
break;
case BNXT_TFC_PR_W_4BYTES:
msg_i += snprintf(&msg_line[msg_i], (sizeof(msg_line) - msg_i),
"%08x ", lw_msg[i]);
break;
case BNXT_TFC_PR_W_1BYTES:
default:
msg_i += snprintf(&msg_line[msg_i], (sizeof(msg_line) - msg_i),
"%02x ", msg[i]);
break;
}
}
netdev_dbg(bp->dev, "%s\n", msg_line);
}
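/* Usage sketch (hypothetical call site, not from the original source):
 * dump a completion structure as 4-byte words, four words per line:
 *
 *	bnxt_tfc_buf_dump(bp, "tfc cmpl:", (uint8_t *)&ctx->tfc_cmp,
 *			  sizeof(ctx->tfc_cmp), BNXT_TFC_PR_W_4BYTES, 4);
 */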
void bnxt_free_tfc_mpc_info(struct bnxt *bp)
{
struct bnxt_tfc_mpc_info *tfc_info;
if (!bp)
return;
tfc_info = bp->tfc_info;
if (tfc_info && tfc_info->mpc_cache) {
kmem_cache_destroy(tfc_info->mpc_cache);
tfc_info->mpc_cache = NULL;
}
kfree(bp->tfc_info);
bp->tfc_info = NULL;
}
int bnxt_alloc_tfc_mpc_info(struct bnxt *bp)
{
struct bnxt_tfc_mpc_info *tfc_info =
(struct bnxt_tfc_mpc_info *)(bp->tfc_info);
if (!tfc_info) {
tfc_info = kzalloc(sizeof(*tfc_info), GFP_KERNEL);
if (!tfc_info)
return -ENOMEM;
bp->tfc_info = (void *)tfc_info;
}
tfc_info->mpc_cache = kmem_cache_create("bnxt_tfc",
sizeof(struct bnxt_tfc_cmd_ctx),
0, 0, NULL);
if (!tfc_info->mpc_cache) {
bnxt_free_tfc_mpc_info(bp);
return -ENOMEM;
}
return 0;
}
int bnxt_mpc_send(struct bnxt *bp,
struct bnxt_mpc_mbuf *in_msg,
struct bnxt_mpc_mbuf *out_msg,
uint32_t *opaque)
{
struct bnxt_tfc_mpc_info *tfc = (struct bnxt_tfc_mpc_info *)bp->tfc_info;
struct bnxt_mpc_info *mpc = bp->mpc_info;
struct bnxt_tfc_cmd_ctx *ctx = NULL;
unsigned long tmo_left, handle = 0;
struct bnxt_tx_ring_info *txr;
uint tmo = BNXT_MPC_TIMEOUT;
int retry = 0;
int rc = 0;
if (!mpc || !tfc) {
netdev_dbg(bp->dev, "%s: mpc[%p], tfc[%p]\n", __func__, mpc, tfc);
return -1;
}
if (out_msg->cmp_type != MPC_CMP_TYPE_MID_PATH_SHORT &&
out_msg->cmp_type != MPC_CMP_TYPE_MID_PATH_LONG)
return -1;
do {
atomic_inc(&tfc->pending);
/* Make sure bnxt_close_nic() sees pending before we check the
* BNXT_STATE_OPEN flag.
*/
smp_mb__after_atomic();
if (test_bit(BNXT_STATE_OPEN, &bp->state))
break;
atomic_dec(&tfc->pending);
usleep_range(BNXT_TFC_MPC_TX_RETRY_DELAY_MIN_US,
BNXT_TFC_MPC_TX_RETRY_DELAY_MAX_US);
retry++;
} while (retry < BNXT_TFC_MPC_TX_RETRIES);
if (retry >= BNXT_TFC_MPC_TX_RETRIES) {
netdev_err(bp->dev, "%s: TF MPC send failed after max retries\n",
__func__);
return -EAGAIN;
}
if (in_msg->chnl_id == RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA)
txr = &mpc->mpc_rings[BNXT_MPC_TE_CFA_TYPE][0];
else
txr = &mpc->mpc_rings[BNXT_MPC_RE_CFA_TYPE][0];
if (!txr) {
netdev_err(bp->dev, "%s: No Tx rings\n", __func__);
rc = -EINVAL;
goto xmit_done;
}
if (tmo) {
ctx = kmem_cache_alloc(tfc->mpc_cache, GFP_KERNEL);
if (!ctx) {
rc = -ENOMEM;
goto xmit_done;
}
init_completion(&ctx->cmp);
handle = (unsigned long)ctx;
ctx->tfc_cmp.opaque = *opaque;
might_sleep();
}
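	/* With a non-zero timeout the send is synchronous: the opaque
	 * handle passed to bnxt_start_xmit_mpc() identifies this ctx, and
	 * bnxt_tfc_mpc_cmp() completes ctx->cmp when the completion for
	 * this command arrives.
	 */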
spin_lock(&txr->tx_lock);
rc = bnxt_start_xmit_mpc(bp, txr, in_msg->msg_data,
in_msg->msg_size, handle);
spin_unlock(&txr->tx_lock);
if (rc || !tmo)
goto xmit_done;
tmo_left = wait_for_completion_timeout(&ctx->cmp, msecs_to_jiffies(tmo));
if (!tmo_left) {
ctx->tfc_cmp.opaque = BNXT_INV_TMPC_OPAQUE;
netdev_warn(bp->dev, "TFC MP cmd %08x timed out\n",
*((u32 *)in_msg->msg_data));
rc = -ETIMEDOUT;
goto xmit_done;
}
if (TFC_CMPL_STATUS(&ctx->tfc_cmp) == TFC_CMPL_STATUS_OK) {
/* Copy response/completion back into out_msg */
memcpy(out_msg->msg_data, &ctx->tfc_cmp, sizeof(ctx->tfc_cmp));
rc = 0;
} else {
netdev_err(bp->dev, "MPC status code [%lu]\n",
TFC_CMPL_STATUS(&ctx->tfc_cmp) >> TFC_CMPL_STATUS_SFT);
rc = -EIO;
}
xmit_done:
if (ctx)
kmem_cache_free(tfc->mpc_cache, ctx);
atomic_dec(&tfc->pending);
return rc;
}
void bnxt_tfc_mpc_cmp(struct bnxt *bp, u32 client, unsigned long handle,
struct bnxt_cmpl_entry cmpl[], u32 entries)
{
struct bnxt_tfc_cmd_ctx *ctx;
struct tfc_cmpl *cmp;
u32 len;
cmp = cmpl[0].cmpl;
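	/* A zero handle means the sender did not wait for a completion
	 * (bnxt_mpc_send() was called with no timeout); warn only when
	 * the completion itself looks malformed.
	 */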
if (!handle || entries < 1 || entries > 2) {
if (entries < 1 || entries > 2) {
netdev_warn(bp->dev, "Invalid entries %d with handle %lx cmpl %08x in %s()\n",
entries, handle, *(u32 *)cmp, __func__);
}
return;
}
ctx = (void *)handle;
if (entries > 1) {
memcpy(&ctx->tfc_cmp, cmpl[0].cmpl, cmpl[0].len);
memcpy(&ctx->tfc_cmp.l_cmpl[0], cmpl[1].cmpl, cmpl[1].len);
} else {
len = min_t(u32, cmpl[0].len, sizeof(ctx->tfc_cmp));
memcpy(&ctx->tfc_cmp, cmpl[0].cmpl, len);
}
complete(&ctx->cmp);
}


@ -0,0 +1,150 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2022-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_TFC_H
#define BNXT_TFC_H
#include <linux/hashtable.h>
#include "bnxt_mpc.h"
struct bnxt_tfc_mpc_info {
struct kmem_cache *mpc_cache;
atomic_t pending;
};
struct tfc_cmpl {
__le16 client_status_type;
#define TFC_CMPL_TYPE_MASK 0x3fUL
#define TFC_CMPL_TYPE_SFT 0
#define TFC_CMPL_TYPE_MID_PATH_SHORT 0x1eUL
#define TFC_CMPL_TYPE_MID_PATH_LONG 0x1fUL
#define TFC_CMPL_STATUS_MASK 0xf00UL
#define TFC_CMPL_STATUS_SFT 8
#define TFC_CMPL_STATUS_OK 0x0UL
#define TFC_CMPL_STATUS_UNSPRT_ERR 0x1UL
#define TFC_CMPL_STATUS_FMT_ERR 0x2UL
#define TFC_CMPL_STATUS_SCOPE_ERR 0x3UL
#define TFC_CMPL_STATUS_ADDR_ERR 0x4UL
#define TFC_CMPL_STATUS_CACHE_ERR 0x5UL
#define TFC_CMPL_MP_CLIENT_MASK 0xf000UL
#define TFC_CMPL_MP_CLIENT_SFT 12
#define TFC_CMPL_MP_CLIENT_TE_CFA 0x2UL
#define TFC_CMPL_MP_CLIENT_RE_CFA 0x3UL
__le16 opc_dmalen;
#define TFC_CMPL_OPC_MASK 0xffUL
#define TFC_CMPL_OPC_SFT 0
#define TFC_CMPL_OPC_TBL_READ 0
#define TFC_CMPL_OPC_TBL_WRITE 1
#define TFC_CMPL_OPC_TBL_READ_CLR 2
#define TFC_CMPL_OPC_TBL_INVALIDATE 5
#define TFC_CMPL_OPC_TBL_EVENT_COLLECTION 6
#define TFC_CMPL_OPC_TBL_EM_SEARCH 8
#define TFC_CMPL_OPC_TBL_EM_INSERT 9
#define TFC_CMPL_OPC_TBL_EM_DELETE 10
#define TFC_CMPL_OPC_TBL_EM_CHAIN 11
u32 opaque;
__le32 v_hmsb_tbl_type_scope;
#define TFC_CMPL_V 0x1UL
#define TFC_CMPL_V_MASK 0x1UL
#define TFC_CMPL_V_SFT 0
#define TFC_CMPL_HASH_MSB_MASK 0xfffUL
#define TFC_CMPL_HASH_MSB_SFT 12
#define TFC_CMPL_TBL_TYPE_MASK 0xf000UL
#define TFC_CMPL_TBL_TYPE_SFT 12
#define TFC_CMPL_TBL_TYPE_ACTION 0
#define TFC_CMPL_TBL_TYPE_EM 1
#define TFC_CMPL_TBL_SCOPE_MASK 0x1f000000UL
#define TFC_CMPL_TBL_SCOPE_SFT 24
__le32 v_tbl_index;
#define TFC_CMPL_TBL_IDX_MASK 0x3ffffffUL
#define TFC_CMPL_TBL_IDX_SFT 0
__le32 l_cmpl[4];
};
/*
* Use a combination of opcode, table_type, table_scope and table_index to
* generate a unique opaque field, which can be used to verify the completion
* later.
*
* cccc_ssss_siii_iiii_iiii_iiii_iiii_iiii
* opaque[31:28] (c) opcode
* opaque[27:23] (s) tbl scope
* opaque[22:00] (i) tbl index
*
* 0x1080000a
* 0x01000001
* 0x1000000a
*/
#define TFC_CMPL_OPC_NIB_MASK 0xfUL
#define TFC_CMPL_OPQ_OPC_SFT 28
#define TFC_CMPL_TBL_23B_IDX_MASK 0x7fffffUL
#define TFC_CMPL_TBL_SCOPE_OPQ_SFT 1
#define TFC_CMD_TBL_SCOPE_OPQ_SFT 23
/* Used to generate opaque field for command send */
#define BNXT_TFC_CMD_OPQ(opc, ts, ti) \
((((opc) & TFC_CMPL_OPC_NIB_MASK) << TFC_CMPL_OPQ_OPC_SFT) | \
((ts) << TFC_CMD_TBL_SCOPE_OPQ_SFT) | \
((ti) & TFC_CMPL_TBL_23B_IDX_MASK))
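/* Worked example, using the sample encodings above: packing opcode
 * TFC_CMPL_OPC_TBL_WRITE (1), table scope 1 and table index 10 gives
 * BNXT_TFC_CMD_OPQ(1, 1, 10) == (0x1 << 28) | (1 << 23) | 0xa
 *                            == 0x1080000a.
 */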
/* Used to generate opaque field for completion verification */
#define BNXT_TFC_CMPL_OPAQUE(tfc_cmpl) \
((((u32)le16_to_cpu((tfc_cmpl)->opc_dmalen) & TFC_CMPL_OPC_NIB_MASK) << \
TFC_CMPL_OPQ_OPC_SFT) | \
((le32_to_cpu((tfc_cmpl)->v_hmsb_tbl_type_scope) & TFC_CMPL_TBL_SCOPE_MASK) >> \
TFC_CMPL_TBL_SCOPE_OPQ_SFT) |\
(le32_to_cpu((tfc_cmpl)->v_tbl_index) & TFC_CMPL_TBL_23B_IDX_MASK))
#define BNXT_INV_TMPC_OPAQUE 0xffffffff
#define TFC_CMPL_STATUS(tfc_cmpl) \
(le16_to_cpu((tfc_cmpl)->client_status_type) & \
TFC_CMPL_STATUS_MASK)
struct bnxt_tfc_cmd_ctx {
struct completion cmp;
struct tfc_cmpl tfc_cmp;
};
struct bnxt_mpc_mbuf {
uint32_t chnl_id;
uint8_t cmp_type;
uint8_t *msg_data;
/* MPC msg size in bytes, must be multiple of 16Bytes */
uint16_t msg_size;
};
static inline bool bnxt_tfc_busy(struct bnxt *bp)
{
struct bnxt_tfc_mpc_info *tfc_info = bp->tfc_info;
return tfc_info && atomic_read(&tfc_info->pending) > 0;
}
void bnxt_tfc_buf_dump(struct bnxt *bp, char *hdr,
uint8_t *msg, int msglen,
int prtwidth, int linewidth);
void bnxt_free_tfc_mpc_info(struct bnxt *bp);
int bnxt_alloc_tfc_mpc_info(struct bnxt *bp);
int bnxt_mpc_send(struct bnxt *bp,
struct bnxt_mpc_mbuf *in_msg,
struct bnxt_mpc_mbuf *out_msg,
uint32_t *opaque);
void bnxt_tfc_mpc_cmp(struct bnxt *bp, u32 client, unsigned long handle,
struct bnxt_cmpl_entry cmpl[], u32 entries);
#endif

(File diff suppressed because it is too large.)


@ -0,0 +1,84 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_UDCC_H
#define BNXT_UDCC_H
#define BNXT_UDCC_MAX_SESSIONS 2048
#define BNXT_UDCC_HASH_SIZE 64
#define BNXT_UDCC_SESSION_CREATE 0
#define BNXT_UDCC_SESSION_DELETE 1
#define BNXT_UDCC_SESSION_UPDATE 2
#define BNXT_UDCC_SESSION_PER_QP(bp) ((bp)->udcc_info->session_type & \
UDCC_QCAPS_RESP_SESSION_TYPE_PER_QP)
struct bnxt_udcc_session_entry {
u32 session_id;
u32 rx_flow_id;
u32 tx_flow_id;
u64 rx_counter_hndl;
u64 tx_counter_hndl;
u8 dest_mac[ETH_ALEN];
u8 src_mac[ETH_ALEN];
u8 dst_mac_mod[ETH_ALEN];
u8 src_mac_mod[ETH_ALEN];
struct in6_addr dst_ip;
struct in6_addr src_ip;
u32 src_qp_num;
u32 dest_qp_num;
struct dentry *debugfs_dir;
struct bnxt *bp;
u8 state;
bool v4_dst;
bool skip_subnet_checking;
};
struct bnxt_udcc_work {
struct work_struct work;
struct bnxt *bp;
u32 session_id;
u8 session_opcode;
bool session_suspend;
};
struct bnxt_udcc_info {
u32 max_sessions;
struct bnxt_udcc_session_entry *session_db[BNXT_UDCC_MAX_SESSIONS];
struct mutex session_db_lock; /* protect session_db */
u32 session_count;
u8 session_type;
struct dentry *udcc_debugfs_dir;
u16 max_comp_cfg_xfer;
u16 max_comp_data_xfer;
unsigned long tf_events;
#define BNXT_UDCC_INFO_TF_EVENT_SUSPEND BIT(0)
#define BNXT_UDCC_INFO_TF_EVENT_UNSUSPEND BIT(1)
/* mode is 0 if udcc is disabled */
u8 mode;
};
static inline u8 bnxt_udcc_get_mode(struct bnxt *bp)
{
return bp->udcc_info ? bp->udcc_info->mode : 0;
}
int bnxt_alloc_udcc_info(struct bnxt *bp);
void bnxt_free_udcc_info(struct bnxt *bp);
void bnxt_udcc_session_db_cleanup(struct bnxt *bp);
void bnxt_udcc_task(struct work_struct *work);
int bnxt_hwrm_udcc_session_query(struct bnxt *bp, u32 session_id,
struct hwrm_udcc_session_query_output *resp_out);
int bnxt_queue_udcc_work(struct bnxt *bp, u32 session_id, u32 session_opcode,
bool suspend);
void bnxt_udcc_update_session(struct bnxt *bp, bool suspend);
void bnxt_udcc_session_debugfs_add(struct bnxt *bp);
void bnxt_udcc_session_debugfs_cleanup(struct bnxt *bp);
#endif


@ -0,0 +1,646 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2016-2018 Broadcom Limited
* Copyright (c) 2018-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include "bnxt_compat.h"
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_log.h"
#include "bnxt_log_data.h"
static DEFINE_IDA(bnxt_aux_dev_ids);
static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
{
struct bnxt_en_dev *edev = bp->edev;
int num_msix, i;
num_msix = edev->ulp_tbl->msix_requested;
for (i = 0; i < num_msix; i++) {
ent[i].vector = bp->irq_tbl[i].vector;
ent[i].ring_idx = i;
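		/* P5+ chips expose a common doorbell offset; older chips
		 * place each ring's doorbell at a fixed 0x80 stride in
		 * the BAR.
		 */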
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
ent[i].db_offset = bp->db_offset;
else
ent[i].db_offset = i * 0x80;
}
}
int bnxt_get_ulp_msix_num(struct bnxt *bp)
{
if (bp->edev)
return bp->edev->ulp_num_msix_vec;
return 0;
}
void bnxt_set_ulp_msix_num(struct bnxt *bp, int num)
{
if (bp->edev)
bp->edev->ulp_num_msix_vec = num;
}
int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp)
{
if (bnxt_ulp_registered(bp->edev))
return bp->edev->ulp_num_msix_vec;
return 0;
}
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
{
if (bp->edev)
return bp->edev->ulp_num_ctxs;
return 0;
}
void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ulp_ctx)
{
if (bp->edev)
bp->edev->ulp_num_ctxs = num_ulp_ctx;
}
int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp)
{
if (bnxt_ulp_registered(bp->edev))
return bp->edev->ulp_num_ctxs;
return 0;
}
void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp)
{
if (bp->edev) {
bp->edev->ulp_num_ctxs = BNXT_MIN_ROCE_STAT_CTXS;
/* Reserve one additional stat_ctx for PF0 (except
* on 1-port NICs) as it also creates one stat_ctx
* for PF1 in case of RoCE bonding.
*/
if (BNXT_PF(bp) && !bp->pf.port_id &&
bp->port_count > 1)
bp->edev->ulp_num_ctxs++;
}
}
int bnxt_register_dev(struct bnxt_en_dev *edev,
struct bnxt_ulp_ops *ulp_ops, void *handle)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
unsigned int max_stat_ctxs;
struct bnxt_ulp *ulp;
int rc = 0;
rtnl_lock();
mutex_lock(&edev->en_dev_lock);
if (!bp->irq_tbl) {
rc = -ENODEV;
goto exit;
}
max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
bp->cp_nr_rings == max_stat_ctxs) {
rc = -ENOMEM;
goto exit;
}
ulp = edev->ulp_tbl;
ulp->handle = handle;
rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
if (test_bit(BNXT_STATE_OPEN, &bp->state))
bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[0], 0);
edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
exit:
mutex_unlock(&edev->en_dev_lock);
rtnl_unlock();
return rc;
}
EXPORT_SYMBOL(bnxt_register_dev);
int bnxt_unregister_dev(struct bnxt_en_dev *edev)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ulp *ulp;
ulp = edev->ulp_tbl;
rtnl_lock();
if (ulp->msix_requested)
edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
edev->ulp_tbl->msix_requested = 0;
if (ulp->max_async_event_id)
bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
RCU_INIT_POINTER(ulp->ulp_ops, NULL);
synchronize_rcu();
ulp->max_async_event_id = 0;
ulp->async_events_bmap = NULL;
rtnl_unlock();
return 0;
}
EXPORT_SYMBOL(bnxt_unregister_dev);
static int bnxt_num_ulp_msix_requested(struct bnxt *bp, int num_msix)
{
int num_msix_want;
if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
return 0;
	/*
	 * Request MSI-X based on the function type. This is a
	 * temporary solution to enable the maximum number of VFs
	 * when NPAR is enabled.
	 * TODO - change the scheme to an adapter-specific check,
	 * as the latest adapters can support more NQs. For now
	 * this works for all adapter versions.
	 */
if (BNXT_VF(bp))
num_msix_want = BNXT_MAX_ROCE_MSIX_VF;
else if (bp->port_partition_type)
num_msix_want = BNXT_MAX_ROCE_MSIX_NPAR_PF;
else if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
(bp->flags & BNXT_FLAG_CHIP_P7))
#ifdef BNXT_FPGA
num_msix_want = BNXT_MAX_ROCE_MSIX_PF - 1;
#else
num_msix_want = BNXT_MAX_ROCE_MSIX_GEN_P5_PF;
#endif
else
num_msix_want = num_msix;
	/*
	 * Since MSI-X vectors are used for both NQs and the CREQ, try to
	 * allocate num_online_cpus() + 1 to account for the CREQ. This
	 * lets the number of MSI-X vectors for NQs match the number of
	 * CPUs and allows the system to be fully utilized.
	 */
num_msix_want = min_t(u32, num_msix_want, num_online_cpus() + 1);
num_msix_want = min_t(u32, num_msix_want, BNXT_MAX_ROCE_MSIX);
num_msix_want = max_t(u32, num_msix_want, BNXT_MIN_ROCE_CP_RINGS);
return num_msix_want;
}
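/* Example (hypothetical system): on a P5 PF with 8 online CPUs this
 * resolves to min(BNXT_MAX_ROCE_MSIX_GEN_P5_PF, 8 + 1) = 9 vectors,
 * i.e. eight NQs matching the CPUs plus one for the CREQ.
 */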
int bnxt_send_msg(struct bnxt_en_dev *edev,
struct bnxt_fw_msg *fw_msg)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
struct output *resp;
struct input *req;
u32 resp_len;
int rc;
if (bp->fw_reset_state)
return -EBUSY;
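	/* The ULP hands us a fully formed HWRM request: replace the body
	 * of a freshly initialized request with it, send it, and copy at
	 * most resp_max_len bytes of the response back to the caller.
	 */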
rc = hwrm_req_init(bp, req, 0 /* don't care */);
if (rc)
return rc;
rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
if (rc)
return rc;
hwrm_req_timeout(bp, req, fw_msg->timeout);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
resp_len = le16_to_cpu(resp->resp_len);
if (resp_len) {
if (fw_msg->resp_max_len < resp_len)
resp_len = fw_msg->resp_max_len;
memcpy(fw_msg->resp, resp, resp_len);
}
hwrm_req_drop(bp, req);
return rc;
}
EXPORT_SYMBOL(bnxt_send_msg);
void bnxt_ulp_stop(struct bnxt *bp)
{
struct bnxt_aux_priv *bnxt_aux = bp->aux_priv;
struct bnxt_en_dev *edev = bp->edev;
if (!edev)
return;
mutex_lock(&edev->en_dev_lock);
	/* This check is needed for the RoCE LAG case */
if (!bnxt_ulp_registered(edev)) {
mutex_unlock(&edev->en_dev_lock);
return;
}
edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
edev->en_state = bp->state;
if (bnxt_aux) {
struct auxiliary_device *adev;
adev = &bnxt_aux->aux_dev;
if (adev->dev.driver) {
struct auxiliary_driver *adrv;
pm_message_t pm = {};
adrv = to_auxiliary_drv(adev->dev.driver);
if (adrv->suspend)
adrv->suspend(adev, pm);
}
}
mutex_unlock(&edev->en_dev_lock);
}
void bnxt_ulp_start(struct bnxt *bp, int err)
{
struct bnxt_aux_priv *bnxt_aux = bp->aux_priv;
struct bnxt_en_dev *edev = bp->edev;
if (!edev)
return;
edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
edev->en_state = bp->state;
if (err)
return;
mutex_lock(&edev->en_dev_lock);
	/* This check is needed for the RoCE LAG case */
if (!bnxt_ulp_registered(edev)) {
mutex_unlock(&edev->en_dev_lock);
return;
}
bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
if (bnxt_aux) {
struct auxiliary_device *adev;
adev = &bnxt_aux->aux_dev;
if (adev->dev.driver) {
struct auxiliary_driver *adrv;
adrv = to_auxiliary_drv(adev->dev.driver);
if (adrv->resume)
adrv->resume(adev);
}
}
mutex_unlock(&edev->en_dev_lock);
}
/*
 * In kernels without native auxiliary bus (auxbus) support, invoke the
 * auxiliary_driver shutdown function directly.
 */
#ifndef HAVE_AUXILIARY_DRIVER
void bnxt_ulp_shutdown(struct bnxt *bp)
{
struct bnxt_aux_priv *bnxt_aux = bp->aux_priv;
struct bnxt_en_dev *edev = bp->edev;
if (!edev)
return;
if (bnxt_aux) {
struct auxiliary_device *adev;
adev = &bnxt_aux->aux_dev;
if (adev->dev.driver) {
struct auxiliary_driver *adrv;
adrv = to_auxiliary_drv(adev->dev.driver);
if (adrv->shutdown)
adrv->shutdown(adev);
}
}
}
#endif
void bnxt_ulp_irq_stop(struct bnxt *bp)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
bool reset = false;
ASSERT_RTNL();
if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return;
if (bnxt_ulp_registered(bp->edev)) {
struct bnxt_ulp *ulp = edev->ulp_tbl;
if (!ulp->msix_requested)
return;
ops = rtnl_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_irq_stop)
return;
if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
reset = true;
edev->en_state = bp->state;
ops->ulp_irq_stop(ulp->handle, reset);
}
}
void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
ASSERT_RTNL();
if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return;
if (bnxt_ulp_registered(bp->edev)) {
struct bnxt_ulp *ulp = edev->ulp_tbl;
struct bnxt_msix_entry *ent = NULL;
if (!ulp->msix_requested)
return;
ops = rtnl_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_irq_restart)
return;
if (!err) {
ent = kcalloc(ulp->msix_requested, sizeof(*ent),
GFP_KERNEL);
if (!ent)
return;
bnxt_fill_msix_vecs(bp, ent);
}
edev->en_state = bp->state;
ops->ulp_irq_restart(ulp->handle, ent);
kfree(ent);
}
}
void bnxt_logger_ulp_live_data(void *d, u32 seg_id)
{
struct bnxt_en_dev *edev;
struct bnxt *bp;
bp = d;
edev = bp->edev;
if (!edev)
return;
if (bnxt_ulp_registered(edev)) {
struct bnxt_ulp_ops *ops;
struct bnxt_ulp *ulp;
ulp = edev->ulp_tbl;
ops = rtnl_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_log_live)
return;
ops->ulp_log_live(ulp->handle, seg_id);
}
}
void bnxt_ulp_log_raw(struct bnxt_en_dev *edev, u16 logger_id,
void *data, int len)
{
bnxt_log_raw(netdev_priv(edev->net), logger_id, data, len);
}
EXPORT_SYMBOL(bnxt_ulp_log_raw);
void bnxt_ulp_log_live(struct bnxt_en_dev *edev, u16 logger_id,
const char *format, ...)
{
bnxt_log_live(netdev_priv(edev->net), logger_id, format);
}
EXPORT_SYMBOL(bnxt_ulp_log_live);
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
{
u16 event_id = le16_to_cpu(cmpl->event_id);
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
struct bnxt_ulp *ulp;
if (!bnxt_ulp_registered(edev))
return;
ulp = edev->ulp_tbl;
rcu_read_lock();
ops = rcu_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_async_notifier)
goto exit_unlock_rcu;
if (!ulp->async_events_bmap || event_id > ulp->max_async_event_id)
goto exit_unlock_rcu;
/* Read max_async_event_id first before testing the bitmap. */
smp_rmb();
if (edev->flags & BNXT_EN_FLAG_ULP_STOPPED)
goto exit_unlock_rcu;
if (test_bit(event_id, ulp->async_events_bmap))
ops->ulp_async_notifier(ulp->handle, cmpl);
exit_unlock_rcu:
rcu_read_unlock();
}
EXPORT_SYMBOL(bnxt_ulp_async_events);
int bnxt_register_async_events(struct bnxt_en_dev *edev,
unsigned long *events_bmap, u16 max_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ulp *ulp;
ulp = edev->ulp_tbl;
ulp->async_events_bmap = events_bmap;
/* Make sure bnxt_ulp_async_events() sees this order */
smp_wmb();
ulp->max_async_event_id = max_id;
bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
return 0;
}
EXPORT_SYMBOL(bnxt_register_async_events);
int bnxt_dbr_complete(struct bnxt_en_dev *edev, u32 epoch)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
bnxt_dbr_recovery_done(bp, epoch, BNXT_ROCE_ULP);
return 0;
}
EXPORT_SYMBOL(bnxt_dbr_complete);
void bnxt_rdma_aux_device_uninit(struct bnxt *bp)
{
struct bnxt_aux_priv *aux_priv;
struct auxiliary_device *adev;
/* Skip if no auxiliary device init was done. */
if (!bp->aux_priv)
return;
bnxt_unregister_logger(bp, BNXT_LOGGER_ROCE);
aux_priv = bp->aux_priv;
adev = &aux_priv->aux_dev;
auxiliary_device_uninit(adev);
}
static void bnxt_aux_dev_release(struct device *dev)
{
struct bnxt_aux_priv *aux_priv =
container_of(dev, struct bnxt_aux_priv, aux_dev.dev);
struct bnxt *bp = netdev_priv(aux_priv->edev->net);
ida_free(&bnxt_aux_dev_ids, aux_priv->id);
kfree(aux_priv->edev->ulp_tbl);
kfree(aux_priv->edev);
bp->edev = NULL;
kfree(bp->aux_priv);
bp->aux_priv = NULL;
}
void bnxt_rdma_aux_device_del(struct bnxt *bp)
{
if (!bp->edev)
return;
auxiliary_device_delete(&bp->aux_priv->aux_dev);
}
static inline void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
{
edev->net = bp->dev;
edev->pdev = bp->pdev;
edev->l2_db_size = bp->db_size;
edev->l2_db_size_nc = bp->db_size_nc;
edev->l2_db_offset = bp->db_offset;
mutex_init(&edev->en_dev_lock);
if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
if (bp->is_asym_q)
edev->flags |= BNXT_EN_FLAG_ASYM_Q;
if (bp->flags & BNXT_FLAG_MULTI_HOST)
edev->flags |= BNXT_EN_FLAG_MULTI_HOST;
if (bp->flags & BNXT_FLAG_MULTI_ROOT)
edev->flags |= BNXT_EN_FLAG_MULTI_ROOT;
if (BNXT_VF(bp))
edev->flags |= BNXT_EN_FLAG_VF;
if (bp->fw_cap & BNXT_FW_CAP_HW_LAG_SUPPORTED)
edev->flags |= BNXT_EN_FLAG_HW_LAG;
if (BNXT_ROCE_VF_RESC_CAP(bp))
edev->flags |= BNXT_EN_FLAG_ROCE_VF_RES_MGMT;
if (BNXT_SW_RES_LMT(bp))
edev->flags |= BNXT_EN_FLAG_SW_RES_LMT;
edev->bar0 = bp->bar0;
edev->port_partition_type = bp->port_partition_type;
edev->port_count = bp->port_count;
edev->pf_port_id = bp->pf.port_id;
edev->hw_ring_stats_size = bp->hw_ring_stats_size;
edev->ulp_version = BNXT_ULP_VERSION;
edev->en_dbr = &bp->dbr;
edev->hdbr_info = &bp->hdbr_info;
/* Update chip type used for roce pre-init purposes */
edev->chip_num = bp->chip_num;
}
void bnxt_rdma_aux_device_add(struct bnxt *bp)
{
struct auxiliary_device *aux_dev;
int rc;
if (!bp->edev)
return;
aux_dev = &bp->aux_priv->aux_dev;
rc = auxiliary_device_add(aux_dev);
if (rc) {
netdev_warn(bp->dev, "Failed to add auxiliary device for ROCE\n");
auxiliary_device_uninit(aux_dev);
bp->flags &= ~BNXT_FLAG_ROCE_CAP;
}
}
void bnxt_rdma_aux_device_init(struct bnxt *bp)
{
struct auxiliary_device *aux_dev;
struct bnxt_aux_priv *aux_priv;
struct bnxt_en_dev *edev;
struct bnxt_ulp *ulp;
int rc;
if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
return;
aux_priv = kzalloc(sizeof(*bp->aux_priv), GFP_KERNEL);
if (!aux_priv)
goto exit;
aux_priv->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
if (aux_priv->id < 0) {
netdev_warn(bp->dev, "ida alloc failed for ROCE auxiliary device\n");
kfree(aux_priv);
goto exit;
}
aux_dev = &aux_priv->aux_dev;
aux_dev->id = aux_priv->id;
aux_dev->name = "rdma";
aux_dev->dev.parent = &bp->pdev->dev;
aux_dev->dev.release = bnxt_aux_dev_release;
rc = auxiliary_device_init(aux_dev);
if (rc) {
ida_free(&bnxt_aux_dev_ids, aux_priv->id);
kfree(aux_priv);
goto exit;
}
bp->aux_priv = aux_priv;
/* From this point, all cleanup will happen via the .release callback &
* any error unwinding will need to include a call to
* auxiliary_device_uninit.
*/
edev = kzalloc(sizeof(*edev), GFP_KERNEL);
if (!edev)
goto aux_dev_uninit;
aux_priv->edev = edev;
ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
if (!ulp)
goto aux_dev_uninit;
edev->ulp_tbl = ulp;
bp->edev = edev;
bnxt_set_edev_info(edev, bp);
bnxt_register_logger(bp, BNXT_LOGGER_ROCE,
BNXT_ULP_MAX_LOG_BUFFERS,
bnxt_logger_ulp_live_data,
BNXT_ULP_MAX_LIVE_LOG_SIZE);
bp->ulp_num_msix_want = bnxt_num_ulp_msix_requested(bp, BNXT_MAX_ROCE_MSIX);
return;
aux_dev_uninit:
auxiliary_device_uninit(aux_dev);
exit:
bp->flags &= ~BNXT_FLAG_ROCE_CAP;
}
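/* Lifecycle sketch: bnxt_rdma_aux_device_init() allocates the auxiliary
 * device, edev and ulp table at probe time, bnxt_rdma_aux_device_add()
 * publishes the device so the RoCE driver can bind to it, and the
 * _del()/_uninit() pair tears it down, with the final frees deferred to
 * bnxt_aux_dev_release().
 */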


@ -0,0 +1,162 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2016-2018 Broadcom Limited
* Copyright (c) 2018-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_ULP_H
#define BNXT_ULP_H
#define BNXT_ROCE_ULP 0
#define BNXT_OTHER_ULP 1
#define BNXT_MAX_ULP 2
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
#define BNXT_MAX_ROCE_MSIX_VF 2
#define BNXT_MAX_ROCE_MSIX_PF 9
#define BNXT_MAX_ROCE_MSIX_NPAR_PF 5
#define BNXT_MAX_ROCE_MSIX 64
#define BNXT_MAX_ROCE_MSIX_GEN_P5_PF BNXT_MAX_ROCE_MSIX
#define BNXT_ULP_MAX_LOG_BUFFERS 1024
#define BNXT_ULP_MAX_LIVE_LOG_SIZE (32 << 20)
struct hwrm_async_event_cmpl;
struct bnxt;
struct bnxt_msix_entry {
u32 vector;
u32 ring_idx;
u32 db_offset;
};
struct bnxt_ulp_ops {
/* async_notifier() cannot sleep (in BH context) */
void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
void (*ulp_irq_stop)(void *, bool);
void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
void (*ulp_log_live)(void *handle, u32 seg_id);
};
struct bnxt_fw_msg {
void *msg;
int msg_len;
void *resp;
int resp_max_len;
int timeout;
};
struct bnxt_ulp {
void *handle;
struct bnxt_ulp_ops __rcu *ulp_ops;
unsigned long *async_events_bmap;
u16 max_async_event_id;
u16 msix_requested;
};
struct bnxt_en_dev {
struct net_device *net;
struct pci_dev *pdev;
struct bnxt_msix_entry msix_entries[BNXT_MAX_ROCE_MSIX];
u32 flags;
#define BNXT_EN_FLAG_ROCEV1_CAP 0x1
#define BNXT_EN_FLAG_ROCEV2_CAP 0x2
#define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \
BNXT_EN_FLAG_ROCEV2_CAP)
#define BNXT_EN_FLAG_MSIX_REQUESTED 0x4
#define BNXT_EN_FLAG_ULP_STOPPED 0x8
#define BNXT_EN_FLAG_ASYM_Q 0x10
#define BNXT_EN_FLAG_MULTI_HOST 0x20
#define BNXT_EN_FLAG_VF 0x40
#define BNXT_EN_FLAG_HW_LAG 0x80
#define BNXT_EN_FLAG_ROCE_VF_RES_MGMT 0x100
#define BNXT_EN_FLAG_MULTI_ROOT 0x200
#define BNXT_EN_FLAG_SW_RES_LMT 0x400
#define BNXT_EN_ASYM_Q(edev) ((edev)->flags & BNXT_EN_FLAG_ASYM_Q)
#define BNXT_EN_MH(edev) ((edev)->flags & BNXT_EN_FLAG_MULTI_HOST)
#define BNXT_EN_VF(edev) ((edev)->flags & BNXT_EN_FLAG_VF)
#define BNXT_EN_HW_LAG(edev) ((edev)->flags & BNXT_EN_FLAG_HW_LAG)
#define BNXT_EN_MR(edev) ((edev)->flags & BNXT_EN_FLAG_MULTI_ROOT)
#define BNXT_EN_SW_RES_LMT(edev) ((edev)->flags & BNXT_EN_FLAG_SW_RES_LMT)
struct bnxt_ulp *ulp_tbl;
int l2_db_size; /* Doorbell BAR size in
* bytes mapped by L2
* driver.
*/
int l2_db_size_nc; /* Doorbell BAR size in
* bytes mapped as non-
* cacheable.
*/
u32 ulp_version; /* bnxt_re checks the
* ulp_version is correct
* to ensure compatibility
* with bnxt_en.
*/
#define BNXT_ULP_VERSION 0x695a000f /* Change this when any interface
* structure or API changes
* between bnxt_en and bnxt_re.
*/
unsigned long en_state;
void __iomem *bar0;
u16 hw_ring_stats_size;
u16 pf_port_id;
u8 port_partition_type;
#define BNXT_EN_NPAR(edev) ((edev)->port_partition_type)
u8 port_count;
struct bnxt_dbr *en_dbr;
struct bnxt_hdbr_info *hdbr_info;
u16 chip_num;
int l2_db_offset; /* Doorbell BAR offset
* of non-cacheable.
*/
u16 ulp_num_msix_vec;
u16 ulp_num_ctxs;
struct mutex en_dev_lock; /* serialize ulp operations */
};
static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev)
{
if (edev && rcu_access_pointer(edev->ulp_tbl->ulp_ops))
return true;
return false;
}
int bnxt_get_ulp_msix_num(struct bnxt *bp);
int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp);
void bnxt_set_ulp_msix_num(struct bnxt *bp, int num);
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp);
int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp);
void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ctxs);
void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp);
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp, int err);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
#ifndef HAVE_AUXILIARY_DRIVER
void bnxt_ulp_shutdown(struct bnxt *bp);
#endif
void bnxt_ulp_irq_stop(struct bnxt *bp);
void bnxt_ulp_irq_restart(struct bnxt *bp, int err);
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
void bnxt_rdma_aux_device_uninit(struct bnxt *bp);
void bnxt_rdma_aux_device_init(struct bnxt *bp);
void bnxt_rdma_aux_device_add(struct bnxt *bp);
void bnxt_rdma_aux_device_del(struct bnxt *bp);
int bnxt_register_dev(struct bnxt_en_dev *edev,
struct bnxt_ulp_ops *ulp_ops, void *handle);
int bnxt_unregister_dev(struct bnxt_en_dev *edev);
int bnxt_send_msg(struct bnxt_en_dev *edev, struct bnxt_fw_msg *fw_msg);
int bnxt_register_async_events(struct bnxt_en_dev *edev,
unsigned long *events_bmap, u16 max_id);
int bnxt_dbr_complete(struct bnxt_en_dev *edev, u32 epoch);
void bnxt_ulp_log_live(struct bnxt_en_dev *edev, u16 logger_id,
const char *format, ...);
void bnxt_ulp_log_raw(struct bnxt_en_dev *edev, u16 logger_id, void *data, int len);
#endif

(File diff suppressed because it is too large.)


@ -0,0 +1,266 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2016-2018 Broadcom Limited
* Copyright (c) 2018-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_VFR_H
#define BNXT_VFR_H
#include <linux/debugfs.h>
#ifdef CONFIG_VF_REPS
#define MAX_CFA_CODE 65536
int bnxt_hwrm_release_afm_func(struct bnxt *bp, u16 fid, u16 rfid,
u8 type, u32 flags);
int bnxt_vf_reps_create(struct bnxt *bp);
void bnxt_vf_reps_destroy(struct bnxt *bp);
void bnxt_vf_reps_close(struct bnxt *bp);
void bnxt_vf_reps_open(struct bnxt *bp);
void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb);
struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code);
struct net_device *bnxt_tf_get_vf_rep(struct bnxt *bp,
struct rx_cmp_ext *rxcmp1,
struct bnxt_tpa_info *tpa_info);
int bnxt_vf_reps_alloc(struct bnxt *bp);
void bnxt_vf_reps_free(struct bnxt *bp);
int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, void *vfr);
int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, void *vfr);
int bnxt_hwrm_cfa_pair_exists(struct bnxt *bp, void *vfr);
static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
{
struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
struct bnxt *bp = vf_rep->bp;
return bnxt_vf_target_id(&bp->pf, vf_rep->vf_idx);
}
static inline bool bnxt_tc_is_switchdev_mode(struct bnxt *bp)
{
return bp->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
}
bool bnxt_dev_is_vf_rep(struct net_device *dev);
int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode);
#ifdef HAVE_ESWITCH_MODE_SET_EXTACK
int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack);
#else
int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode);
#endif
int bnxt_hwrm_get_dflt_vnic_svif(struct bnxt *bp, u16 fid,
u16 *vnic_id, u16 *svif);
int bnxt_tf_port_init(struct bnxt *bp, u16 flag);
int bnxt_tfo_init(struct bnxt *bp);
void bnxt_tfo_deinit(struct bnxt *bp);
void bnxt_tf_port_deinit(struct bnxt *bp, u16 flag);
void bnxt_custom_tf_port_init(struct bnxt *bp);
void bnxt_custom_tf_port_deinit(struct bnxt *bp);
int bnxt_devlink_tf_port_init(struct bnxt *bp);
void bnxt_devlink_tf_port_deinit(struct bnxt *bp);
#ifdef CONFIG_DEBUG_FS
void bnxt_tf_debugfs_create_files(struct bnxt *bp, u8 tsid, struct dentry *port_dir);
#endif /* CONFIG_DEBUG_FS */
#elif defined CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD
static inline void bnxt_vf_reps_destroy(struct bnxt *bp)
{
}
static inline void bnxt_vf_reps_close(struct bnxt *bp)
{
}
static inline void bnxt_vf_reps_open(struct bnxt *bp)
{
}
static inline void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb)
{
}
static inline struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code)
{
return NULL;
}
static inline struct net_device *bnxt_tf_get_vf_rep(struct bnxt *bp,
struct rx_cmp_ext *rxcmp1,
struct bnxt_tpa_info
*tpa_info)
{
return NULL;
}
static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
{
return 0;
}
static inline bool bnxt_dev_is_vf_rep(struct net_device *dev)
{
return false;
}
static inline int bnxt_vf_reps_alloc(struct bnxt *bp)
{
return -EINVAL;
}
static inline void bnxt_vf_reps_free(struct bnxt *bp)
{
}
static inline bool bnxt_tc_is_switchdev_mode(struct bnxt *bp)
{
return false;
}
static inline int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, void *vfr)
{
return -EINVAL;
}
static inline int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, void *vfr)
{
return -EINVAL;
}
static inline int bnxt_hwrm_cfa_pair_exists(struct bnxt *bp, void *vfr)
{
return -EINVAL;
}
int bnxt_tf_port_init(struct bnxt *bp, u16 flag);
int bnxt_tf_port_init_p7(struct bnxt *bp);
void bnxt_tf_port_deinit(struct bnxt *bp, u16 flag);
int bnxt_tfo_init(struct bnxt *bp);
void bnxt_tfo_deinit(struct bnxt *bp);
void bnxt_custom_tf_port_init(struct bnxt *bp);
void bnxt_custom_tf_port_deinit(struct bnxt *bp);
int bnxt_hwrm_get_dflt_vnic_svif(struct bnxt *bp, u16 fid, u16 *vnic_id, u16 *svif);
#ifdef CONFIG_DEBUG_FS
void bnxt_tf_debugfs_create_files(struct bnxt *bp, u8 tsid, struct dentry *port_dir);
#endif /* CONFIG_DEBUG_FS */
#else
static inline int bnxt_vf_reps_create(struct bnxt *bp)
{
return 0;
}
static inline void bnxt_vf_reps_destroy(struct bnxt *bp)
{
}
static inline void bnxt_vf_reps_close(struct bnxt *bp)
{
}
static inline void bnxt_vf_reps_open(struct bnxt *bp)
{
}
static inline void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb)
{
}
static inline struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code)
{
return NULL;
}
static inline struct net_device *bnxt_tf_get_vf_rep(struct bnxt *bp,
struct rx_cmp_ext *rxcmp1,
struct bnxt_tpa_info
*tpa_info)
{
return NULL;
}
static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
{
return 0;
}
static inline bool bnxt_dev_is_vf_rep(struct net_device *dev)
{
return false;
}
static inline int bnxt_vf_reps_alloc(struct bnxt *bp)
{
return -EINVAL;
}
static inline void bnxt_vf_reps_free(struct bnxt *bp)
{
}
static inline bool bnxt_tc_is_switchdev_mode(struct bnxt *bp)
{
return false;
}
static inline int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, void *vfr)
{
return -EINVAL;
}
static inline int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, void *vfr)
{
return -EINVAL;
}
static inline int bnxt_hwrm_cfa_pair_exists(struct bnxt *bp, void *vfr)
{
return -EINVAL;
}
static inline int bnxt_tf_port_init(struct bnxt *bp, u16 flag)
{
	return 0;
}
static inline int bnxt_tf_port_init_p7(struct bnxt *bp)
{
return 0;
}
static inline int bnxt_tfo_init(struct bnxt *bp)
{
return 0;
}
static inline void bnxt_tfo_deinit(struct bnxt *bp)
{
}
static inline void bnxt_tf_port_deinit(struct bnxt *bp, u16 flag)
{
}
static inline void bnxt_custom_tf_port_init(struct bnxt *bp)
{
}
static inline void bnxt_custom_tf_port_deinit(struct bnxt *bp)
{
}
#ifdef CONFIG_DEBUG_FS
static inline void bnxt_tf_debugfs_create_files(struct bnxt *bp, u8 tsid, struct dentry *port_dir)
{
}
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_VF_REPS */
#endif /* BNXT_VFR_H */


@ -0,0 +1,659 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2016-2018 Broadcom Limited
* Copyright (c) 2018-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#ifdef HAVE_NDO_XDP
#include <linux/bpf.h>
#ifdef HAVE_BPF_TRACE
#include <linux/bpf_trace.h>
#endif
#include <linux/filter.h>
#endif
#ifdef CONFIG_PAGE_POOL
#ifdef HAVE_PAGE_POOL_HELPERS_H
#include <net/page_pool/helpers.h>
#else
#include <net/page_pool.h>
#endif
#endif
#include "bnxt_compat.h"
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"
#include "bnxt_xsk.h"
DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct bnxt_tx_ring_info *txr,
dma_addr_t mapping, u32 len,
struct xdp_buff *xdp)
{
struct bnxt_sw_tx_bd *tx_buf;
struct tx_bd *txbd;
int num_frags = 0;
u32 flags;
u16 prod;
struct skb_shared_info *sinfo;
#ifdef HAVE_XDP_MULTI_BUFF
int i;
#endif
if (xdp && xdp_buff_has_frags(xdp)) {
sinfo = xdp_get_shared_info_from_buff(xdp);
num_frags = sinfo->nr_frags;
}
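	/* A multi-buffer xdp_buff consumes one BD per fragment plus the
	 * head BD; the BD_CNT field in the first BD tells the chip how
	 * many BDs make up the packet.
	 */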
/* fill up the first buffer */
prod = txr->tx_prod;
tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
tx_buf->nr_frags = num_frags;
if (xdp)
tx_buf->page = virt_to_head_page(xdp->data);
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
flags = (len << TX_BD_LEN_SHIFT) |
((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
bnxt_lhint_arr[len >> 9];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 1 + num_frags);
#ifdef HAVE_XDP_MULTI_BUFF
/* now let us fill up the frags into the next buffers */
for (i = 0; i < num_frags ; i++) {
skb_frag_t *frag = &sinfo->frags[i];
struct bnxt_sw_tx_bd *frag_tx_buf;
dma_addr_t frag_mapping;
int frag_len;
prod = NEXT_TX(prod);
WRITE_ONCE(txr->tx_prod, prod);
		/* fill up the next buffer for this frag */
frag_tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
frag_tx_buf->page = skb_frag_page(frag);
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
frag_len = skb_frag_size(frag);
flags = frag_len << TX_BD_LEN_SHIFT;
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
frag_mapping = page_pool_get_dma_addr(skb_frag_page(frag)) +
skb_frag_off(frag);
txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);
len = frag_len;
}
#endif
flags &= ~TX_BD_LEN;
txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags |
TX_BD_FLAGS_PACKET_END);
prod = NEXT_TX(prod);
WRITE_ONCE(txr->tx_prod, prod);
/* Sync TX BD */
wmb();
return tx_buf;
}
#ifdef HAVE_NDO_XDP
bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
return !!xdp_prog;
}
void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 cons, u8 *data_ptr, unsigned int len,
struct xdp_buff *xdp)
{
struct bnxt_sw_rx_bd *rx_buf;
u32 buflen = BNXT_RX_PAGE_SIZE;
struct pci_dev *pdev;
dma_addr_t mapping;
u32 offset;
pdev = bp->pdev;
rx_buf = &rxr->rx_buf_ring[cons];
offset = bp->rx_offset;
mapping = rx_buf->mapping - bp->rx_dma_offset;
dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);
xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
xdp_prepare_buff(xdp, data_ptr - offset, offset, len, true);
}
void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
dma_addr_t mapping, u32 len, u16 rx_prod,
struct xdp_buff *xdp)
{
struct bnxt_sw_tx_bd *tx_buf;
tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);
tx_buf->rx_prod = rx_prod;
tx_buf->action = XDP_TX;
txr->xdp_tx_pending++;
}
#ifdef HAVE_XDP_FRAME
static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
struct bnxt_tx_ring_info *txr,
dma_addr_t mapping, u32 len,
struct xdp_frame *xdpf)
{
struct bnxt_sw_tx_bd *tx_buf;
tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL);
tx_buf->action = XDP_REDIRECT;
tx_buf->xdpf = xdpf;
dma_unmap_addr_set(tx_buf, mapping, mapping);
dma_unmap_len_set(tx_buf, len, 0);
}
#endif
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
#ifdef HAVE_XSK_SUPPORT
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
#endif
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
bool rx_doorbell_needed = false;
u16 tx_hw_cons = txr->tx_hw_cons;
struct bnxt_sw_tx_bd *tx_buf;
u16 tx_cons = txr->tx_cons;
u16 last_tx_cons = tx_cons;
int i, frags, xsk_tx = 0;
if (!budget)
return;
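	/* Walk the completed TX descriptors: unmap and free redirected
	 * frames, recycle XDP_TX fragment pages back to the page pool,
	 * and count XSK transmits so the pool can be credited below.
	 */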
while (RING_TX(bp, tx_cons) != tx_hw_cons) {
tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];
if (tx_buf->action == XDP_REDIRECT) {
struct pci_dev *pdev = bp->pdev;
dma_unmap_single(&pdev->dev,
dma_unmap_addr(tx_buf, mapping),
dma_unmap_len(tx_buf, len),
DMA_TO_DEVICE);
#ifdef HAVE_XDP_FRAME
xdp_return_frame(tx_buf->xdpf);
#endif
tx_buf->action = 0;
tx_buf->xdpf = NULL;
} else if (tx_buf->action == XDP_TX) {
tx_buf->action = 0;
rx_doorbell_needed = true;
last_tx_cons = tx_cons;
frags = tx_buf->nr_frags;
for (i = 0; i < frags; i++) {
tx_cons = NEXT_TX(tx_cons);
tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];
#ifdef CONFIG_PAGE_POOL
page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
#else
__free_page(tx_buf->page);
#endif
}
txr->xdp_tx_pending--;
} else if (tx_buf->action == BNXT_XSK_TX) {
rx_doorbell_needed = false;
xsk_tx++;
} else {
bnxt_sched_reset_txr(bp, txr, tx_cons);
return;
}
tx_cons = NEXT_TX(tx_cons);
}
bnapi->events &= ~BNXT_TX_CMP_EVENT;
WRITE_ONCE(txr->tx_cons, tx_cons);
#ifdef HAVE_XSK_SUPPORT
if (txr->xsk_pool && xsk_tx) {
xsk_tx_completed(txr->xsk_pool, xsk_tx);
cpr->sw_stats->xsk_stats.xsk_tx_completed += xsk_tx;
}
	if (txr->xsk_pool && xsk_uses_need_wakeup(txr->xsk_pool))
		xsk_set_tx_need_wakeup(txr->xsk_pool);
#endif
if (rx_doorbell_needed) {
if (!txr->xdp_tx_pending) {
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
} else {
tx_buf = &txr->tx_buf_ring[RING_TX(bp, last_tx_cons)];
bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
}
}
}
void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp)
{
struct skb_shared_info *shinfo;
int i;
if (!xdp || !xdp_buff_has_frags(xdp))
return;
shinfo = xdp_get_shared_info_from_buff(xdp);
if (!shinfo)
return;
for (i = 0; i < shinfo->nr_frags; i++) {
struct page *page = skb_frag_page(&shinfo->frags[i]);
#ifdef CONFIG_PAGE_POOL
page_pool_recycle_direct(rxr->page_pool, page);
#else
__free_page(page);
#endif
}
shinfo->nr_frags = 0;
}
/* returns the following:
* true - packet consumed by XDP and new buffer is allocated.
* false - packet should be passed to the stack.
*/
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
unsigned int *len, u8 *event)
{
struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_rx_bd *rx_buf;
struct pci_dev *pdev;
dma_addr_t mapping;
u32 tx_needed = 1;
void *orig_data;
u32 tx_avail;
u32 offset;
u32 act;
if (!xdp_prog)
return false;
pdev = bp->pdev;
offset = bp->rx_offset;
txr = rxr->bnapi->tx_ring[0];
/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
orig_data = xdp->data;
act = bpf_prog_run_xdp(xdp_prog, xdp);
tx_avail = bnxt_tx_avail(bp, txr);
/* If there are pending XDP_TX packets, we must not update the rx
* producer yet because some RX buffers may still be on the TX ring.
*/
if (txr->xdp_tx_pending)
*event &= ~BNXT_RX_EVENT;
#if XDP_PACKET_HEADROOM
*len = xdp->data_end - xdp->data;
if (orig_data != xdp->data) {
offset = xdp->data - xdp->data_hard_start;
*data_ptr = xdp->data_hard_start + offset;
}
#endif
switch (act) {
case XDP_PASS:
return false;
case XDP_TX:
rx_buf = &rxr->rx_buf_ring[cons];
mapping = rx_buf->mapping - bp->rx_dma_offset;
*event = 0;
if (unlikely(xdp_buff_has_frags(xdp))) {
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
tx_needed += sinfo->nr_frags;
*event = BNXT_AGG_EVENT;
}
if (tx_avail < tx_needed) {
trace_xdp_exception(bp->dev, xdp_prog, act);
bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
bp->rx_dir);
*event &= ~BNXT_RX_EVENT;
*event |= BNXT_TX_EVENT;
__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
NEXT_RX(rxr->rx_prod), xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
case XDP_REDIRECT:
/* if we are calling this here then we know that the
* redirect is coming from a frame received by the
* bnxt_en driver.
*/
rx_buf = &rxr->rx_buf_ring[cons];
mapping = rx_buf->mapping - bp->rx_dma_offset;
dma_unmap_page_attrs(&pdev->dev, mapping,
BNXT_RX_PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
/* if we are unable to allocate a new buffer, abort and reuse */
if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
if (xdp_do_redirect(bp->dev, xdp, xdp_prog)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
#ifdef CONFIG_PAGE_POOL
page_pool_recycle_direct(rxr->page_pool, page);
#else
__free_page(page);
#endif
return true;
}
*event |= BNXT_REDIRECT_EVENT;
break;
default:
bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(bp->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
break;
}
return true;
}
#ifdef HAVE_XDP_FRAME
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,13,0)
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
struct xdp_frame **frames, u32 flags)
{
struct bnxt *bp = netdev_priv(dev);
struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
struct pci_dev *pdev = bp->pdev;
struct bnxt_tx_ring_info *txr;
dma_addr_t mapping;
int nxmit = 0;
int ring;
int i;
if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
!bp->tx_nr_rings_xdp ||
!xdp_prog)
return -EINVAL;
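	/* Spread redirected frames across the dedicated XDP TX rings by
	 * hashing on the current CPU; the static key enables the TX lock
	 * for configurations where CPUs may share a ring.
	 */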
ring = smp_processor_id() % bp->tx_nr_rings_xdp;
txr = &bp->tx_ring[ring];
if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING)
return -EINVAL;
if (static_branch_unlikely(&bnxt_xdp_locking_key))
spin_lock(&txr->tx_lock);
for (i = 0; i < num_frames; i++) {
struct xdp_frame *xdp = frames[i];
if (!bnxt_tx_avail(bp, txr))
break;
mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, mapping))
break;
__bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
nxmit++;
}
if (flags & XDP_XMIT_FLUSH) {
/* Sync BD data before updating doorbell */
wmb();
bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
}
if (static_branch_unlikely(&bnxt_xdp_locking_key))
spin_unlock(&txr->tx_lock);
return nxmit;
}
#else
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
struct xdp_frame **frames, u32 flags)
{
struct bnxt *bp = netdev_priv(dev);
struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
struct pci_dev *pdev = bp->pdev;
struct bnxt_tx_ring_info *txr;
dma_addr_t mapping;
int drops = 0;
int ring;
int i;
if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
!bp->tx_nr_rings_xdp ||
!xdp_prog)
return -EINVAL;
ring = smp_processor_id() % bp->tx_nr_rings_xdp;
txr = &bp->tx_ring[ring];
if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING)
return -EINVAL;
if (static_branch_unlikely(&bnxt_xdp_locking_key))
spin_lock(&txr->tx_lock);
for (i = 0; i < num_frames; i++) {
struct xdp_frame *xdp = frames[i];
if (!bnxt_tx_avail(bp, txr)) {
xdp_return_frame_rx_napi(xdp);
drops++;
continue;
}
mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, mapping)) {
xdp_return_frame_rx_napi(xdp);
drops++;
continue;
}
__bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
}
if (flags & XDP_XMIT_FLUSH) {
/* Sync BD data before updating doorbell */
wmb();
bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
}
if (static_branch_unlikely(&bnxt_xdp_locking_key))
spin_unlock(&txr->tx_lock);
return num_frames - drops;
}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5,13,0) */
#endif
/* Under rtnl_lock */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
struct net_device *dev = bp->dev;
int tx_xdp = 0, tx_cp, rc, tc;
struct bpf_prog *old;
#ifndef HAVE_XDP_MULTI_BUFF
if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU(bp)) {
netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
#else
if (prog && !prog->aux->xdp_has_frags &&
bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU(bp)) {
netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n",
#endif
bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU(bp));
return -EOPNOTSUPP;
}
if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
return -EOPNOTSUPP;
}
if (prog)
tx_xdp = bp->rx_nr_rings;
tc = bp->num_tc;
if (!tc)
tc = 1;
rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
true, tc, tx_xdp);
if (rc) {
netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
return rc;
}
if (netif_running(dev))
bnxt_close_nic(bp, true, false);
old = xchg(&bp->xdp_prog, prog);
if (old)
bpf_prog_put(old);
if (prog) {
bnxt_set_rx_skb_mode(bp, true);
xdp_features_set_redirect_target(dev, true);
} else {
int rx, tx;
xdp_features_clear_redirect_target(dev);
bnxt_set_rx_skb_mode(bp, false);
bnxt_get_max_rings(bp, &rx, &tx, true);
if (rx > 1) {
bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
bp->dev->hw_features |= NETIF_F_LRO;
/* Re-enable TPA if necessary */
netdev_update_features(dev);
}
}
bp->tx_nr_rings_xdp = tx_xdp;
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
bp->cp_nr_rings = max_t(int, tx_cp, bp->rx_nr_rings);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
if (netif_running(dev))
return bnxt_open_nic(bp, true, false);
return 0;
}
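/* Example (hypothetical config): with 4 RX rings, 4 TX rings per TC and
 * a single TC, attaching a program reserves tx_xdp = 4 extra TX rings,
 * so tx_nr_rings becomes 4 * 1 + 4 = 8, and cp_nr_rings is sized to the
 * larger of the TX completion rings and the RX rings.
 */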
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
struct bnxt *bp = netdev_priv(dev);
int rc;
switch (xdp->command) {
case XDP_SETUP_PROG:
rc = bnxt_xdp_set(bp, xdp->prog);
break;
#ifdef HAVE_XDP_QUERY_PROG
case XDP_QUERY_PROG:
#ifdef HAVE_PROG_ATTACHED
xdp->prog_attached = !!bp->xdp_prog;
#endif
#ifdef HAVE_IFLA_XDP_PROG_ID
xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0;
#endif
rc = 0;
break;
#endif /* HAVE_XDP_QUERY_PROG */
#ifdef HAVE_XSK_SUPPORT
case XDP_SETUP_XSK_POOL:
netdev_info(bp->dev, "%s(): XDP_SETUP_XSK_POOL on queue_id: %d\n",
__func__, xdp->xsk.queue_id);
return bnxt_xdp_setup_pool(bp, xdp->xsk.pool, xdp->xsk.queue_id);
#endif
default:
rc = -EINVAL;
break;
}
return rc;
}
#ifdef HAVE_XDP_MULTI_BUFF
struct sk_buff *
bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
struct page_pool *pool, struct xdp_buff *xdp,
struct rx_cmp_ext *rxcmp1)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
if (!skb || !sinfo)
return NULL;
skb_checksum_none_assert(skb);
if (RX_CMP_L4_CS_OK(rxcmp1)) {
if (bp->dev->features & NETIF_F_RXCSUM) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = RX_CMP_ENCAP(rxcmp1);
}
}
xdp_update_skb_shared_info(skb, num_frags,
sinfo->xdp_frags_size,
BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
xdp_buff_is_frag_pfmemalloc(xdp));
return skb;
}
#endif
#else
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
}
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
struct xdp_buff *xdp, void *page, u8 **data_ptr,
unsigned int *len, u8 *event)
{
return false;
}
bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
return false;
}
void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 cons, u8 *data_ptr, unsigned int len,
struct xdp_buff *xdp)
{
}
#endif


@ -0,0 +1,57 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2016-2018 Broadcom Limited
* Copyright (c) 2018-2022 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_XDP_H
#define BNXT_XDP_H
DECLARE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct bnxt_tx_ring_info *txr,
dma_addr_t mapping, u32 len,
struct xdp_buff *xdp);
void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
dma_addr_t mapping, u32 len, u16 rx_prod,
struct xdp_buff *xdp);
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget);
#ifdef HAVE_NDO_XDP
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
unsigned int *len, u8 *event);
bool bnxt_rx_xsk(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
struct xdp_buff *xdp, u8 **data_ptr,
unsigned int *len, u8 *event);
#else
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
struct xdp_buff *xdp, void *page, u8 **data_ptr,
unsigned int *len, u8 *event);
bool bnxt_rx_xsk(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
struct xdp_buff *xdp, u8 **data_ptr,
unsigned int *len, u8 *event);
#endif
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
#ifdef HAVE_XDP_FRAME
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
struct xdp_frame **frames, u32 flags);
#endif
bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr);
void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 cons, u8 *data_ptr, unsigned int len,
struct xdp_buff *xdp);
#ifdef HAVE_XDP_MULTI_BUFF
struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
u8 num_frags, struct page_pool *pool,
struct xdp_buff *xdp,
struct rx_cmp_ext *rxcmp1);
void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp);
#endif
#endif


@ -0,0 +1,490 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2024 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#ifdef HAVE_NDO_XDP
#include <linux/bpf.h>
#ifdef HAVE_BPF_TRACE
#include <linux/bpf_trace.h>
#endif
#include <linux/filter.h>
#endif
#ifdef CONFIG_PAGE_POOL
#ifdef HAVE_PAGE_POOL_HELPERS_H
#include <net/page_pool/helpers.h>
#else
#include <net/page_pool.h>
#endif
#endif
#include "bnxt_compat.h"
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"
#include "bnxt_xsk.h"
#if defined(CONFIG_XDP_SOCKETS) && defined(HAVE_NDO_BPF) && defined(HAVE_XSK_SUPPORT)
int bnxt_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_cp_ring_info *cpr;
struct bnxt_rx_ring_info *rxr;
struct bnxt_tx_ring_info *txr;
struct bnxt_napi *bnapi;
if (!test_bit(BNXT_STATE_OPEN, &bp->state))
return -ENETDOWN;
if (queue_id >= bp->rx_nr_rings || queue_id >= bp->tx_nr_rings_xdp)
return -EINVAL;
rxr = &bp->rx_ring[queue_id];
txr = &bp->tx_ring[queue_id];
if (!rxr->xsk_pool && !txr->xsk_pool)
return -ENXIO;
bnapi = bp->bnapi[queue_id];
cpr = &bnapi->cp_ring;
if (!napi_if_scheduled_mark_missed(&bnapi->napi)) {
cpr->sw_stats->xsk_stats.xsk_wakeup++;
napi_schedule(&bnapi->napi);
}
return 0;
}
static void bnxt_xsk_disable_rx_ring(struct bnxt *bp, u16 queue_id)
{
struct bnxt_rx_ring_info *rxr;
struct bnxt_vnic_info *vnic;
struct bnxt_napi *bnapi;
rxr = &bp->rx_ring[queue_id];
bnapi = rxr->bnapi;
vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
#ifdef HAVE_XDP_RXQ_INFO
if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
xdp_rxq_info_unreg(&rxr->xdp_rxq);
#endif
vnic->mru = 0;
bnxt_hwrm_vnic_update(bp, vnic, VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
napi_disable(&bnapi->napi);
bnxt_free_one_rx_buf_ring(bp, rxr);
bnxt_hwrm_rx_ring_free(bp, rxr, 0);
}
static int bnxt_xsk_enable_rx_ring(struct bnxt *bp, u16 queue_id)
{
struct bnxt_rx_ring_info *rxr;
struct bnxt_vnic_info *vnic;
struct bnxt_napi *bnapi;
int rc, i;
u32 prod;
rxr = &bp->rx_ring[queue_id];
bnapi = rxr->bnapi;
vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
#ifdef HAVE_XDP_RXQ_INFO
rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, queue_id, 0);
if (rc < 0)
return rc;
rxr->xsk_pool = xsk_get_pool_from_qid(bp->dev, queue_id);
if (BNXT_RING_RX_ZC_MODE(rxr) && rxr->xsk_pool) {
rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL, NULL);
xsk_pool_set_rxq_info(rxr->xsk_pool, &rxr->xdp_rxq);
netdev_dbg(bp->dev, "%s(): AF_XDP_ZC flag set for rxring:%d\n", __func__, queue_id);
} else {
rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
MEM_TYPE_PAGE_POOL, rxr->page_pool);
netdev_dbg(bp->dev, "%s(): AF_XDP_ZC flag RESET for rxring:%d\n",
__func__, queue_id);
}
#endif
rxr->rx_next_cons = 0;
bnxt_hwrm_rx_ring_alloc(bp, rxr, queue_id);
rxr->rx_prod = 0;
prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
queue_id, i, bp->rx_ring_size);
break;
}
prod = NEXT_RX(prod);
}
rxr->rx_prod = prod;
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
napi_enable(&bnapi->napi);
vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
bnxt_hwrm_vnic_update(bp, vnic, VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
return rc;
}
static bool bnxt_check_xsk_q_in_dflt_vnic(struct bnxt *bp, u16 queue_id)
{
u16 tbl_size, i;
tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
for (i = 0; i < tbl_size; i++) {
if (queue_id == bp->rss_indir_tbl[i]) {
netdev_err(bp->dev,
"queue_id: %d is in default RSS context, not supported\n",
queue_id);
return true;
}
}
return false;
}
static int bnxt_validate_xsk(struct bnxt *bp, u16 queue_id)
{
if (!(bp->flags & BNXT_FLAG_RFS)) {
netdev_err(bp->dev,
"nTUPLE feature needs to be on for AF_XDP support\n");
return -EOPNOTSUPP;
}
if (bp->num_rss_ctx) {
netdev_err(bp->dev,
"AF_XDP not supported with additional RSS contexts\n");
return -EOPNOTSUPP;
}
if (bnxt_check_xsk_q_in_dflt_vnic(bp, queue_id))
return -EOPNOTSUPP;
return 0;
}
static int bnxt_xdp_enable_pool(struct bnxt *bp, struct xsk_buff_pool *pool,
u16 queue_id)
{
struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
struct device *dev = &bp->pdev->dev;
struct bnxt_rx_ring_info *rxr;
bool needs_reset;
int rc;
rc = bnxt_validate_xsk(bp, queue_id);
if (rc)
return rc;
rxr = &bp->rx_ring[queue_id];
rc = xsk_pool_dma_map(pool, dev, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
if (rc) {
netdev_err(bp->dev, "Failed to map xsk pool\n");
return rc;
}
set_bit(queue_id, bp->af_xdp_zc_qs);
/* Check if an XDP program is already attached, in which case we
 * need to explicitly quiesce traffic, free the regular path
 * resources and reallocate AF_XDP resources for the rings.
 * Otherwise, in the normal case, resources for AF_XDP will
 * get created anyway as part of the XDP program attach.
 */
needs_reset = netif_running(bp->dev) && xdp_prog;
if (needs_reset) {
/* Check to differentiate between Tx-only and Rx-only modes */
if (xsk_buff_can_alloc(pool, bp->rx_ring_size)) {
bnxt_xsk_disable_rx_ring(bp, queue_id);
rxr->flags |= BNXT_RING_FLAG_AF_XDP_ZC;
bnxt_xsk_enable_rx_ring(bp, queue_id);
} else {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[queue_id];
struct bnxt_napi *bnapi;
bnapi = bp->bnapi[queue_id];
bnxt_lock_napi(bnapi);
txr->xsk_pool = xsk_get_pool_from_qid(bp->dev, queue_id);
bnxt_unlock_napi(bnapi);
}
}
return rc;
}
static int bnxt_xdp_disable_pool(struct bnxt *bp, u16 queue_id)
{
struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
struct bnxt_rx_ring_info *rxr;
struct bnxt_tx_ring_info *txr;
struct xsk_buff_pool *pool;
struct bnxt_napi *bnapi;
bool needs_reset;
pool = xsk_get_pool_from_qid(bp->dev, queue_id);
if (!pool)
return -EINVAL;
if (!bp->bnapi ||
test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state)) {
xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
return 0;
}
rxr = &bp->rx_ring[queue_id];
txr = &bp->tx_ring[queue_id];
bnapi = bp->bnapi[queue_id];
bnxt_lock_napi(bnapi);
clear_bit(queue_id, bp->af_xdp_zc_qs);
xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
needs_reset = netif_running(bp->dev) && xdp_prog;
if (needs_reset) {
if (xsk_buff_can_alloc(pool, bp->rx_ring_size)) {
bnxt_xsk_disable_rx_ring(bp, queue_id);
rxr->flags &= ~BNXT_RING_FLAG_AF_XDP_ZC;
bnxt_xsk_enable_rx_ring(bp, queue_id);
}
}
txr->xsk_pool = NULL;
bnxt_unlock_napi(bnapi);
return 0;
}
int bnxt_xdp_setup_pool(struct bnxt *bp, struct xsk_buff_pool *pool,
u16 queue_id)
{
if (queue_id >= bp->rx_nr_rings)
return -EINVAL;
return pool ? bnxt_xdp_enable_pool(bp, pool, queue_id) :
bnxt_xdp_disable_pool(bp, queue_id);
}
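/* Usage sketch (illustrative, not part of this file): the driver's
 * ndo_bpf handler is expected to dispatch the XDP_SETUP_XSK_POOL
 * command here, roughly:
 *
 *	case XDP_SETUP_XSK_POOL:
 *		return bnxt_xdp_setup_pool(bp, xdp->xsk.pool,
 *					   xdp->xsk.queue_id);
 *
 * where "xdp" is the struct netdev_bpf passed to the handler and
 * "bp" is netdev_priv(dev).
 */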
/* returns the following:
* true - packet consumed by XDP and new buffer is allocated.
* false - packet should be passed to the stack.
*/
bool bnxt_rx_xsk(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
struct xdp_buff *xdp, u8 **data_ptr, unsigned int *len, u8 *event)
{
struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
struct bnxt_cp_ring_info *cpr;
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_rx_bd *rx_buf;
struct bnxt_napi *bnapi;
struct pci_dev *pdev;
dma_addr_t mapping;
u32 tx_needed = 1;
void *orig_data;
u32 tx_avail;
u32 offset;
u32 act;
if (!xdp_prog)
return false;
pdev = bp->pdev;
offset = bp->rx_offset;
txr = rxr->bnapi->tx_ring[0];
xdp->data_end = xdp->data + *len;
orig_data = xdp->data;
xsk_buff_dma_sync_for_cpu(xdp, rxr->xsk_pool);
act = bpf_prog_run_xdp(xdp_prog, xdp);
tx_avail = bnxt_tx_avail(bp, txr);
/* If there are pending XDP_TX packets, we must not update the rx
* producer yet because some RX buffers may still be on the TX ring.
*/
if (txr->xdp_tx_pending)
*event &= ~BNXT_RX_EVENT;
#if XDP_PACKET_HEADROOM
*len = xdp->data_end - xdp->data;
if (orig_data != xdp->data) {
offset = xdp->data - xdp->data_hard_start;
*data_ptr = xdp->data_hard_start + offset;
}
#endif
bnapi = rxr->bnapi;
cpr = &bnapi->cp_ring;
switch (act) {
case XDP_PASS:
return false;
case XDP_TX:
rx_buf = &rxr->rx_buf_ring[cons];
mapping = rx_buf->mapping - bp->rx_dma_offset;
*event = 0;
if (tx_avail < tx_needed) {
trace_xdp_exception(bp->dev, xdp_prog, act);
bnxt_reuse_rx_data(rxr, cons, xdp);
return true;
}
dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
bp->rx_dir);
*event &= ~BNXT_RX_EVENT;
*event |= BNXT_TX_EVENT;
/* Pass NULL: xdp->data here is a buffer from the XSK pool, i.e. user space memory */
__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
NEXT_RX(rxr->rx_prod), NULL);
bnxt_reuse_rx_data(rxr, cons, xdp);
return true;
case XDP_REDIRECT:
/* if we are calling this here then we know that the
* redirect is coming from a frame received by the
* bnxt_en driver.
*/
/* if we are unable to allocate a new buffer, abort and reuse */
if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
bnxt_reuse_rx_data(rxr, cons, xdp);
cpr->sw_stats->xsk_stats.xsk_rx_alloc_fail++;
return true;
}
if (xdp_do_redirect(bp->dev, xdp, xdp_prog)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
cpr->sw_stats->xsk_stats.xsk_rx_redirect_fail++;
bnxt_reuse_rx_data(rxr, cons, xdp);
return true;
}
*event |= BNXT_REDIRECT_EVENT;
cpr->sw_stats->xsk_stats.xsk_rx_success++;
break;
default:
bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(bp->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
break;
}
return true;
}
bool bnxt_xsk_xmit(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
struct bnxt_cp_ring_info *cpr;
int cpu = smp_processor_id();
struct bnxt_sw_tx_bd *tx_buf;
struct netdev_queue *txq;
u16 prod = txr->tx_prod;
bool xsk_more = true;
struct tx_bd *txbd;
dma_addr_t mapping;
int i, xsk_tx = 0;
int num_frags = 0;
u32 len, flags;
cpr = &bnapi->cp_ring;
txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
__netif_tx_lock(txq, cpu);
for (i = 0; i < budget; i++) {
struct xdp_desc desc;
if (bnxt_tx_avail(bp, txr) < 2) {
cpr->sw_stats->xsk_stats.xsk_tx_ring_full++;
xsk_more = false;
break;
}
if (!xsk_tx_peek_desc(txr->xsk_pool, &desc)) {
xsk_more = false;
break;
}
mapping = xsk_buff_raw_get_dma(txr->xsk_pool, desc.addr);
len = desc.len;
xsk_buff_raw_dma_sync_for_device(txr->xsk_pool, mapping, desc.len);
tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
tx_buf->action = BNXT_XSK_TX;
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
flags = (len << TX_BD_LEN_SHIFT) | ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
bnxt_lhint_arr[len >> 9];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 1 + num_frags);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
dma_unmap_addr_set(tx_buf, mapping, mapping);
dma_unmap_len_set(tx_buf, len, len);
flags &= ~TX_BD_LEN;
txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) |
flags | TX_BD_FLAGS_PACKET_END);
prod = NEXT_TX(prod);
txr->tx_prod = prod;
xsk_tx++;
}
if (xsk_tx) {
/* Ensure the TX descriptors above are visible to the device before ringing the doorbell */
wmb();
xsk_tx_release(txr->xsk_pool);
bnxt_db_write(bp, &txr->tx_db, prod);
cpr->sw_stats->xsk_stats.xsk_tx_sent_pkts += xsk_tx;
}
__netif_tx_unlock(txq);
return xsk_more;
}
#else
bool bnxt_rx_xsk(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
struct xdp_buff *xdp, u8 **data_ptr, unsigned int *len, u8 *event)
{
return false;
}
int bnxt_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
return 0;
}
int bnxt_xdp_setup_pool(struct bnxt *bp, struct xsk_buff_pool *pool,
u16 queue_id)
{
return 0;
}
bool bnxt_xsk_xmit(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
return false;
}
#endif

View File

@ -0,0 +1,21 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2024 Broadcom Inc
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef __BNXT_XSK_H__
#define __BNXT_XSK_H__
#ifdef HAVE_XSK_SUPPORT
#include <net/xdp_sock_drv.h>
#endif
int bnxt_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
int bnxt_xdp_setup_pool(struct bnxt *bp, struct xsk_buff_pool *pool,
u16 queue_id);
bool bnxt_xsk_xmit(struct bnxt *bp, struct bnxt_napi *bnapi, int budget);
#endif

View File

@ -0,0 +1,37 @@
#!/usr/bin/awk -f
BEGIN{
if (struct) {
start="struct " struct " {"
} else if (enum) {
start="enum " enum " {"
} else if (define) {
pattern="#define " define
open=1
} else {
print "Usage: find_src.awk -v <struct | enum | define>=<regex> [-v pattern=<regex>]"
print "\nPrints lines associated with matched elements and optionally further constrains matching within such elements by an additional regex pattern."
exit 1
}
}
$0~/{/{
open && open++
}
$0~start{
open=1;
}
{
if (line_cont) {
print $0
line_cont=match($0, /\\$/)
}
}
$0~pattern{
if (open) {
print $0
line_cont=match($0, /\\$/)
}
}
$0~/}/{
open && open--
}
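# Example invocations (file names are hypothetical):
#   ./find_src.awk -v struct=bitalloc bitalloc.h        # print the body of struct bitalloc
#   ./find_src.awk -v enum=cfa_p40_mirror_tbl_flds -v pattern=AR_PTR cfa_p40_hw.h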

View File

@ -0,0 +1,258 @@
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright(c) 2019-2021 Broadcom
* All rights reserved.
*/
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include "bnxt_hsi.h"
#include "bnxt_compat.h"
#include "bitalloc.h"
/**
 * bnxt_ba_init - allocate memory for bitmap
 * @pool: Pointer to struct bitalloc
 * @size: Number of indexes in the pool
 * @free: when true, mark every index free by setting all bits to 1
 *
 * In this bitmap a set bit (1) means an index is free and a cleared bit (0)
 * means it is in use. The polarity is inverted because we also need to
 * allocate from the top and can then search via find_last_bit(); there is
 * no find_last_zero_bit() API.
 *
 * Returns: 0 on success, -ve otherwise
 */
int bnxt_ba_init(struct bitalloc *pool, int size, bool free)
{
if (unlikely(!pool || size < 1 || size > BITALLOC_MAX_SIZE))
return -EINVAL;
pool->bitmap = bitmap_zalloc(size, GFP_KERNEL);
if (unlikely(!pool->bitmap))
return -ENOMEM;
if (free) {
pool->size = size;
pool->free_count = size;
bitmap_set(pool->bitmap, 0, size);
} else {
pool->size = size;
pool->free_count = 0;
}
return 0;
}
/**
* bnxt_ba_deinit - Free the malloced memory for the bitmap
* @pool: Pointer to struct bitalloc
*
* Returns: void
*/
void bnxt_ba_deinit(struct bitalloc *pool)
{
if (unlikely(!pool || !pool->bitmap))
return;
bitmap_free(pool->bitmap);
pool->size = 0;
pool->free_count = 0;
}
/**
* bnxt_ba_alloc - Allocate a lowest free index
* @pool: Pointer to struct bitalloc
*
* Returns: -1 on failure, index on success
*/
int bnxt_ba_alloc(struct bitalloc *pool)
{
int r = -1;
if (unlikely(!pool || !pool->bitmap || !pool->free_count))
return r;
r = find_first_bit(pool->bitmap, pool->size);
if (likely(r < pool->size)) {
clear_bit(r, pool->bitmap);
--pool->free_count;
}
return r;
}
/**
* bnxt_ba_alloc_reverse - Allocate a highest free index
* @pool: Pointer to struct bitalloc
*
* Returns: -1 on failure, index on success
*/
int bnxt_ba_alloc_reverse(struct bitalloc *pool)
{
int r = -1;
if (unlikely(!pool || !pool->bitmap || !pool->free_count))
return r;
r = find_last_bit(pool->bitmap, pool->size);
if (likely(r < pool->size)) {
clear_bit(r, pool->bitmap);
--pool->free_count;
}
return r;
}
/**
* bnxt_ba_alloc_index - Allocate the requested index
* @pool: Pointer to struct bitalloc
* @index: Index to allocate
*
* Returns: -1 on failure, index on success
*/
int bnxt_ba_alloc_index(struct bitalloc *pool, int index)
{
int r = -1;
if (unlikely(!pool || !pool->bitmap ||
index < 0 || index >= (int)pool->size ||
!pool->free_count))
return r;
if (likely(test_bit(index, pool->bitmap))) {
clear_bit(index, pool->bitmap);
--pool->free_count;
r = index;
}
return r;
}
/**
* bnxt_ba_free - Free the requested index if allocated
* @pool: Pointer to struct bitalloc
* @index: Index to free
*
* Returns: -1 on failure, 0 on success
*/
int bnxt_ba_free(struct bitalloc *pool, int index)
{
int r = -1;
if (unlikely(!pool || !pool->bitmap ||
index < 0 || index >= (int)pool->size))
return r;
if (unlikely(test_bit(index, pool->bitmap)))
return r;
set_bit(index, pool->bitmap);
pool->free_count++;
return 0;
}
/**
* bnxt_ba_inuse - Check if the requested index is already allocated
* @pool: Pointer to struct bitalloc
* @index: Index to check availability
*
* Returns: -1 on failure, 0 if it is free, 1 if it is allocated
*/
int bnxt_ba_inuse(struct bitalloc *pool, int index)
{
if (unlikely(!pool || !pool->bitmap ||
index < 0 || index >= (int)pool->size))
return -1;
return !test_bit(index, pool->bitmap);
}
/**
* bnxt_ba_inuse_free - Free the index if it was allocated
* @pool: Pointer to struct bitalloc
* @index: Index to be freed if it was allocated
*
* Returns: -1 on failure, 0 if it is free, 1 if it is in use
*/
int bnxt_ba_inuse_free(struct bitalloc *pool, int index)
{
if (unlikely(!pool || !pool->bitmap ||
index < 0 || index >= (int)pool->size))
return -1;
if (bnxt_ba_free(pool, index) == 0)
return 1;
return 0;
}
/**
* bnxt_ba_find_next_inuse - Find the next index allocated
* @pool: Pointer to struct bitalloc
* @index: Index from where to search for the next inuse index
*
* Returns: -1 on failure or if not found, else next index
*/
int bnxt_ba_find_next_inuse(struct bitalloc *pool, int index)
{
int r = -1;
if (unlikely(!pool || !pool->bitmap ||
index < 0 || index >= (int)pool->size))
return r;
r = find_next_zero_bit(pool->bitmap, pool->size, ++index);
if (unlikely(r == pool->size))
return -1;
return r;
}
/**
* bnxt_ba_find_next_inuse_free - Free the next allocated index
* @pool: Pointer to struct bitalloc
* @index: Index from where to search for the next inuse index
*
* Returns: -1 on failure, else next inuse index that was freed
*/
int bnxt_ba_find_next_inuse_free(struct bitalloc *pool, int index)
{
int r = -1;
if (unlikely(!pool || !pool->bitmap ||
index < 0 || index >= (int)pool->size))
return r;
r = find_next_zero_bit(pool->bitmap, pool->size, ++index);
if (unlikely(r == pool->size))
return -1;
if (likely(bnxt_ba_free(pool, r) == 0))
return r;
return -1;
}
/**
* bnxt_ba_free_count - Available indexes that can be allocated.
* @pool: Pointer to struct bitalloc
*
* Returns: number of free indexes (0 to size), -ve on error
*/
int bnxt_ba_free_count(struct bitalloc *pool)
{
if (unlikely(!pool))
return -EINVAL;
return (int)pool->free_count;
}
/**
* bnxt_ba_inuse_count - Number of already allocated indexes.
* @pool: Pointer to struct bitalloc
*
* Returns: number of allocated indexes (0 to size), -ve on error
*/
int bnxt_ba_inuse_count(struct bitalloc *pool)
{
if (unlikely(!pool))
return -EINVAL;
return (int)(pool->size) - (int)(pool->free_count);
}

View File

@ -0,0 +1,57 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2019-2021 Broadcom
* All rights reserved.
*/
#ifndef _BITALLOC_H_
#define _BITALLOC_H_
#include <linux/types.h>
#include <linux/bitops.h>
struct bitalloc {
u32 size;
u32 free_count;
unsigned long *bitmap;
};
#define BITALLOC_SIZEOF(size) (sizeof(struct bitalloc) + ((size) + 31) / 32)
#define BITALLOC_MAX_SIZE (32 * 32 * 32 * 32 * 32 * 32)
/* Initialize the struct bitalloc and alloc bitmap memory */
int bnxt_ba_init(struct bitalloc *pool, int size, bool free);
/* Deinitialize the struct bitalloc and free bitmap memory */
void bnxt_ba_deinit(struct bitalloc *pool);
/* Allocate a lowest free index */
int bnxt_ba_alloc(struct bitalloc *pool);
/* Allocate the given index */
int bnxt_ba_alloc_index(struct bitalloc *pool, int index);
/* Allocate a highest free index */
int bnxt_ba_alloc_reverse(struct bitalloc *pool);
/* Test if index is in use */
int bnxt_ba_inuse(struct bitalloc *pool, int index);
/* Test if index is in use, but also free the index */
int bnxt_ba_inuse_free(struct bitalloc *pool, int index);
/* Find the next index is in use from a given index */
int bnxt_ba_find_next_inuse(struct bitalloc *pool, int index);
/* Find the next index is in use from a given index, and also free it */
int bnxt_ba_find_next_inuse_free(struct bitalloc *pool, int index);
/* Free the index */
int bnxt_ba_free(struct bitalloc *pool, int index);
/* Available number of indexes for allocation */
int bnxt_ba_free_count(struct bitalloc *pool);
/* Number of indexes that are allocated */
int bnxt_ba_inuse_count(struct bitalloc *pool);
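/* Usage sketch (illustrative, caller-side): a 64-entry pool with all
 * indexes free; bnxt_ba_alloc() returns the lowest free index (0) and
 * bnxt_ba_alloc_reverse() the highest (63).
 *
 *	struct bitalloc ba;
 *
 *	if (!bnxt_ba_init(&ba, 64, true)) {
 *		int lo = bnxt_ba_alloc(&ba);
 *		int hi = bnxt_ba_alloc_reverse(&ba);
 *
 *		bnxt_ba_free(&ba, lo);
 *		bnxt_ba_free(&ba, hi);
 *		bnxt_ba_deinit(&ba);
 *	}
 */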
#endif /* _BITALLOC_H_ */

View File

@ -0,0 +1,652 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2019-2021 Broadcom
* All rights reserved.
*/
/* Name: cfa_p40_hw.h
*
* Description: header for SWE based on Truflow
*
* Date: taken from 12/16/19 17:18:12
*
* Note: This file was first generated using tflib_decode.py.
*
* Changes have been made due to lack of availability of xml for
* additional tables at this time (EEM Record and union table fields)
* Changes not autogenerated are noted in comments.
*/
#ifndef _CFA_P40_HW_H_
#define _CFA_P40_HW_H_
/* Valid TCAM entry. (for idx 5 ...) */
#define CFA_P40_PROF_L2_CTXT_TCAM_VALID_BITPOS 166
#define CFA_P40_PROF_L2_CTXT_TCAM_VALID_NUM_BITS 1
/* Key type (pass). (for idx 5 ...) */
#define CFA_P40_PROF_L2_CTXT_TCAM_KEY_TYPE_BITPOS 164
#define CFA_P40_PROF_L2_CTXT_TCAM_KEY_TYPE_NUM_BITS 2
/* Tunnel HDR type. (for idx 5 ...) */
#define CFA_P40_PROF_L2_CTXT_TCAM_TUN_HDR_TYPE_BITPOS 160
#define CFA_P40_PROF_L2_CTXT_TCAM_TUN_HDR_TYPE_NUM_BITS 4
/* Number of VLAN tags in tunnel l2 header. (for idx 4 ...) */
#define CFA_P40_PROF_L2_CTXT_TCAM_T_L2_NUMTAGS_BITPOS 158
#define CFA_P40_PROF_L2_CTXT_TCAM_T_L2_NUMTAGS_NUM_BITS 2
/* Number of VLAN tags in l2 header. (for idx 4 ...) */
#define CFA_P40_PROF_L2_CTXT_TCAM_L2_NUMTAGS_BITPOS 156
#define CFA_P40_PROF_L2_CTXT_TCAM_L2_NUMTAGS_NUM_BITS 2
/* Tunnel/Inner Source/Dest. MAC Address. */
#define CFA_P40_PROF_L2_CTXT_TCAM_MAC1_BITPOS 108
#define CFA_P40_PROF_L2_CTXT_TCAM_MAC1_NUM_BITS 48
/* Tunnel Outer VLAN Tag ID. (for idx 3 ...) */
#define CFA_P40_PROF_L2_CTXT_TCAM_T_OVID_BITPOS 96
#define CFA_P40_PROF_L2_CTXT_TCAM_T_OVID_NUM_BITS 12
/* Tunnel Inner VLAN Tag ID. (for idx 2 ...) */
#define CFA_P40_PROF_L2_CTXT_TCAM_T_IVID_BITPOS 84
#define CFA_P40_PROF_L2_CTXT_TCAM_T_IVID_NUM_BITS 12
/* Source Partition. (for idx 2 ...) */
#define CFA_P40_PROF_L2_CTXT_TCAM_SPARIF_BITPOS 80
#define CFA_P40_PROF_L2_CTXT_TCAM_SPARIF_NUM_BITS 4
/* Source Virtual I/F. (for idx 2 ...) */
#define CFA_P40_PROF_L2_CTXT_TCAM_SVIF_BITPOS 72
#define CFA_P40_PROF_L2_CTXT_TCAM_SVIF_NUM_BITS 8
/* Tunnel/Inner Source/Dest. MAC Address. */
#define CFA_P40_PROF_L2_CTXT_TCAM_MAC0_BITPOS 24
#define CFA_P40_PROF_L2_CTXT_TCAM_MAC0_NUM_BITS 48
/* Outer VLAN Tag ID. */
#define CFA_P40_PROF_L2_CTXT_TCAM_OVID_BITPOS 12
#define CFA_P40_PROF_L2_CTXT_TCAM_OVID_NUM_BITS 12
/* Inner VLAN Tag ID. */
#define CFA_P40_PROF_L2_CTXT_TCAM_IVID_BITPOS 0
#define CFA_P40_PROF_L2_CTXT_TCAM_IVID_NUM_BITS 12
enum cfa_p40_prof_l2_ctxt_tcam_flds {
CFA_P40_PROF_L2_CTXT_TCAM_VALID_FLD = 0,
CFA_P40_PROF_L2_CTXT_TCAM_KEY_TYPE_FLD = 1,
CFA_P40_PROF_L2_CTXT_TCAM_TUN_HDR_TYPE_FLD = 2,
CFA_P40_PROF_L2_CTXT_TCAM_T_L2_NUMTAGS_FLD = 3,
CFA_P40_PROF_L2_CTXT_TCAM_L2_NUMTAGS_FLD = 4,
CFA_P40_PROF_L2_CTXT_TCAM_MAC1_FLD = 5,
CFA_P40_PROF_L2_CTXT_TCAM_T_OVID_FLD = 6,
CFA_P40_PROF_L2_CTXT_TCAM_T_IVID_FLD = 7,
CFA_P40_PROF_L2_CTXT_TCAM_SPARIF_FLD = 8,
CFA_P40_PROF_L2_CTXT_TCAM_SVIF_FLD = 9,
CFA_P40_PROF_L2_CTXT_TCAM_MAC0_FLD = 10,
CFA_P40_PROF_L2_CTXT_TCAM_OVID_FLD = 11,
CFA_P40_PROF_L2_CTXT_TCAM_IVID_FLD = 12,
CFA_P40_PROF_L2_CTXT_TCAM_MAX_FLD
};
#define CFA_P40_PROF_L2_CTXT_TCAM_TOTAL_NUM_BITS 167
/* Valid entry. (for idx 2 ...) */
#define CFA_P40_ACT_VEB_TCAM_VALID_BITPOS 79
#define CFA_P40_ACT_VEB_TCAM_VALID_NUM_BITS 1
/* reserved program to 0. (for idx 2 ...) */
#define CFA_P40_ACT_VEB_TCAM_RESERVED_BITPOS 78
#define CFA_P40_ACT_VEB_TCAM_RESERVED_NUM_BITS 1
/* PF Parif Number. (for idx 2 ...) */
#define CFA_P40_ACT_VEB_TCAM_PARIF_IN_BITPOS 74
#define CFA_P40_ACT_VEB_TCAM_PARIF_IN_NUM_BITS 4
/* Number of VLAN Tags. (for idx 2 ...) */
#define CFA_P40_ACT_VEB_TCAM_NUM_VTAGS_BITPOS 72
#define CFA_P40_ACT_VEB_TCAM_NUM_VTAGS_NUM_BITS 2
/* Dest. MAC Address. */
#define CFA_P40_ACT_VEB_TCAM_MAC_BITPOS 24
#define CFA_P40_ACT_VEB_TCAM_MAC_NUM_BITS 48
/* Outer VLAN Tag ID. */
#define CFA_P40_ACT_VEB_TCAM_OVID_BITPOS 12
#define CFA_P40_ACT_VEB_TCAM_OVID_NUM_BITS 12
/* Inner VLAN Tag ID. */
#define CFA_P40_ACT_VEB_TCAM_IVID_BITPOS 0
#define CFA_P40_ACT_VEB_TCAM_IVID_NUM_BITS 12
enum cfa_p40_act_veb_tcam_flds {
CFA_P40_ACT_VEB_TCAM_VALID_FLD = 0,
CFA_P40_ACT_VEB_TCAM_RESERVED_FLD = 1,
CFA_P40_ACT_VEB_TCAM_PARIF_IN_FLD = 2,
CFA_P40_ACT_VEB_TCAM_NUM_VTAGS_FLD = 3,
CFA_P40_ACT_VEB_TCAM_MAC_FLD = 4,
CFA_P40_ACT_VEB_TCAM_OVID_FLD = 5,
CFA_P40_ACT_VEB_TCAM_IVID_FLD = 6,
CFA_P40_ACT_VEB_TCAM_MAX_FLD
};
#define CFA_P40_ACT_VEB_TCAM_TOTAL_NUM_BITS 80
/* Entry is valid. */
#define CFA_P40_LKUP_TCAM_RECORD_MEM_VALID_BITPOS 18
#define CFA_P40_LKUP_TCAM_RECORD_MEM_VALID_NUM_BITS 1
/* Action Record Pointer */
#define CFA_P40_LKUP_TCAM_RECORD_MEM_ACT_REC_PTR_BITPOS 2
#define CFA_P40_LKUP_TCAM_RECORD_MEM_ACT_REC_PTR_NUM_BITS 16
/* for resolving TCAM/EM conflicts */
#define CFA_P40_LKUP_TCAM_RECORD_MEM_STRENGTH_BITPOS 0
#define CFA_P40_LKUP_TCAM_RECORD_MEM_STRENGTH_NUM_BITS 2
enum cfa_p40_lkup_tcam_record_mem_flds {
CFA_P40_LKUP_TCAM_RECORD_MEM_VALID_FLD = 0,
CFA_P40_LKUP_TCAM_RECORD_MEM_ACT_REC_PTR_FLD = 1,
CFA_P40_LKUP_TCAM_RECORD_MEM_STRENGTH_FLD = 2,
CFA_P40_LKUP_TCAM_RECORD_MEM_MAX_FLD
};
#define CFA_P40_LKUP_TCAM_RECORD_MEM_TOTAL_NUM_BITS 19
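/* Illustrative use of the BITPOS/NUM_BITS pairs (assuming the field sits
 * within one little-endian word):
 *
 *	strength = (word >> CFA_P40_LKUP_TCAM_RECORD_MEM_STRENGTH_BITPOS) &
 *		   GENMASK(CFA_P40_LKUP_TCAM_RECORD_MEM_STRENGTH_NUM_BITS - 1, 0);
 */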
/* (for idx 1 ...) */
#define CFA_P40_PROF_CTXT_REMAP_MEM_TPID_ANTI_SPOOF_CTL_BITPOS 62
#define CFA_P40_PROF_CTXT_REMAP_MEM_TPID_ANTI_SPOOF_CTL_NUM_BITS 2
enum cfa_p40_prof_ctxt_remap_mem_tpid_anti_spoof_ctl {
CFA_P40_PROF_CTXT_REMAP_MEM_TPID_IGNORE = 0x0UL,
CFA_P40_PROF_CTXT_REMAP_MEM_TPID_DROP = 0x1UL,
CFA_P40_PROF_CTXT_REMAP_MEM_TPID_DEFAULT = 0x2UL,
CFA_P40_PROF_CTXT_REMAP_MEM_TPID_SPIF = 0x3UL,
CFA_P40_PROF_CTXT_REMAP_MEM_TPID_MAX = 0x3UL
};
/* (for idx 1 ...) */
#define CFA_P40_PROF_CTXT_REMAP_MEM_PRI_ANTI_SPOOF_CTL_BITPOS 60
#define CFA_P40_PROF_CTXT_REMAP_MEM_PRI_ANTI_SPOOF_CTL_NUM_BITS 2
enum cfa_p40_prof_ctxt_remap_mem_pri_anti_spoof_ctl {
CFA_P40_PROF_CTXT_REMAP_MEM_PRI_IGNORE = 0x0UL,
CFA_P40_PROF_CTXT_REMAP_MEM_PRI_DROP = 0x1UL,
CFA_P40_PROF_CTXT_REMAP_MEM_PRI_DEFAULT = 0x2UL,
CFA_P40_PROF_CTXT_REMAP_MEM_PRI_SPIF = 0x3UL,
CFA_P40_PROF_CTXT_REMAP_MEM_PRI_MAX = 0x3UL
};
/* Bypass Source Properties Lookup. (for idx 1 ...) */
#define CFA_P40_PROF_CTXT_REMAP_MEM_BYP_SP_LKUP_BITPOS 59
#define CFA_P40_PROF_CTXT_REMAP_MEM_BYP_SP_LKUP_NUM_BITS 1
/* SP Record Pointer. (for idx 1 ...) */
#define CFA_P40_PROF_CTXT_REMAP_MEM_SP_REC_PTR_BITPOS 43
#define CFA_P40_PROF_CTXT_REMAP_MEM_SP_REC_PTR_NUM_BITS 16
/* BD Action pointer passing enable. (for idx 1 ...) */
#define CFA_P40_PROF_CTXT_REMAP_MEM_BD_ACT_EN_BITPOS 42
#define CFA_P40_PROF_CTXT_REMAP_MEM_BD_ACT_EN_NUM_BITS 1
/* Default VLAN TPID. (for idx 1 ...) */
#define CFA_P40_PROF_CTXT_REMAP_MEM_DEFAULT_TPID_BITPOS 39
#define CFA_P40_PROF_CTXT_REMAP_MEM_DEFAULT_TPID_NUM_BITS 3
/* Allowed VLAN TPIDs. (for idx 1 ...) */
#define CFA_P40_PROF_CTXT_REMAP_MEM_ALLOWED_TPID_BITPOS 33
#define CFA_P40_PROF_CTXT_REMAP_MEM_ALLOWED_TPID_NUM_BITS 6
/* Default VLAN PRI. */
#define CFA_P40_PROF_CTXT_REMAP_MEM_DEFAULT_PRI_BITPOS 30
#define CFA_P40_PROF_CTXT_REMAP_MEM_DEFAULT_PRI_NUM_BITS 3
/* Allowed VLAN PRIs. */
#define CFA_P40_PROF_CTXT_REMAP_MEM_ALLOWED_PRI_BITPOS 22
#define CFA_P40_PROF_CTXT_REMAP_MEM_ALLOWED_PRI_NUM_BITS 8
/* Partition. */
#define CFA_P40_PROF_CTXT_REMAP_MEM_PARIF_BITPOS 18
#define CFA_P40_PROF_CTXT_REMAP_MEM_PARIF_NUM_BITS 4
/* Bypass Lookup. */
#define CFA_P40_PROF_CTXT_REMAP_MEM_BYP_LKUP_EN_BITPOS 17
#define CFA_P40_PROF_CTXT_REMAP_MEM_BYP_LKUP_EN_NUM_BITS 1
/* L2 Context Remap Data. Action bypass mode (1) {7'd0,prof_vnic[9:0]} Note:
* should also set byp_lkup_en. Action bypass mode (0) byp_lkup_en(0) -
* {prof_func[6:0],l2_context[9:0]} byp_lkup_en(1) - {1'b0,act_rec_ptr[15:0]}
*/
#define CFA_P40_PROF_CTXT_REMAP_MEM_PROF_VNIC_BITPOS 0
#define CFA_P40_PROF_CTXT_REMAP_MEM_PROF_VNIC_NUM_BITS 12
#define CFA_P40_PROF_CTXT_REMAP_MEM_PROF_FUNC_BITPOS 10
#define CFA_P40_PROF_CTXT_REMAP_MEM_PROF_FUNC_NUM_BITS 7
#define CFA_P40_PROF_CTXT_REMAP_MEM_L2_CTXT_BITPOS 0
#define CFA_P40_PROF_CTXT_REMAP_MEM_L2_CTXT_NUM_BITS 10
#define CFA_P40_PROF_CTXT_REMAP_MEM_ARP_BITPOS 0
#define CFA_P40_PROF_CTXT_REMAP_MEM_ARP_NUM_BITS 16
enum cfa_p40_prof_ctxt_remap_mem_flds {
CFA_P40_PROF_CTXT_REMAP_MEM_TPID_ANTI_SPOOF_CTL_FLD = 0,
CFA_P40_PROF_CTXT_REMAP_MEM_PRI_ANTI_SPOOF_CTL_FLD = 1,
CFA_P40_PROF_CTXT_REMAP_MEM_BYP_SP_LKUP_FLD = 2,
CFA_P40_PROF_CTXT_REMAP_MEM_SP_REC_PTR_FLD = 3,
CFA_P40_PROF_CTXT_REMAP_MEM_BD_ACT_EN_FLD = 4,
CFA_P40_PROF_CTXT_REMAP_MEM_DEFAULT_TPID_FLD = 5,
CFA_P40_PROF_CTXT_REMAP_MEM_ALLOWED_TPID_FLD = 6,
CFA_P40_PROF_CTXT_REMAP_MEM_DEFAULT_PRI_FLD = 7,
CFA_P40_PROF_CTXT_REMAP_MEM_ALLOWED_PRI_FLD = 8,
CFA_P40_PROF_CTXT_REMAP_MEM_PARIF_FLD = 9,
CFA_P40_PROF_CTXT_REMAP_MEM_BYP_LKUP_EN_FLD = 10,
CFA_P40_PROF_CTXT_REMAP_MEM_PROF_VNIC_FLD = 11,
CFA_P40_PROF_CTXT_REMAP_MEM_PROF_FUNC_FLD = 12,
CFA_P40_PROF_CTXT_REMAP_MEM_L2_CTXT_FLD = 13,
CFA_P40_PROF_CTXT_REMAP_MEM_ARP_FLD = 14,
CFA_P40_PROF_CTXT_REMAP_MEM_MAX_FLD
};
#define CFA_P40_PROF_CTXT_REMAP_MEM_TOTAL_NUM_BITS 64
/* Bypass action pointer look up (for idx 1 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_PL_BYP_LKUP_EN_BITPOS 37
#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_PL_BYP_LKUP_EN_NUM_BITS 1
/* Exact match search enable (for idx 1 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_SEARCH_ENB_BITPOS 36
#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_SEARCH_ENB_NUM_BITS 1
/* Exact match profile */
#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_PROFILE_ID_BITPOS 28
#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_PROFILE_ID_NUM_BITS 8
/* Exact match key format */
#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_ID_BITPOS 23
#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_ID_NUM_BITS 5
/* Exact match key mask */
#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_MASK_BITPOS 13
#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_MASK_NUM_BITS 10
/* TCAM search enable */
#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_SEARCH_ENB_BITPOS 12
#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_SEARCH_ENB_NUM_BITS 1
/* TCAM profile */
#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_PROFILE_ID_BITPOS 4
#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_PROFILE_ID_NUM_BITS 8
/* TCAM key format */
#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_KEY_ID_BITPOS 0
#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_KEY_ID_NUM_BITS 4
#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_BYPASS_OPT_BITPOS 16
#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_BYPASS_OPT_NUM_BITS 2
#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_ACT_REC_PTR_BITPOS 0
#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_ACT_REC_PTR_NUM_BITS 16
enum cfa_p40_prof_profile_tcam_remap_mem_flds {
CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_PL_BYP_LKUP_EN_FLD = 0,
CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_SEARCH_ENB_FLD = 1,
CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_PROFILE_ID_FLD = 2,
CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_ID_FLD = 3,
CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_MASK_FLD = 4,
CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_SEARCH_ENB_FLD = 5,
CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_PROFILE_ID_FLD = 6,
CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_KEY_ID_FLD = 7,
CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_BYPASS_OPT_FLD = 8,
CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_ACT_REC_PTR_FLD = 9,
CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_MAX_FLD
};
#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TOTAL_NUM_BITS 38
/* Valid TCAM entry (for idx 2 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_VALID_BITPOS 80
#define CFA_P40_PROF_PROFILE_TCAM_VALID_NUM_BITS 1
/* Packet type (for idx 2 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_PKT_TYPE_BITPOS 76
#define CFA_P40_PROF_PROFILE_TCAM_PKT_TYPE_NUM_BITS 4
/* Pass through CFA (for idx 2 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_RECYCLE_CNT_BITPOS 74
#define CFA_P40_PROF_PROFILE_TCAM_RECYCLE_CNT_NUM_BITS 2
/* Aggregate error (for idx 2 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_AGG_ERROR_BITPOS 73
#define CFA_P40_PROF_PROFILE_TCAM_AGG_ERROR_NUM_BITS 1
/* Profile function (for idx 2 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_PROF_FUNC_BITPOS 66
#define CFA_P40_PROF_PROFILE_TCAM_PROF_FUNC_NUM_BITS 7
/* Reserved for future use. Set to 0. */
#define CFA_P40_PROF_PROFILE_TCAM_RESERVED_BITPOS 57
#define CFA_P40_PROF_PROFILE_TCAM_RESERVED_NUM_BITS 9
/* non-tunnel(0)/tunneled(1) packet (for idx 1 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_HREC_NEXT_BITPOS 56
#define CFA_P40_PROF_PROFILE_TCAM_HREC_NEXT_NUM_BITS 1
/* Tunnel L2 tunnel valid (for idx 1 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_TL2_HDR_VALID_BITPOS 55
#define CFA_P40_PROF_PROFILE_TCAM_TL2_HDR_VALID_NUM_BITS 1
/* Tunnel L2 header type (for idx 1 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_TL2_HDR_TYPE_BITPOS 53
#define CFA_P40_PROF_PROFILE_TCAM_TL2_HDR_TYPE_NUM_BITS 2
/* Remapped tunnel L2 dest_type UC(0)/MC(2)/BC(3) (for idx 1 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_TL2_UC_MC_BC_BITPOS 51
#define CFA_P40_PROF_PROFILE_TCAM_TL2_UC_MC_BC_NUM_BITS 2
/* Tunnel L2 1+ VLAN tags present (for idx 1 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_TL2_VTAG_PRESENT_BITPOS 50
#define CFA_P40_PROF_PROFILE_TCAM_TL2_VTAG_PRESENT_NUM_BITS 1
/* Tunnel L2 2 VLAN tags present (for idx 1 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_TL2_TWO_VTAGS_BITPOS 49
#define CFA_P40_PROF_PROFILE_TCAM_TL2_TWO_VTAGS_NUM_BITS 1
/* Tunnel L3 valid (for idx 1 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_TL3_VALID_BITPOS 48
#define CFA_P40_PROF_PROFILE_TCAM_TL3_VALID_NUM_BITS 1
/* Tunnel L3 error (for idx 1 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_TL3_ERROR_BITPOS 47
#define CFA_P40_PROF_PROFILE_TCAM_TL3_ERROR_NUM_BITS 1
/* Tunnel L3 header type (for idx 1 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_TL3_HDR_TYPE_BITPOS 43
#define CFA_P40_PROF_PROFILE_TCAM_TL3_HDR_TYPE_NUM_BITS 4
/* Tunnel L3 header is IPV4 or IPV6. (for idx 1 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_TL3_HDR_ISIP_BITPOS 42
#define CFA_P40_PROF_PROFILE_TCAM_TL3_HDR_ISIP_NUM_BITS 1
/* Tunnel L3 IPV6 src address is compressed (for idx 1 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_TL3_IPV6_CMP_SRC_BITPOS 41
#define CFA_P40_PROF_PROFILE_TCAM_TL3_IPV6_CMP_SRC_NUM_BITS 1
/* Tunnel L3 IPV6 dest address is compressed (for idx 1 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_TL3_IPV6_CMP_DEST_BITPOS 40
#define CFA_P40_PROF_PROFILE_TCAM_TL3_IPV6_CMP_DEST_NUM_BITS 1
/* Tunnel L4 valid (for idx 1 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_VALID_BITPOS 39
#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_VALID_NUM_BITS 1
/* Tunnel L4 error (for idx 1 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_ERROR_BITPOS 38
#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_ERROR_NUM_BITS 1
/* Tunnel L4 header type (for idx 1 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_TYPE_BITPOS 34
#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_TYPE_NUM_BITS 4
/* Tunnel L4 header is UDP or TCP (for idx 1 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_IS_UDP_TCP_BITPOS 33
#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_IS_UDP_TCP_NUM_BITS 1
/* Tunnel valid (for idx 1 ...) */
#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_VALID_BITPOS 32
#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_VALID_NUM_BITS 1
/* Tunnel error */
#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_ERR_BITPOS 31
#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_ERR_NUM_BITS 1
/* Tunnel header type */
#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_TYPE_BITPOS 27
#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_TYPE_NUM_BITS 4
/* Tunnel header flags */
#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_FLAGS_BITPOS 24
#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_FLAGS_NUM_BITS 3
/* L2 header valid */
#define CFA_P40_PROF_PROFILE_TCAM_L2_HDR_VALID_BITPOS 23
#define CFA_P40_PROF_PROFILE_TCAM_L2_HDR_VALID_NUM_BITS 1
/* L2 header error */
#define CFA_P40_PROF_PROFILE_TCAM_L2_HDR_ERROR_BITPOS 22
#define CFA_P40_PROF_PROFILE_TCAM_L2_HDR_ERROR_NUM_BITS 1
/* L2 header type */
#define CFA_P40_PROF_PROFILE_TCAM_L2_HDR_TYPE_BITPOS 20
#define CFA_P40_PROF_PROFILE_TCAM_L2_HDR_TYPE_NUM_BITS 2
/* Remapped L2 dest_type UC(0)/MC(2)/BC(3) */
#define CFA_P40_PROF_PROFILE_TCAM_L2_UC_MC_BC_BITPOS 18
#define CFA_P40_PROF_PROFILE_TCAM_L2_UC_MC_BC_NUM_BITS 2
/* L2 header 1+ VLAN tags present */
#define CFA_P40_PROF_PROFILE_TCAM_L2_VTAG_PRESENT_BITPOS 17
#define CFA_P40_PROF_PROFILE_TCAM_L2_VTAG_PRESENT_NUM_BITS 1
/* L2 header 2 VLAN tags present */
#define CFA_P40_PROF_PROFILE_TCAM_L2_TWO_VTAGS_BITPOS 16
#define CFA_P40_PROF_PROFILE_TCAM_L2_TWO_VTAGS_NUM_BITS 1
/* L3 header valid */
#define CFA_P40_PROF_PROFILE_TCAM_L3_VALID_BITPOS 15
#define CFA_P40_PROF_PROFILE_TCAM_L3_VALID_NUM_BITS 1
/* L3 header error */
#define CFA_P40_PROF_PROFILE_TCAM_L3_ERROR_BITPOS 14
#define CFA_P40_PROF_PROFILE_TCAM_L3_ERROR_NUM_BITS 1
/* L3 header type */
#define CFA_P40_PROF_PROFILE_TCAM_L3_HDR_TYPE_BITPOS 10
#define CFA_P40_PROF_PROFILE_TCAM_L3_HDR_TYPE_NUM_BITS 4
/* L3 header is IPV4 or IPV6. */
#define CFA_P40_PROF_PROFILE_TCAM_L3_HDR_ISIP_BITPOS 9
#define CFA_P40_PROF_PROFILE_TCAM_L3_HDR_ISIP_NUM_BITS 1
/* L3 header IPV6 src address is compressed */
#define CFA_P40_PROF_PROFILE_TCAM_L3_IPV6_CMP_SRC_BITPOS 8
#define CFA_P40_PROF_PROFILE_TCAM_L3_IPV6_CMP_SRC_NUM_BITS 1
/* L3 header IPV6 dest address is compressed */
#define CFA_P40_PROF_PROFILE_TCAM_L3_IPV6_CMP_DEST_BITPOS 7
#define CFA_P40_PROF_PROFILE_TCAM_L3_IPV6_CMP_DEST_NUM_BITS 1
/* L4 header valid */
#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_VALID_BITPOS 6
#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_VALID_NUM_BITS 1
/* L4 header error */
#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_ERROR_BITPOS 5
#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_ERROR_NUM_BITS 1
/* L4 header type */
#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_TYPE_BITPOS 1
#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_TYPE_NUM_BITS 4
/* L4 header is UDP or TCP */
#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_IS_UDP_TCP_BITPOS 0
#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_IS_UDP_TCP_NUM_BITS 1
enum cfa_p40_prof_profile_tcam_flds {
CFA_P40_PROF_PROFILE_TCAM_VALID_FLD = 0,
CFA_P40_PROF_PROFILE_TCAM_PKT_TYPE_FLD = 1,
CFA_P40_PROF_PROFILE_TCAM_RECYCLE_CNT_FLD = 2,
CFA_P40_PROF_PROFILE_TCAM_AGG_ERROR_FLD = 3,
CFA_P40_PROF_PROFILE_TCAM_PROF_FUNC_FLD = 4,
CFA_P40_PROF_PROFILE_TCAM_RESERVED_FLD = 5,
CFA_P40_PROF_PROFILE_TCAM_HREC_NEXT_FLD = 6,
CFA_P40_PROF_PROFILE_TCAM_TL2_HDR_VALID_FLD = 7,
CFA_P40_PROF_PROFILE_TCAM_TL2_HDR_TYPE_FLD = 8,
CFA_P40_PROF_PROFILE_TCAM_TL2_UC_MC_BC_FLD = 9,
CFA_P40_PROF_PROFILE_TCAM_TL2_VTAG_PRESENT_FLD = 10,
CFA_P40_PROF_PROFILE_TCAM_TL2_TWO_VTAGS_FLD = 11,
CFA_P40_PROF_PROFILE_TCAM_TL3_VALID_FLD = 12,
CFA_P40_PROF_PROFILE_TCAM_TL3_ERROR_FLD = 13,
CFA_P40_PROF_PROFILE_TCAM_TL3_HDR_TYPE_FLD = 14,
CFA_P40_PROF_PROFILE_TCAM_TL3_HDR_ISIP_FLD = 15,
CFA_P40_PROF_PROFILE_TCAM_TL3_IPV6_CMP_SRC_FLD = 16,
CFA_P40_PROF_PROFILE_TCAM_TL3_IPV6_CMP_DEST_FLD = 17,
CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_VALID_FLD = 18,
CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_ERROR_FLD = 19,
CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_TYPE_FLD = 20,
CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_IS_UDP_TCP_FLD = 21,
CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_VALID_FLD = 22,
CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_ERR_FLD = 23,
CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_TYPE_FLD = 24,
CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_FLAGS_FLD = 25,
CFA_P40_PROF_PROFILE_TCAM_L2_HDR_VALID_FLD = 26,
CFA_P40_PROF_PROFILE_TCAM_L2_HDR_ERROR_FLD = 27,
CFA_P40_PROF_PROFILE_TCAM_L2_HDR_TYPE_FLD = 28,
CFA_P40_PROF_PROFILE_TCAM_L2_UC_MC_BC_FLD = 29,
CFA_P40_PROF_PROFILE_TCAM_L2_VTAG_PRESENT_FLD = 30,
CFA_P40_PROF_PROFILE_TCAM_L2_TWO_VTAGS_FLD = 31,
CFA_P40_PROF_PROFILE_TCAM_L3_VALID_FLD = 32,
CFA_P40_PROF_PROFILE_TCAM_L3_ERROR_FLD = 33,
CFA_P40_PROF_PROFILE_TCAM_L3_HDR_TYPE_FLD = 34,
CFA_P40_PROF_PROFILE_TCAM_L3_HDR_ISIP_FLD = 35,
CFA_P40_PROF_PROFILE_TCAM_L3_IPV6_CMP_SRC_FLD = 36,
CFA_P40_PROF_PROFILE_TCAM_L3_IPV6_CMP_DEST_FLD = 37,
CFA_P40_PROF_PROFILE_TCAM_L4_HDR_VALID_FLD = 38,
CFA_P40_PROF_PROFILE_TCAM_L4_HDR_ERROR_FLD = 39,
CFA_P40_PROF_PROFILE_TCAM_L4_HDR_TYPE_FLD = 40,
CFA_P40_PROF_PROFILE_TCAM_L4_HDR_IS_UDP_TCP_FLD = 41,
CFA_P40_PROF_PROFILE_TCAM_MAX_FLD
};
#define CFA_P40_PROF_PROFILE_TCAM_TOTAL_NUM_BITS 81
/* CFA flexible key layout definition */
enum cfa_p40_key_fld_id {
CFA_P40_KEY_FLD_ID_MAX
};
/**************************************************************************/
/* Non-autogenerated fields */
/* Valid */
#define CFA_P40_EEM_KEY_TBL_VALID_BITPOS 0
#define CFA_P40_EEM_KEY_TBL_VALID_NUM_BITS 1
/* L1 Cacheable */
#define CFA_P40_EEM_KEY_TBL_L1_CACHEABLE_BITPOS 1
#define CFA_P40_EEM_KEY_TBL_L1_CACHEABLE_NUM_BITS 1
/* Strength */
#define CFA_P40_EEM_KEY_TBL_STRENGTH_BITPOS 2
#define CFA_P40_EEM_KEY_TBL_STRENGTH_NUM_BITS 2
/* Key Size */
#define CFA_P40_EEM_KEY_TBL_KEY_SZ_BITPOS 15
#define CFA_P40_EEM_KEY_TBL_KEY_SZ_NUM_BITS 9
/* Record Size */
#define CFA_P40_EEM_KEY_TBL_REC_SZ_BITPOS 24
#define CFA_P40_EEM_KEY_TBL_REC_SZ_NUM_BITS 5
/* Action Record Internal */
#define CFA_P40_EEM_KEY_TBL_ACT_REC_INT_BITPOS 29
#define CFA_P40_EEM_KEY_TBL_ACT_REC_INT_NUM_BITS 1
/* External Flow Counter */
#define CFA_P40_EEM_KEY_TBL_EXT_FLOW_CTR_BITPOS 30
#define CFA_P40_EEM_KEY_TBL_EXT_FLOW_CTR_NUM_BITS 1
/* Action Record Pointer */
#define CFA_P40_EEM_KEY_TBL_AR_PTR_BITPOS 31
#define CFA_P40_EEM_KEY_TBL_AR_PTR_NUM_BITS 33
/* EEM Key omitted - create using keybuilder
* Fields here cannot be larger than a u64
*/
#define CFA_P40_EEM_KEY_TBL_TOTAL_NUM_BITS 64
enum cfa_p40_eem_key_tbl_flds {
CFA_P40_EEM_KEY_TBL_VALID_FLD = 0,
CFA_P40_EEM_KEY_TBL_L1_CACHEABLE_FLD = 1,
CFA_P40_EEM_KEY_TBL_STRENGTH_FLD = 2,
CFA_P40_EEM_KEY_TBL_KEY_SZ_FLD = 3,
CFA_P40_EEM_KEY_TBL_REC_SZ_FLD = 4,
CFA_P40_EEM_KEY_TBL_ACT_REC_INT_FLD = 5,
CFA_P40_EEM_KEY_TBL_EXT_FLOW_CTR_FLD = 6,
CFA_P40_EEM_KEY_TBL_AR_PTR_FLD = 7,
CFA_P40_EEM_KEY_TBL_MAX_FLD
};
/* Mirror Destination 0 Source Property Record Pointer */
#define CFA_P40_MIRROR_TBL_SP_PTR_BITPOS 0
#define CFA_P40_MIRROR_TBL_SP_PTR_NUM_BITS 11
/* ignore or honor drop */
#define CFA_P40_MIRROR_TBL_IGN_DROP_BITPOS 13
#define CFA_P40_MIRROR_TBL_IGN_DROP_NUM_BITS 1
/* ingress or egress copy */
#define CFA_P40_MIRROR_TBL_COPY_BITPOS 14
#define CFA_P40_MIRROR_TBL_COPY_NUM_BITS 1
/* Mirror Destination enable. */
#define CFA_P40_MIRROR_TBL_EN_BITPOS 15
#define CFA_P40_MIRROR_TBL_EN_NUM_BITS 1
/* Action Record Pointer */
#define CFA_P40_MIRROR_TBL_AR_PTR_BITPOS 16
#define CFA_P40_MIRROR_TBL_AR_PTR_NUM_BITS 16
#define CFA_P40_MIRROR_TBL_TOTAL_NUM_BITS 32
enum cfa_p40_mirror_tbl_flds {
CFA_P40_MIRROR_TBL_SP_PTR_FLD = 0,
CFA_P40_MIRROR_TBL_IGN_DROP_FLD = 1,
CFA_P40_MIRROR_TBL_COPY_FLD = 2,
CFA_P40_MIRROR_TBL_EN_FLD = 3,
CFA_P40_MIRROR_TBL_AR_PTR_FLD = 4,
CFA_P40_MIRROR_TBL_MAX_FLD
};
/* P45 Specific Updates (SR) - Non-autogenerated */
/* Valid TCAM entry. */
#define CFA_P45_PROF_L2_CTXT_TCAM_VALID_BITPOS 170
#define CFA_P45_PROF_L2_CTXT_TCAM_VALID_NUM_BITS 1
/* Source Partition. */
#define CFA_P45_PROF_L2_CTXT_TCAM_SPARIF_BITPOS 166
#define CFA_P45_PROF_L2_CTXT_TCAM_SPARIF_NUM_BITS 4
/* Source Virtual I/F. */
#define CFA_P45_PROF_L2_CTXT_TCAM_SVIF_BITPOS 72
#define CFA_P45_PROF_L2_CTXT_TCAM_SVIF_NUM_BITS 12
/* The SR layout of the l2 ctxt key is different from the Wh+. Switch to
* cfa_p45_hw.h definition when available.
*/
enum cfa_p45_prof_l2_ctxt_tcam_flds {
CFA_P45_PROF_L2_CTXT_TCAM_VALID_FLD = 0,
CFA_P45_PROF_L2_CTXT_TCAM_SPARIF_FLD = 1,
CFA_P45_PROF_L2_CTXT_TCAM_KEY_TYPE_FLD = 2,
CFA_P45_PROF_L2_CTXT_TCAM_TUN_HDR_TYPE_FLD = 3,
CFA_P45_PROF_L2_CTXT_TCAM_T_L2_NUMTAGS_FLD = 4,
CFA_P45_PROF_L2_CTXT_TCAM_L2_NUMTAGS_FLD = 5,
CFA_P45_PROF_L2_CTXT_TCAM_MAC1_FLD = 6,
CFA_P45_PROF_L2_CTXT_TCAM_T_OVID_FLD = 7,
CFA_P45_PROF_L2_CTXT_TCAM_T_IVID_FLD = 8,
CFA_P45_PROF_L2_CTXT_TCAM_SVIF_FLD = 9,
CFA_P45_PROF_L2_CTXT_TCAM_MAC0_FLD = 10,
CFA_P45_PROF_L2_CTXT_TCAM_OVID_FLD = 11,
CFA_P45_PROF_L2_CTXT_TCAM_IVID_FLD = 12,
CFA_P45_PROF_L2_CTXT_TCAM_MAX_FLD
};
#define CFA_P45_PROF_L2_CTXT_TCAM_TOTAL_NUM_BITS 171
#endif /* _CFA_P40_HW_H_ */

File diff suppressed because it is too large

View File

@ -0,0 +1,89 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2019-2021 Broadcom
* All rights reserved.
*/
/*!
* \file
* \brief Exported functions for CFA HW programming
*/
#ifndef _HCAPI_CFA_H_
#define _HCAPI_CFA_H_
#include <linux/types.h>
#include "hcapi_cfa_defs.h"
struct hcapi_cfa_devops;
/**
* CFA device information
*/
struct hcapi_cfa_devinfo {
/** [out] CFA hw fix formatted layouts */
const struct hcapi_cfa_layout_tbl *layouts;
/** [out] CFA device ops function pointer table */
const struct hcapi_cfa_devops *devops;
};
/**
* \defgroup CFA_HCAPI_DEVICE_API
* HCAPI used for writing to the hardware
* @{
*/
/** CFA device specific function hooks structure
*
* The following device hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer. The purpose of these hooks
 * is to support CFA device operations for different device variants.
*/
struct hcapi_cfa_devops {
/** calculate a key hash for the provided key_data
*
* This API computes hash for a key.
*
* @param[in] key_data
* A pointer of the key data buffer
*
* @param[in] bitlen
* Number of bits of the key data
*
* @return
* 0 for SUCCESS, negative value for FAILURE
*/
u64 (*hcapi_cfa_key_hash)(u8 *key_data, u16 bitlen);
};
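/* Dispatch sketch (assumes "devinfo" was filled in for the device, e.g.
 * with cfa_p58_devops):
 *
 *	u64 hash = devinfo->devops->hcapi_cfa_key_hash(key_data, key_bitlen);
 */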
/*@}*/
extern const size_t CFA_RM_HANDLE_DATA_SIZE;
#if SUPPORT_CFA_HW_ALL
extern const struct hcapi_cfa_devops cfa_p4_devops;
extern const struct hcapi_cfa_devops cfa_p58_devops;
extern const struct hcapi_cfa_devops cfa_p59_devops;
extern const struct hcapi_cfa_layout_tbl cfa_p59_layout_tbl;
u64 hcapi_cfa_p59_key_hash(u64 *key_data, u16 bitlen);
#elif defined(SUPPORT_CFA_HW_P4) && SUPPORT_CFA_HW_P4
extern const struct hcapi_cfa_devops cfa_p4_devops;
u64 hcapi_cfa_p4_key_hash(u64 *key_data, u16 bitlen);
/* SUPPORT_CFA_HW_P4 */
#elif SUPPORT_CFA_HW_P45
/* Firmware function defines */
/* SUPPORT_CFA_HW_P45 */
#elif defined(SUPPORT_CFA_HW_P58) && SUPPORT_CFA_HW_P58
extern const struct hcapi_cfa_devops cfa_p58_devops;
u64 hcapi_cfa_p58_key_hash(u64 *key_data, u16 bitlen);
/* SUPPORT_CFA_HW_P58 */
#elif defined(SUPPORT_CFA_HW_P59) && SUPPORT_CFA_HW_P59
extern const struct hcapi_cfa_devops cfa_p59_devops;
extern const struct hcapi_cfa_layout_tbl cfa_p59_layout_tbl;
u64 hcapi_cfa_p59_key_hash(u64 *key_data, u16 bitlen);
#ifdef CFA_HW_SUPPORT_HOST_IF
#else
#endif
/* SUPPORT_CFA_HW_P59 */
#endif
#endif /* _HCAPI_CFA_H_ */

View File

@ -0,0 +1,794 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2019-2021 Broadcom
* All rights reserved.
*/
/* Exported functions for CFA HW programming */
#ifndef _HCAPI_CFA_DEFS_H_
#define _HCAPI_CFA_DEFS_H_
#include <linux/types.h>
#define CFA_BITS_PER_BYTE (8)
#define CFA_BITS_PER_WORD (sizeof(u32) * CFA_BITS_PER_BYTE)
#define __CFA_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define CFA_ALIGN(x, a) __CFA_ALIGN_MASK((x), (a) - 1)
#define CFA_ALIGN_256(x) CFA_ALIGN(x, 256)
#define CFA_ALIGN_128(x) CFA_ALIGN(x, 128)
#define CFA_ALIGN_32(x) CFA_ALIGN(x, 32)
#define NUM_WORDS_ALIGN_32BIT(x) (CFA_ALIGN_32(x) / CFA_BITS_PER_WORD)
#define NUM_WORDS_ALIGN_128BIT(x) (CFA_ALIGN_128(x) / CFA_BITS_PER_WORD)
#define NUM_WORDS_ALIGN_256BIT(x) (CFA_ALIGN_256(x) / CFA_BITS_PER_WORD)
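/* e.g. NUM_WORDS_ALIGN_32BIT(33) == 2: 33 bits round up to 64 bits, which
 * is two 32-bit words.
 */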
/* TODO: redefine according to chip variant */
#define CFA_GLOBAL_CFG_DATA_SZ (100)
#ifndef SUPPORT_CFA_HW_P4
#define SUPPORT_CFA_HW_P4 (0)
#endif
#ifndef SUPPORT_CFA_HW_P45
#define SUPPORT_CFA_HW_P45 (0)
#endif
#ifndef SUPPORT_CFA_HW_P58
#define SUPPORT_CFA_HW_P58 (0)
#endif
#ifndef SUPPORT_CFA_HW_P59
#define SUPPORT_CFA_HW_P59 (0)
#endif
#if SUPPORT_CFA_HW_P4 && SUPPORT_CFA_HW_P45 && SUPPORT_CFA_HW_P58 && \
SUPPORT_CFA_HW_P59
#define SUPPORT_CFA_HW_ALL (1)
#endif
#if SUPPORT_CFA_HW_ALL
#include "hcapi_cfa_p4.h"
#include "hcapi_cfa_p58.h"
#define CFA_PROF_L2CTXT_TCAM_MAX_FIELD_CNT CFA_P58_PROF_L2_CTXT_TCAM_MAX_FLD
#define CFA_PROF_L2CTXT_REMAP_MAX_FIELD_CNT CFA_P58_PROF_L2_CTXT_RMP_DR_MAX_FLD
#define CFA_PROF_MAX_KEY_CFG_SZ sizeof(struct cfa_p58_prof_key_cfg)
#define CFA_KEY_MAX_FIELD_CNT CFA_P58_KEY_FLD_ID_MAX
#define CFA_ACT_MAX_TEMPLATE_SZ sizeof(struct cfa_p58_action_template)
#else
#if SUPPORT_CFA_HW_P4 || SUPPORT_CFA_HW_P45
#include "hcapi_cfa_p4.h"
#define CFA_PROF_L2CTXT_TCAM_MAX_FIELD_CNT CFA_P40_PROF_L2_CTXT_TCAM_MAX_FLD
#define CFA_PROF_L2CTXT_REMAP_MAX_FIELD_CNT CFA_P40_PROF_L2_CTXT_RMP_DR_MAX_FLD
#define CFA_PROF_MAX_KEY_CFG_SZ sizeof(struct cfa_p4_prof_key_cfg)
#define CFA_KEY_MAX_FIELD_CNT CFA_P40_KEY_FLD_ID_MAX
#define CFA_ACT_MAX_TEMPLATE_SZ sizeof(struct cfa_p4_action_template)
#endif
#if SUPPORT_CFA_HW_P58
#include "hcapi_cfa_p58.h"
#define CFA_PROF_L2CTXT_TCAM_MAX_FIELD_CNT CFA_P58_PROF_L2_CTXT_TCAM_MAX_FLD
#define CFA_PROF_L2CTXT_REMAP_MAX_FIELD_CNT CFA_P58_PROF_L2_CTXT_RMP_DR_MAX_FLD
#define CFA_PROF_MAX_KEY_CFG_SZ sizeof(struct cfa_p5_prof_key_cfg)
#define CFA_KEY_MAX_FIELD_CNT CFA_P58_KEY_FLD_ID_MAX
#define CFA_ACT_MAX_TEMPLATE_SZ sizeof(struct cfa_p58_action_template)
#endif
#if SUPPORT_CFA_HW_P59
#include "hcapi_cfa_p59.h"
#define CFA_PROF_L2CTXT_TCAM_MAX_FIELD_CNT CFA_P59_PROF_L2_CTXT_TCAM_MAX_FLD
#define CFA_PROF_L2CTXT_REMAP_MAX_FIELD_CNT CFA_P59_PROF_L2_CTXT_RMP_DR_MAX_FLD
#define CFA_PROF_MAX_KEY_CFG_SZ sizeof(struct cfa_p59_prof_key_cfg)
#define CFA_KEY_MAX_FIELD_CNT CFA_P59_EM_KEY_LAYOUT_MAX_FLD
#define CFA_ACT_MAX_TEMPLATE_SZ sizeof(struct cfa_p59_action_template)
#endif
#endif /* SUPPORT_CFA_HW_ALL */
/* Hashing defines */
#define HCAPI_CFA_LKUP_SEED_MEM_SIZE 512
/* CRC32i support for Key0 hash */
extern const u32 crc32tbl[];
#define ucrc32(ch, crc) (crc32tbl[((crc) ^ (ch)) & 0xff] ^ ((crc) >> 8))
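/* Typical use, folding a byte stream into a running CRC32:
 *
 *	while (len--)
 *		crc = ucrc32(*buf++, crc);
 */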
/* CFA HW version definition */
enum hcapi_cfa_ver {
HCAPI_CFA_P40 = 0, /* CFA phase 4.0 */
HCAPI_CFA_P45 = 1, /* CFA phase 4.5 */
HCAPI_CFA_P58 = 2, /* CFA phase 5.8 */
HCAPI_CFA_P59 = 3, /* CFA phase 5.9 */
HCAPI_CFA_PMAX = 4
};
/* CFA direction definition */
enum hcapi_cfa_dir {
HCAPI_CFA_DIR_RX = 0, /* Receive */
HCAPI_CFA_DIR_TX = 1, /* Transmit */
HCAPI_CFA_DIR_MAX = 2
};
/* CFA HW OPCODE definition */
enum hcapi_cfa_hwops {
HCAPI_CFA_HWOPS_PUT, /* Write to HW operation */
HCAPI_CFA_HWOPS_GET, /* Read from HW operation */
HCAPI_CFA_HWOPS_ADD, /* For operations which require more than
* simple writes to HW, this operation is
* used. The distinction with this operation
* when compared to the PUT ops is that this
* operation is used in conjunction with
* the HCAPI_CFA_HWOPS_DEL op to remove
* the operations issued by the ADD OP.
*/
HCAPI_CFA_HWOPS_DEL, /* Besides deleting from the hardware, this
 * operation also undoes the add operation
* performed by the HCAPI_CFA_HWOPS_ADD op.
*/
HCAPI_CFA_HWOPS_EVICT, /* This operation is used to evict entries from
* CFA cache memories. This operation is only
* applicable to tables that use CFA caches.
*/
HCAPI_CFA_HWOPS_MAX
};
/* CFA HW KEY CONTROL OPCODE definition */
enum hcapi_cfa_key_ctrlops {
HCAPI_CFA_KEY_CTRLOPS_INSERT, /* insert control bits */
HCAPI_CFA_KEY_CTRLOPS_STRIP, /* strip control bits */
HCAPI_CFA_KEY_CTRLOPS_MAX
};
/**
* CFA HW field structure definition
 * @bitpos: Starting bit position of the HW field within a HW table
* entry.
* @bitlen: Number of bits for the HW field.
*/
struct hcapi_cfa_field {
u16 bitpos;
u16 bitlen;
};
/**
* CFA HW table entry layout structure definition
* @is_msb_order: Bit order of layout
* @total_sz_in_bits: Size in bits of entry
* @field_array: data pointer of the HW layout fields array
* @array_sz: number of HW field entries in the HW layout field array
* @layout_id: layout id associated with the layout
*/
struct hcapi_cfa_layout {
bool is_msb_order;
u32 total_sz_in_bits;
struct hcapi_cfa_field *field_array;
u32 array_sz;
u16 layout_id;
};
/**
* CFA HW data object definition
* @field_id: HW field identifier. Used as an index to a HW table layout
* @val: Value of the HW field
*/
struct hcapi_cfa_data_obj {
u16 field_id;
u64 val;
};
/**
* CFA HW definition
* @base_addr: HW table base address for the operation with optional device
* handle. For on-chip HW table operation, this is the either
* the TX or RX CFA HW base address. For off-chip table, this
* field is the base memory address of the off-chip table.
* @handle: Optional opaque device handle. It is generally used to access
* an GRC register space through PCIE BAR and passed to the BAR
* memory accessor routine.
*/
struct hcapi_cfa_hw {
u64 base_addr;
void *handle;
};
/**
* CFA HW operation definition
* @opcode: HW opcode
* @hw: CFA HW information used by accessor routines
*/
struct hcapi_cfa_hwop {
enum hcapi_cfa_hwops opcode;
struct hcapi_cfa_hw hw;
};
/**
* CFA HW data structure definition
 * @index/@byte_offset: physical offset into the HW table for the data to be
 * written to. If this is an array of registers, @index is the
 * index into the array of registers. For writing keys, @byte_offset
 * is the byte pointer into the memory where the key should be
 * written.
* @data: HW data buffer pointer
* @data_mask: HW data mask buffer pointer. When the CFA data is a FKB and
* data_mask pointer is NULL, then the default mask to enable
 * all bits will be used.
* @data_sz: size of the HW data buffer in bytes
*/
struct hcapi_cfa_data {
union {
u32 index;
u32 byte_offset;
};
u8 *data;
u8 *data_mask;
u16 data_sz;
};
/********************** Truflow start ***************************/
enum hcapi_cfa_pg_tbl_lvl {
TF_PT_LVL_0,
TF_PT_LVL_1,
TF_PT_LVL_2,
TF_PT_LVL_MAX
};
enum hcapi_cfa_em_table_type {
TF_KEY0_TABLE,
TF_KEY1_TABLE,
TF_RECORD_TABLE,
TF_EFC_TABLE,
TF_ACTION_TABLE,
TF_EM_LKUP_TABLE,
TF_MAX_TABLE
};
struct hcapi_cfa_em_page_tbl {
u32 pg_count;
u32 pg_size;
void **pg_va_tbl;
u64 *pg_pa_tbl;
};
struct hcapi_cfa_em_table {
int type;
u32 num_entries;
u16 ctx_id;
u32 entry_size;
int num_lvl;
u32 page_cnt[TF_PT_LVL_MAX];
u64 num_data_pages;
void *l0_addr;
u64 l0_dma_addr;
struct hcapi_cfa_em_page_tbl pg_tbl[TF_PT_LVL_MAX];
};
struct hcapi_cfa_em_ctx_mem_info {
struct hcapi_cfa_em_table em_tables[TF_MAX_TABLE];
};
/********************** Truflow end ****************************/
/**
* CFA HW key table definition
* Applicable to EEM and off-chip EM table only.
* @base0: For EEM, this is the KEY0 base mem pointer. For off-chip EM,
* this is the base mem pointer of the key table.
* @size: total size of the key table in bytes. For EEM, this size is
* same for both KEY0 and KEY1 table.
* @num_buckets: number of key buckets, applicable for newer chips
 * @base1: For EEM, this is KEY1 base mem pointer. For off-chip EM,
* this is the key record memory base pointer within the key
* table, applicable for newer chip
* @bs_db: Optional - If the table is managed by a Backing Store
 * database, then this object can be used to configure the EM Key.
* @page_size: Page size for EEM tables
*/
struct hcapi_cfa_key_tbl {
u8 *base0;
u32 size;
u32 num_buckets;
u8 *base1;
struct hcapi_cfa_bs_db *bs_db;
u32 page_size;
};
/**
* CFA HW key buffer definition
* @data: pointer to the key data buffer
* @len: buffer len in bytes
* @layout: Pointer to the key layout
*/
struct hcapi_cfa_key_obj {
u32 *data;
u32 len;
struct hcapi_cfa_key_layout *layout;
};
/**
* CFA HW key data definition
* @offset: For on-chip key table, it is the offset in unit of smallest
* key. For off-chip key table, it is the byte offset relative
* to the key record memory base and adjusted for page and
* entry size.
* @data: HW key data buffer pointer
* @size: size of the key in bytes
* @tbl_scope: optional table scope ID
* @metadata: the fid owner of the key
* stored with the bucket which can be used by
* the caller to retrieve later via the GET HW OP.
*/
struct hcapi_cfa_key_data {
u32 offset;
u8 *data;
u16 size;
u8 tbl_scope;
u64 metadata;
};
/**
* CFA HW key location definition
* @bucket_mem_ptr: on-chip EM bucket offset or off-chip EM bucket
* mem pointer
* @mem_ptr: off-chip EM key offset mem pointer
* @bucket_mem_idx: index within the array of the EM buckets
* @bucket_idx: index within the EM bucket
* @mem_idx: index within the EM records
*/
struct hcapi_cfa_key_loc {
u64 bucket_mem_ptr;
u64 mem_ptr;
u32 bucket_mem_idx;
u8 bucket_idx;
u32 mem_idx;
};
/**
* CFA HW layout table definition
* @tbl: data pointer to an array of fix formatted layouts supported.
* The index to the array is the CFA HW table ID
* @num_layouts: number of fix formatted layouts in the layout array
*/
struct hcapi_cfa_layout_tbl {
const struct hcapi_cfa_layout *tbl;
u16 num_layouts;
};
/**
* Key template consists of key fields that can be enabled/disabled
* individually.
 * @field_en: key field enable array; set the corresponding
 * field enable to 1 to make a field valid
 * @is_wc_tcam_key: Identify if the key template is for TCAM. If false,
 * the key template is for EM. This field is
 * mandatory for devices that only support fixed key
 * formats.
 * @is_ipv6_key: Identify if the key template will be used for
 * IPv6 keys.
*/
struct hcapi_cfa_key_template {
u8 field_en[CFA_KEY_MAX_FIELD_CNT];
bool is_wc_tcam_key;
bool is_ipv6_key;
};
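/* e.g. (illustrative; "my_field_id" is a placeholder for a device-specific
 * key field ID):
 *
 *	struct hcapi_cfa_key_template kt = { 0 };
 *
 *	kt.is_wc_tcam_key = false;
 *	kt.field_en[my_field_id] = 1;
 */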
/**
* key layout consist of field array, key bitlen, key ID, and other meta data
* pertain to a key
* @layout: key layout data
* @bitlen: actual key size in number of bits
* @id: key identifier and this field is only valid for device
* that supports fix key formats
 * @is_wc_tcam_key: Identify if the key layout is a WC TCAM key
 * @is_ipv6_key: Identify if the key template will be used for IPv6 keys.
* @slices_size: total slices size, valid for WC TCAM key only. It can
* be used by the user to determine the total size of WC
* TCAM key slices in bytes.
*/
struct hcapi_cfa_key_layout {
struct hcapi_cfa_layout *layout;
u16 bitlen;
u16 id;
bool is_wc_tcam_key;
bool is_ipv6_key;
u16 slices_size;
};
/**
* key layout memory contents
* @key_layout: key layouts
* @layout: layout
* @field_array: fields
*/
struct hcapi_cfa_key_layout_contents {
struct hcapi_cfa_key_layout key_layout;
struct hcapi_cfa_layout layout;
struct hcapi_cfa_field field_array[CFA_KEY_MAX_FIELD_CNT];
};
/**
* Action template consists of action fields that can be enabled/disabled
* individually.
* @hw_ver: CFA version for the action template
 * @data: action field enable array; set the corresponding
 * field enable to 1 to make a field valid
*/
struct hcapi_cfa_action_template {
enum hcapi_cfa_ver hw_ver;
u8 data[CFA_ACT_MAX_TEMPLATE_SZ];
};
/**
* Action record info
* @blk_id: action SRAM block ID for on-chip action records or table
* scope of the action backing store
* @offset: offset
*/
struct hcapi_cfa_action_addr {
u16 blk_id;
u32 offset;
};
/**
* Action data definition
* @addr: action record addr info for on-chip action records
* @data: pointer to the action data buffer
* @len: action data buffer len in bytes
*/
struct hcapi_cfa_action_data {
struct hcapi_cfa_action_addr addr;
u32 *data;
u32 len;
};
/**
* Action object definition
* @data: pointer to the action data buffer
* @len: buffer len in bytes
* @layout: pointer to the action layout
*/
struct hcapi_cfa_action_obj {
u32 *data;
u32 len;
struct hcapi_cfa_action_layout *layout;
};
/**
* action layout consist of field array, action wordlen and action format ID
* @id: action identifier
* @layout: action layout data
* @bitlen: actual action record size in number of bits
*/
struct hcapi_cfa_action_layout {
u16 id;
struct hcapi_cfa_layout *layout;
u16 bitlen;
};
/* CFA backing store type definition */
enum hcapi_cfa_bs_type {
HCAPI_CFA_BS_TYPE_LKUP, /* EM LKUP backing store type */
HCAPI_CFA_BS_TYPE_ACT, /* Action backing store type */
HCAPI_CFA_BS_TYPE_MAX
};
/* CFA backing store configuration data object */
struct hcapi_cfa_bs_cfg {
enum hcapi_cfa_bs_type type;
u16 tbl_scope;
struct hcapi_cfa_bs_db *bs_db;
};
/**
* CFA backing store data base object
* @signature: memory manager database signature
* @mgmt_db: memory manager database base pointer (VA)
* @mgmt_db_sz: memory manager database size in bytes
* @bs_ptr: Backing store memory pool base pointer
* (VA backed by IOVA which is DMA accessible)
* @offset: bs_offset - byte offset to the section of the backing
* store memory managed by the backing store memory manager.
* For EM backing store, this is the starting byte offset
* to the EM record memory. For Action backing store, this
* offset is 0.
* @bs_sz: backing store memory pool size in bytes
*/
struct hcapi_cfa_bs_db {
u32 signature;
#define HCAPI_CFA_BS_SIGNATURE 0xCFA0B300
void *mgmt_db;
u32 mgmt_db_sz;
void *bs_ptr;
u32 offset;
u32 bs_sz;
};
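/* Illustrative sketch (not part of the original header): validate a backing
 * store database and locate the managed section inside the memory pool,
 * following the signature and offset semantics documented above.
 */
static inline void *example_cfa_bs_managed_base(struct hcapi_cfa_bs_db *bs_db)
{
	if (!bs_db || bs_db->signature != HCAPI_CFA_BS_SIGNATURE)
		return NULL;
	/* offset is the EM record start for LKUP, and 0 for Action */
	return (u8 *)bs_db->bs_ptr + bs_db->offset;
}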
/**
 * @defgroup CFA_HCAPI_PUT_API
* HCAPI used for writing to the hardware
*/
/**
* This API provides the functionality to program a specified value to a
* HW field based on the provided programming layout.
*
* @data_buf: A data pointer to a CFA HW key/mask data
* @layout: A pointer to CFA HW programming layout
* @field_id: ID of the HW field to be programmed
* @val: Value of the HW field to be programmed
*
* @return
* 0 for SUCCESS, negative value for FAILURE
*/
int hcapi_cfa_put_field(u64 *data_buf, const struct hcapi_cfa_layout *layout,
u16 field_id, u64 val);
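/* Usage sketch (hypothetical layout and field ID; not part of the original
 * header): program a single field value into a zeroed key buffer.
 */
static inline int example_cfa_put_one_field(const struct hcapi_cfa_layout *layout)
{
	u64 key_buf[8] = { 0 };	/* buffer size chosen for illustration only */
	u16 field_id = 0;	/* hypothetical field identifier */

	return hcapi_cfa_put_field(key_buf, layout, field_id, 0x1ULL);
}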
/**
* This API provides the functionality to program an array of field values
* with corresponding field IDs to a number of profiler sub-block fields
* based on the fixed profiler sub-block hardware programming layout.
*
* @obj_data: A pointer to a CFA profiler key/mask object data
* @layout: A pointer to CFA HW programming layout
* @field_tbl: A pointer to an array that consists of the object field
* ID/value pairs
* @field_tbl_sz: Number of entries in the table
*
* @return
* 0 for SUCCESS, negative value for FAILURE
*/
int hcapi_cfa_put_fields(u64 *obj_data, const struct hcapi_cfa_layout *layout,
struct hcapi_cfa_data_obj *field_tbl,
u16 field_tbl_sz);
/**
* This API provides the functionality to program an array of field values
* with corresponding field IDs to a number of profiler sub-block fields
* based on the fixed profiler sub-block hardware programming layout. This
* API will swap the n byte blocks before programming the field array.
*
* @obj_data: A pointer to a CFA profiler key/mask object data
* @layout: A pointer to CFA HW programming layout
* @field_tbl: A pointer to an array that consists of the object field
* ID/value pairs
* @field_tbl_sz: Number of entries in the table
* @data_size: size of the data in bytes
* @n: block size in bytes
*
* @return
* 0 for SUCCESS, negative value for FAILURE
*/
int hcapi_cfa_put_fields_swap(u64 *obj_data,
const struct hcapi_cfa_layout *layout,
struct hcapi_cfa_data_obj *field_tbl,
u16 field_tbl_sz, u16 data_size,
u16 n);
/**
* This API provides the functionality to write a value to a
* field within the bit position and bit length of a HW data
* object based on a provided programming layout.
*
 * @obj_data: A pointer to the data object to be written
 * @layout: A pointer to the programming layout
* @field_id: Identifier of the HW field
* @bitpos_adj: Bit position adjustment value
* @bitlen_adj: Bit length adjustment value
* @val: HW field value to be programmed
*
* @return
* 0 for SUCCESS, negative value for FAILURE
*/
int hcapi_cfa_put_field_rel(u64 *obj_data,
const struct hcapi_cfa_layout *layout,
			    u16 field_id, s16 bitpos_adj,
s16 bitlen_adj, u64 val);
/**
 * @defgroup CFA_HCAPI_GET_API
* HCAPI used for reading from the hardware
*/
/**
* This API provides the functionality to get the word length of
* a layout object.
*
 * @layout: A pointer to the HW layout
* @return:
* Word length of the layout object
*/
u16 hcapi_cfa_get_wordlen(const struct hcapi_cfa_layout *layout);
/**
* The API provides the functionality to get bit offset and bit
* length information of a field from a programming layout.
*
 * @layout: A pointer to the action layout
 * @field_id: Identifier of the HW field
 * @slice: A pointer to the action offset info data structure
*
* @return:
* 0 for SUCCESS, negative value for FAILURE
*/
int hcapi_cfa_get_slice(const struct hcapi_cfa_layout *layout,
u16 field_id, struct hcapi_cfa_field *slice);
/**
* This API provides the functionality to read the value of a
* CFA HW field from CFA HW data object based on the hardware
* programming layout.
*
* @obj_data: A pointer to a CFA HW key/mask object data
* @layout: A pointer to CFA HW programming layout
* @field_id: ID of the HW field to be programmed
* @val: Value of the HW field
*
* @return:
* 0 for SUCCESS, negative value for FAILURE
*/
int hcapi_cfa_get_field(u64 *obj_data,
const struct hcapi_cfa_layout *layout,
u16 field_id, u64 *val);
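/* Read-back sketch mirroring the put example above (hypothetical field ID;
 * not part of the original header).
 */
static inline int example_cfa_get_one_field(u64 *obj_data,
					    const struct hcapi_cfa_layout *layout,
					    u64 *val)
{
	u16 field_id = 0;	/* hypothetical field identifier */

	return hcapi_cfa_get_field(obj_data, layout, field_id, val);
}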
/**
* This API provides the functionality to read 128-bit value of
* a CFA HW field from CFA HW data object based on the hardware
* programming layout.
*
* @obj_data: A pointer to a CFA HW key/mask object data
* @layout: A pointer to CFA HW programming layout
* @field_id: ID of the HW field to be programmed
* @val_msb: Msb value of the HW field
* @val_lsb: Lsb value of the HW field
*
* @return
* 0 for SUCCESS, negative value for FAILURE
*/
int hcapi_cfa_get128_field(u64 *obj_data,
const struct hcapi_cfa_layout *layout,
u16 field_id, u64 *val_msb,
u64 *val_lsb);
/**
* This API provides the functionality to read a number of
* HW fields from a CFA HW data object based on the hardware
* programming layout.
*
* @obj_data: A pointer to a CFA profiler key/mask object data
* @layout: A pointer to CFA HW programming layout
* @field_tbl: A pointer to an array that consists of the object field
* ID/value pairs
* @field_tbl_sz: Number of entries in the table
*
* @return:
* 0 for SUCCESS, negative value for FAILURE
*/
int hcapi_cfa_get_fields(u64 *obj_data,
const struct hcapi_cfa_layout *layout,
struct hcapi_cfa_data_obj *field_tbl,
u16 field_tbl_sz);
/**
* This API provides the functionality to read a number of
* HW fields from a CFA HW data object based on the hardware
* programming layout.This API will swap the n byte blocks before
* retrieving the field array.
*
* @obj_data: A pointer to a CFA profiler key/mask object data
* @layout: A pointer to CFA HW programming layout
* @field_tbl: A pointer to an array that consists of the object field
* ID/value pairs
* @field_tbl_sz: Number of entries in the table
* @data_size: size of the data in bytes
* @n: block size in bytes
*
* @return:
* 0 for SUCCESS, negative value for FAILURE
*/
int hcapi_cfa_get_fields_swap(u64 *obj_data,
const struct hcapi_cfa_layout *layout,
struct hcapi_cfa_data_obj *field_tbl,
u16 field_tbl_sz, u16 data_size,
u16 n);
/**
* Get a value to a specific location relative to a HW field
*
* This API provides the functionality to read HW field from
* a section of a HW data object identified by the bit position
* and bit length from a given programming layout in order to avoid
* reading the entire HW data object.
*
 * @obj_data: A pointer to the data object to read from
 * @layout: A pointer to the programming layout
* @field_id: Identifier of the HW field
* @bitpos_adj: Bit position adjustment value
* @bitlen_adj: Bit length adjustment value
* @val: Value of the HW field
*
* @return
* 0 for SUCCESS, negative value for FAILURE
*/
int hcapi_cfa_get_field_rel(u64 *obj_data,
const struct hcapi_cfa_layout *layout,
			    u16 field_id, s16 bitpos_adj,
s16 bitlen_adj, u64 *val);
/**
* Get the length of the layout in words
*
* @layout: A pointer to the layout to determine the number of words
* required
*
* @return
* number of words needed for the given layout
*/
u16 cfa_hw_get_wordlen(const struct hcapi_cfa_layout *layout);
/**
* This function is used to initialize a layout_contents structure
*
* The struct hcapi_cfa_key_layout is complex as there are three
 * layers of abstraction. Each of those layers needs to be properly
* initialized.
*
 * @contents: A pointer to the layout contents to initialize
*
* @return
* 0 for SUCCESS, negative value for FAILURE
*/
int hcapi_cfa_init_key_contents(struct hcapi_cfa_key_layout_contents
*contents);
/**
* This function is used to validate a key template
*
* The struct hcapi_cfa_key_template is complex as there are three
 * layers of abstraction. Each of those layers needs to be properly
* validated.
*
 * @key_template: A pointer to the key template contents to validate
*
* @return
* 0 for SUCCESS, negative value for FAILURE
*/
int hcapi_cfa_is_valid_key_template(struct hcapi_cfa_key_template
*key_template);
/**
* This function is used to validate a key layout
*
* The struct hcapi_cfa_key_layout is complex as there are three
 * layers of abstraction. Each of those layers needs to be properly
* validated.
*
 * @key_layout: A pointer to the key layout contents to validate
*
* @return
* 0 for SUCCESS, negative value for FAILURE
*/
int hcapi_cfa_is_valid_key_layout(struct hcapi_cfa_key_layout *key_layout);
/**
* This function is used to hash E/EM keys
*
* @key_data: A pointer of the key
* @bitlen: Number of bits in the key
*
* @return
* CRC32 and Lookup3 hashes of the input key
*/
u64 hcapi_cfa_key_hash(u8 *key_data, u16 bitlen);
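/* Sketch (not part of the original header): the 64-bit result packs the
 * CRC32 hash in the upper 32 bits and the Lookup3 hash in the lower 32
 * bits, per the P4/P58 implementations of this function.
 */
static inline void example_cfa_split_key_hash(u8 *key_data, u16 bitlen,
					      u32 *crc32_hash, u32 *lookup3_hash)
{
	u64 hash = hcapi_cfa_key_hash(key_data, bitlen);

	*crc32_hash = (u32)(hash >> 32);
	*lookup3_hash = (u32)(hash & 0xffffffffULL);
}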
/**
* This function is used to execute an operation
*
* @op: Operation
* @key_tbl: Table
* @key_obj: Key data
 * @key_loc: Key location
*
* @return
* 0 for SUCCESS, negative value for FAILURE
*/
int hcapi_cfa_key_hw_op(struct hcapi_cfa_hwop *op,
struct hcapi_cfa_key_tbl *key_tbl,
struct hcapi_cfa_key_data *key_obj,
struct hcapi_cfa_key_loc *key_loc);
u64 hcapi_get_table_page(struct hcapi_cfa_em_table *mem, u32 page);
u64 hcapi_cfa_p4_key_hash(u8 *key_data, u16 bitlen);
u64 hcapi_cfa_p58_key_hash(u8 *key_data, u16 bitlen);
#endif /* HCAPI_CFA_DEFS_H_ */

@@ -0,0 +1,137 @@
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright(c) 2019-2021 Broadcom
* All rights reserved.
*/
#include <linux/types.h>
#include "bnxt_compat.h"
#include <linux/jhash.h>
#include <linux/crc32.h>
#include "rand.h"
#include "hcapi_cfa.h"
#include "hcapi_cfa_defs.h"
static u32 hcapi_cfa_lkup_lkup3_init_cfg;
static u32 hcapi_cfa_lkup_em_seed_mem[HCAPI_CFA_LKUP_SEED_MEM_SIZE];
static bool hcapi_cfa_lkup_init;
static void hcapi_cfa_seeds_init(void)
{
int i;
u32 r;
if (hcapi_cfa_lkup_init)
return;
hcapi_cfa_lkup_init = true;
/* Initialize the lfsr */
rand_init();
/* RX and TX use the same seed values */
hcapi_cfa_lkup_lkup3_init_cfg = swahb32(rand32());
for (i = 0; i < HCAPI_CFA_LKUP_SEED_MEM_SIZE / 2; i++) {
r = swahb32(rand32());
hcapi_cfa_lkup_em_seed_mem[i * 2] = r;
r = swahb32(rand32());
hcapi_cfa_lkup_em_seed_mem[i * 2 + 1] = (r & 0x1);
}
}
static u32 hcapi_cfa_crc32_hash(u8 *key)
{
u8 *kptr = key;
u32 val1, val2;
u8 temp[4];
u32 index;
int i;
/* Do byte-wise XOR of the 52-byte HASH key first. */
index = *key;
kptr++;
	for (i = 0; i < (CFA_P4_EEM_KEY_MAX_SIZE - 1); i++) {
index = index ^ *kptr;
kptr++;
}
/* Get seeds */
val1 = hcapi_cfa_lkup_em_seed_mem[index * 2];
val2 = hcapi_cfa_lkup_em_seed_mem[index * 2 + 1];
temp[0] = (u8)(val1 >> 24);
temp[1] = (u8)(val1 >> 16);
temp[2] = (u8)(val1 >> 8);
temp[3] = (u8)(val1 & 0xff);
val1 = 0;
/* Start with seed */
if (!(val2 & 0x1))
val1 = ~(crc32(~val1, temp, 4));
val1 = ~(crc32(~val1,
key,
		       CFA_P4_EEM_KEY_MAX_SIZE));
/* End with seed */
if (val2 & 0x1)
val1 = ~(crc32(~val1, temp, 4));
return val1;
}
static u32 hcapi_cfa_lookup3_hash(u8 *in_key)
{
u32 val1;
val1 = jhash2(((u32 *)in_key),
CFA_P4_EEM_KEY_MAX_SIZE / (sizeof(u32)),
hcapi_cfa_lkup_lkup3_init_cfg);
return val1;
}
u64 hcapi_get_table_page(struct hcapi_cfa_em_table *mem, u32 page)
{
int level = 0;
u64 addr;
if (!mem)
return 0;
/* Use the level according to the num_level of page table */
level = mem->num_lvl - 1;
addr = (u64)mem->pg_tbl[level].pg_va_tbl[page];
return addr;
}
/* Approximation of HCAPI hcapi_cfa_key_hash() */
u64 hcapi_cfa_p4_key_hash(u8 *key_data, u16 bitlen)
{
u32 key0_hash;
u32 key1_hash;
u32 *key_word = (u32 *)key_data;
u32 lk3_key[CFA_P4_EEM_KEY_MAX_SIZE / sizeof(u32)];
u32 i;
/* Init the seeds if needed */
if (!hcapi_cfa_lkup_init)
hcapi_cfa_seeds_init();
key0_hash = hcapi_cfa_crc32_hash(key_data);
	for (i = 0; i < (bitlen / 8) / sizeof(u32); i++)
lk3_key[i] = swab32(key_word[i]);
key1_hash = hcapi_cfa_lookup3_hash((u8 *)lk3_key);
return ((u64)key0_hash) << 32 | (u64)key1_hash;
}
const struct hcapi_cfa_devops cfa_p4_devops = {
.hcapi_cfa_key_hash = hcapi_cfa_p4_key_hash,
};
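/* Usage sketch (hypothetical caller; assumes struct hcapi_cfa_devops from
 * hcapi_cfa.h exposes the key hash op initialized above): dispatch through
 * the devops table instead of calling hcapi_cfa_p4_key_hash() directly.
 */
static inline u64 example_p4_hash_via_devops(u8 *key_data, u16 bitlen)
{
	return cfa_p4_devops.hcapi_cfa_key_hash(key_data, bitlen);
}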

@@ -0,0 +1,452 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2019-2021 Broadcom
* All rights reserved.
*/
#ifndef _HCAPI_CFA_P4_H_
#define _HCAPI_CFA_P4_H_
#include "cfa_p40_hw.h"
/* CFA phase 4 fix formatted table(layout) ID definition */
enum cfa_p4_tbl_id {
CFA_P4_TBL_L2CTXT_TCAM = 0,
CFA_P4_TBL_L2CTXT_REMAP,
CFA_P4_TBL_PROF_TCAM,
CFA_P4_TBL_PROF_TCAM_REMAP,
CFA_P4_TBL_WC_TCAM,
CFA_P4_TBL_WC_TCAM_REC,
CFA_P4_TBL_WC_TCAM_REMAP,
CFA_P4_TBL_VEB_TCAM,
CFA_P4_TBL_SP_TCAM,
CFA_P4_TBL_PROF_SPIF_DFLT_L2CTXT,
CFA_P4_TBL_PROF_PARIF_DFLT_ACT_REC_PTR,
CFA_P4_TBL_PROF_PARIF_ERR_ACT_REC_PTR,
CFA_P4_TBL_LKUP_PARIF_DFLT_ACT_REC_PTR,
CFA_P4_TBL_MAX
};
#define CFA_P4_PROF_MAX_KEYS 4
enum cfa_p4_mac_sel_mode {
CFA_P4_MAC_SEL_MODE_FIRST = 0,
CFA_P4_MAC_SEL_MODE_LOWEST = 1,
};
struct cfa_p4_prof_key_cfg {
u8 mac_sel[CFA_P4_PROF_MAX_KEYS];
#define CFA_P4_PROF_MAC_SEL_DMAC0 BIT(0)
#define CFA_P4_PROF_MAC_SEL_T_MAC0 BIT(1)
#define CFA_P4_PROF_MAC_SEL_OUTERMOST_MAC0 BIT(2)
#define CFA_P4_PROF_MAC_SEL_DMAC1 BIT(3)
#define CFA_P4_PROF_MAC_SEL_T_MAC1 BIT(4)
#define CFA_P4_PROF_MAC_OUTERMOST_MAC1 BIT(5)
u8 pass_cnt;
enum cfa_p4_mac_sel_mode mode;
};
/* CFA action layout definition */
#define CFA_P4_ACTION_MAX_LAYOUT_SIZE 184
/**
* Action object template structure
*
* Template structure presents data fields that are necessary to know
* at the beginning of Action Builder (AB) processing. Like before the
* AB compilation. One such example could be a template that is
* flexible in size (Encap Record) and the presence of these fields
* allows for determining the template size as well as where the
* fields are located in the record.
*
* The template may also present fields that are not made visible to
* the caller by way of the action fields.
*
* Template fields also allow for additional checking on user visible
* fields. One such example could be the encap pointer behavior on a
* CFA_P4_ACT_OBJ_TYPE_ACT or CFA_P4_ACT_OBJ_TYPE_ACT_SRAM.
*/
struct cfa_p4_action_template {
/** Action Object type
*
* Controls the type of the Action Template
*/
enum {
/** Select this type to build an Action Record Object
*/
CFA_P4_ACT_OBJ_TYPE_ACT,
/** Select this type to build an Action Statistics
* Object
*/
CFA_P4_ACT_OBJ_TYPE_STAT,
/** Select this type to build a SRAM Action Record
* Object.
*/
CFA_P4_ACT_OBJ_TYPE_ACT_SRAM,
/** Select this type to build a SRAM Action
* Encapsulation Object.
*/
CFA_P4_ACT_OBJ_TYPE_ENCAP_SRAM,
/** Select this type to build a SRAM Action Modify
* Object, with IPv4 capability.
*/
/* In case of Stingray the term Modify is used for the 'NAT
* action'. Action builder is leveraged to fill in the NAT
* object which then can be referenced by the action
* record.
*/
CFA_P4_ACT_OBJ_TYPE_MODIFY_IPV4_SRAM,
/** Select this type to build a SRAM Action Source
* Property Object.
*/
/* In case of Stingray this is not a 'pure' action record.
		 * Action builder is leveraged to fill in the Source Property
* object which can then be referenced by the action
* record.
*/
CFA_P4_ACT_OBJ_TYPE_SRC_PROP_SRAM,
/** Select this type to build a SRAM Action Statistics
* Object
*/
CFA_P4_ACT_OBJ_TYPE_STAT_SRAM,
} obj_type;
/** Action Control
*
* Controls the internals of the Action Template
*
* act is valid when:
* (obj_type == CFA_P4_ACT_OBJ_TYPE_ACT)
*/
/* Stat and encap are always inline for EEM as table scope
* allocation does not allow for separate Stats allocation,
* but has the xx_inline flags as to be forward compatible
* with Stingray 2, always treated as TRUE.
*/
struct {
/** Set to CFA_HCAPI_TRUE to enable statistics
*/
u8 stat_enable;
/** Set to CFA_HCAPI_TRUE to enable statistics to be inlined
*/
u8 stat_inline;
/** Set to CFA_HCAPI_TRUE to enable encapsulation
*/
u8 encap_enable;
/** Set to CFA_HCAPI_TRUE to enable encapsulation to be inlined
*/
u8 encap_inline;
} act;
/** Modify Setting
*
* Controls the type of the Modify Action the template is
* describing
*
* modify is valid when:
	 * (obj_type == CFA_P4_ACT_OBJ_TYPE_MODIFY_IPV4_SRAM)
*/
enum {
/** Set to enable Modify of Source IPv4 Address
*/
CFA_P4_MR_REPLACE_SOURCE_IPV4 = 0,
/** Set to enable Modify of Destination IPv4 Address
*/
CFA_P4_MR_REPLACE_DEST_IPV4
} modify;
/** Encap Control
* Controls the type of encapsulation the template is
* describing
*
* encap is valid when:
* ((obj_type == CFA_P4_ACT_OBJ_TYPE_ACT) &&
* act.encap_enable) ||
* ((obj_type == CFA_P4_ACT_OBJ_TYPE_SRC_PROP_SRAM)
*/
struct {
/* Direction is required as Stingray Encap on RX is
* limited to l2 and VTAG only.
*/
/** Receive or Transmit direction
*/
u8 direction;
/** Set to CFA_HCAPI_TRUE to enable L2 capability in the
* template
*/
u8 l2_enable;
/** vtag controls the Encap Vector - VTAG Encoding, 4 bits
*
*
* CFA_P4_ACT_ENCAP_VTAGS_PUSH_0, default, no VLAN
* Tags applied
* CFA_P4_ACT_ENCAP_VTAGS_PUSH_1, adds capability to
* set 1 VLAN Tag. Action Template compile adds
* the following field to the action object
* ::TF_ER_VLAN1
* CFA_P4_ACT_ENCAP_VTAGS_PUSH_2, adds capability to
* set 2 VLAN Tags. Action Template compile adds
* the following fields to the action object
* ::TF_ER_VLAN1 and ::TF_ER_VLAN2
*
*/
enum { CFA_P4_ACT_ENCAP_VTAGS_PUSH_0 = 0,
CFA_P4_ACT_ENCAP_VTAGS_PUSH_1,
CFA_P4_ACT_ENCAP_VTAGS_PUSH_2 } vtag;
/* The remaining fields are NOT supported when
* direction is RX and ((obj_type ==
* CFA_P4_ACT_OBJ_TYPE_ACT) && act.encap_enable).
* ab_compile_layout will perform the checking and
* skip remaining fields.
*/
/** L3 Encap controls the Encap Vector - L3 Encoding,
* 3 bits. Defines the type of L3 Encapsulation the
* template is describing.
*
* CFA_P4_ACT_ENCAP_L3_NONE, default, no L3
* Encapsulation processing.
* CFA_P4_ACT_ENCAP_L3_IPV4, enables L3 IPv4
* Encapsulation.
* CFA_P4_ACT_ENCAP_L3_IPV6, enables L3 IPv6
* Encapsulation.
* CFA_P4_ACT_ENCAP_L3_MPLS_8847, enables L3 MPLS
* 8847 Encapsulation.
* CFA_P4_ACT_ENCAP_L3_MPLS_8848, enables L3 MPLS
* 8848 Encapsulation.
*
*/
enum {
/** Set to disable any L3 encapsulation
* processing, default
*/
CFA_P4_ACT_ENCAP_L3_NONE = 0,
/** Set to enable L3 IPv4 encapsulation
*/
CFA_P4_ACT_ENCAP_L3_IPV4 = 4,
/** Set to enable L3 IPv6 encapsulation
*/
CFA_P4_ACT_ENCAP_L3_IPV6 = 5,
/** Set to enable L3 MPLS 8847 encapsulation
*/
CFA_P4_ACT_ENCAP_L3_MPLS_8847 = 6,
/** Set to enable L3 MPLS 8848 encapsulation
*/
CFA_P4_ACT_ENCAP_L3_MPLS_8848 = 7
} l3;
#define CFA_P4_ACT_ENCAP_MAX_MPLS_LABELS 8
/** 1-8 labels, valid when
* (l3 == CFA_P4_ACT_ENCAP_L3_MPLS_8847) ||
* (l3 == CFA_P4_ACT_ENCAP_L3_MPLS_8848)
*
* MAX number of MPLS Labels 8.
*/
u8 l3_num_mpls_labels;
/** Set to CFA_HCAPI_TRUE to enable L4 capability in the
* template.
*
* CFA_HCAPI_TRUE adds ::TF_EN_UDP_SRC_PORT and
* ::TF_EN_UDP_DST_PORT to the template.
*/
u8 l4_enable;
/** Tunnel Encap controls the Encap Vector - Tunnel
* Encap, 3 bits. Defines the type of Tunnel
* encapsulation the template is describing
*
* CFA_P4_ACT_ENCAP_TNL_NONE, default, no Tunnel
* Encapsulation processing.
* CFA_P4_ACT_ENCAP_TNL_GENERIC_FULL
* CFA_P4_ACT_ENCAP_TNL_VXLAN. NOTE: Expects
* l4_enable set to CFA_P4_TRUE;
* CFA_P4_ACT_ENCAP_TNL_NGE. NOTE: Expects l4_enable
* set to CFA_P4_TRUE;
* CFA_P4_ACT_ENCAP_TNL_NVGRE. NOTE: only valid if
* l4_enable set to CFA_HCAPI_FALSE.
		 * CFA_P4_ACT_ENCAP_TNL_GRE. NOTE: only valid if
* l4_enable set to CFA_HCAPI_FALSE.
* CFA_P4_ACT_ENCAP_TNL_GENERIC_AFTER_TL4
* CFA_P4_ACT_ENCAP_TNL_GENERIC_AFTER_TNL
*
*/
enum {
/** Set to disable Tunnel header encapsulation
* processing, default
*/
CFA_P4_ACT_ENCAP_TNL_NONE = 0,
/** Set to enable Tunnel Generic Full header
* encapsulation
*/
CFA_P4_ACT_ENCAP_TNL_GENERIC_FULL,
/** Set to enable VXLAN header encapsulation
*/
CFA_P4_ACT_ENCAP_TNL_VXLAN,
/** Set to enable NGE (VXLAN2) header encapsulation
*/
CFA_P4_ACT_ENCAP_TNL_NGE,
/** Set to enable NVGRE header encapsulation
*/
CFA_P4_ACT_ENCAP_TNL_NVGRE,
/** Set to enable GRE header encapsulation
*/
CFA_P4_ACT_ENCAP_TNL_GRE,
/** Set to enable Generic header after Tunnel
* L4 encapsulation
*/
CFA_P4_ACT_ENCAP_TNL_GENERIC_AFTER_TL4,
/** Set to enable Generic header after Tunnel
* encapsulation
*/
CFA_P4_ACT_ENCAP_TNL_GENERIC_AFTER_TNL
} tnl;
/** Number of bytes of generic tunnel header,
* valid when
* (tnl == CFA_P4_ACT_ENCAP_TNL_GENERIC_FULL) ||
* (tnl == CFA_P4_ACT_ENCAP_TNL_GENERIC_AFTER_TL4) ||
* (tnl == CFA_P4_ACT_ENCAP_TNL_GENERIC_AFTER_TNL)
*/
u8 tnl_generic_size;
/** Number of 32b words of nge options,
* valid when
* (tnl == CFA_P4_ACT_ENCAP_TNL_NGE)
*/
u8 tnl_nge_op_len;
/* Currently not planned */
/* Custom Header */
/* u8 custom_enable; */
} encap;
};
/**
* Enumeration of SRAM entry types, used for allocation of
* fixed SRAM entities. The memory model for CFA HCAPI
* determines if an SRAM entry type is supported.
* NOTE: Any additions to this enum must be reflected on FW
* side as well.
*/
enum cfa_p4_action_sram_entry_type {
CFA_P4_ACTION_SRAM_ENTRY_TYPE_FULL_ACTION, /* SRAM Action Record */
CFA_P4_ACTION_SRAM_ENTRY_TYPE_FORMAT_0_ACTION,
CFA_P4_ACTION_SRAM_ENTRY_TYPE_FORMAT_1_ACTION,
CFA_P4_ACTION_SRAM_ENTRY_TYPE_FORMAT_2_ACTION,
CFA_P4_ACTION_SRAM_ENTRY_TYPE_FORMAT_3_ACTION,
CFA_P4_ACTION_SRAM_ENTRY_TYPE_FORMAT_4_ACTION,
CFA_P4_ACTION_SRAM_ENTRY_TYPE_ENCAP_8B, /* SRAM Action Encap
* 8 Bytes
*/
CFA_P4_ACTION_SRAM_ENTRY_TYPE_ENCAP_16B,/* SRAM Action Encap
* 16 Bytes
*/
CFA_P4_ACTION_SRAM_ENTRY_TYPE_ENCAP_64B,/* SRAM Action Encap
* 64 Bytes
*/
CFA_P4_ACTION_SRAM_ENTRY_TYPE_MODIFY_PORT_SRC,
CFA_P4_ACTION_SRAM_ENTRY_TYPE_MODIFY_PORT_DEST,
CFA_P4_ACTION_SRAM_ENTRY_TYPE_MODIFY_IPV4_SRC, /* SRAM Action Modify
* IPv4 Source
*/
CFA_P4_ACTION_SRAM_ENTRY_TYPE_MODIFY_IPV4_DEST, /* SRAM Action Modify
* IPv4 Destination
*/
CFA_P4_ACTION_SRAM_ENTRY_TYPE_SP_SMAC, /* SRAM Action Source
* Properties SMAC
*/
CFA_P4_ACTION_SRAM_ENTRY_TYPE_SP_SMAC_IPV4, /* SRAM Action Source
* Props SMAC IPv4
*/
CFA_P4_ACTION_SRAM_ENTRY_TYPE_SP_SMAC_IPV6, /* SRAM Action Source
* Props SMAC IPv6
*/
CFA_P4_ACTION_SRAM_ENTRY_TYPE_STATS_64, /* SRAM Action
* Stats 64 Bits
*/
CFA_P4_ACTION_SRAM_ENTRY_TYPE_MAX
};
/**
* SRAM Action Record structure holding either an action index or an
* action ptr.
* @act_idx: SRAM Action idx specifies the offset of the SRAM
* element within its SRAM Entry Type block. This
 * index can be written into, e.g., an L2 Context. Use
* this type for all SRAM Action Record types except
* SRAM Full Action records. Use act_ptr instead.
* @act_ptr: SRAM Full Action is special in that it needs an
* action record pointer. This pointer can be written
 * into, e.g., a Wildcard TCAM entry.
*/
union cfa_p4_action_sram_act_record {
u16 act_idx;
u32 act_ptr;
};
/**
* cfa_p4_action_param parameter definition
* @dir: receive or transmit direction
* @type: type of the sram allocation type
* @record: action record to set. The 'type' specified lists the
* record definition to use in the passed in record.
* @act_size: number of elements in act_data
* @act_data: ptr to array of action data
*/
struct cfa_p4_action_param {
u8 dir;
enum cfa_p4_action_sram_entry_type type;
union cfa_p4_action_sram_act_record record;
u32 act_size;
u64 *act_data;
};
/* EEM Key entry sizes */
#define CFA_P4_EEM_KEY_MAX_SIZE 52
#define CFA_P4_EEM_KEY_RECORD_SIZE 64
/**
* cfa_eem_entry_hdr
* @pointer: eem entry pointer
* @word1: The header is made up of two words, this is the first word.
* This field has multiple subfields, there is no suitable
* single name for it so just going with word1.
*/
struct cfa_p4_eem_entry_hdr {
u32 pointer;
u32 word1;
#define CFA_P4_EEM_ENTRY_VALID_SHIFT 31
#define CFA_P4_EEM_ENTRY_VALID_MASK 0x80000000
#define CFA_P4_EEM_ENTRY_L1_CACHEABLE_SHIFT 30
#define CFA_P4_EEM_ENTRY_L1_CACHEABLE_MASK 0x40000000
#define CFA_P4_EEM_ENTRY_STRENGTH_SHIFT 28
#define CFA_P4_EEM_ENTRY_STRENGTH_MASK 0x30000000
#define CFA_P4_EEM_ENTRY_RESERVED_SHIFT 17
#define CFA_P4_EEM_ENTRY_RESERVED_MASK 0x0FFE0000
#define CFA_P4_EEM_ENTRY_KEY_SIZE_SHIFT 8
#define CFA_P4_EEM_ENTRY_KEY_SIZE_MASK 0x0001FF00
#define CFA_P4_EEM_ENTRY_ACT_REC_SIZE_SHIFT 3
#define CFA_P4_EEM_ENTRY_ACT_REC_SIZE_MASK 0x000000F8
#define CFA_P4_EEM_ENTRY_ACT_REC_INT_SHIFT 2
#define CFA_P4_EEM_ENTRY_ACT_REC_INT_MASK 0x00000004
#define CFA_P4_EEM_ENTRY_EXT_FLOW_CTR_SHIFT 1
#define CFA_P4_EEM_ENTRY_EXT_FLOW_CTR_MASK 0x00000002
#define CFA_P4_EEM_ENTRY_ACT_PTR_MSB_SHIFT 0
#define CFA_P4_EEM_ENTRY_ACT_PTR_MSB_MASK 0x00000001
};
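/* Decode sketch (not part of the original header): pull subfields out of
 * word1 with the shift/mask pairs above; pure bit manipulation, no HW access.
 */
static inline bool example_p4_eem_entry_valid(const struct cfa_p4_eem_entry_hdr *hdr)
{
	return (hdr->word1 & CFA_P4_EEM_ENTRY_VALID_MASK) >>
	       CFA_P4_EEM_ENTRY_VALID_SHIFT;
}

static inline u32 example_p4_eem_entry_key_size(const struct cfa_p4_eem_entry_hdr *hdr)
{
	return (hdr->word1 & CFA_P4_EEM_ENTRY_KEY_SIZE_MASK) >>
	       CFA_P4_EEM_ENTRY_KEY_SIZE_SHIFT;
}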
/**
* cfa_p4_eem_key_entry
* @key: Key is 448 bits - 56 bytes
* @hdr: Header is 8 bytes long
*/
struct cfa_p4_eem_64b_entry {
u8 key[CFA_P4_EEM_KEY_RECORD_SIZE -
sizeof(struct cfa_p4_eem_entry_hdr)];
struct cfa_p4_eem_entry_hdr hdr;
};
#endif /* _HCAPI_CFA_P4_H_ */

@@ -0,0 +1,116 @@
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright(c) 2019-2021 Broadcom
* All rights reserved.
*/
#include <linux/types.h>
#include "bnxt_compat.h"
#include <linux/jhash.h>
#include <linux/crc32.h>
#include "rand.h"
#include "hcapi_cfa_defs.h"
static u32 hcapi_cfa_lkup_lkup3_init_cfg;
static u32 hcapi_cfa_lkup_em_seed_mem[HCAPI_CFA_LKUP_SEED_MEM_SIZE];
static bool hcapi_cfa_lkup_init;
static void hcapi_cfa_seeds_init(void)
{
int i;
u32 r;
if (hcapi_cfa_lkup_init)
return;
hcapi_cfa_lkup_init = true;
/* Initialize the lfsr */
rand_init();
/* RX and TX use the same seed values */
hcapi_cfa_lkup_lkup3_init_cfg = rand32();
for (i = 0; i < HCAPI_CFA_LKUP_SEED_MEM_SIZE / 2; i++) {
r = rand32();
hcapi_cfa_lkup_em_seed_mem[i * 2] = r;
r = rand32();
hcapi_cfa_lkup_em_seed_mem[i * 2 + 1] = (r & 0x1);
}
}
static u32 hcapi_cfa_crc32_hash(u8 *key)
{
u8 *kptr = key;
u32 val1, val2;
u8 temp[4];
u32 index;
int i;
	/* Do byte-wise XOR of the 80-byte HASH key first. */
index = *key;
kptr++;
for (i = 0; i < (CFA_P58_EEM_KEY_MAX_SIZE - 1); i++) {
index = index ^ *kptr;
kptr++;
}
/* Get seeds */
val1 = hcapi_cfa_lkup_em_seed_mem[index * 2];
val2 = hcapi_cfa_lkup_em_seed_mem[index * 2 + 1];
temp[0] = (u8)(val1 >> 24);
temp[1] = (u8)(val1 >> 16);
temp[2] = (u8)(val1 >> 8);
temp[3] = (u8)(val1 & 0xff);
val1 = 0;
/* Start with seed */
if (!(val2 & 0x1))
val1 = ~(crc32(~val1, temp, 4));
val1 = ~(crc32(~val1,
key,
CFA_P58_EEM_KEY_MAX_SIZE));
/* End with seed */
if (val2 & 0x1)
val1 = ~(crc32(~val1, temp, 4));
return val1;
}
static u32 hcapi_cfa_lookup3_hash(u8 *in_key)
{
u32 val1;
val1 = jhash2(((u32 *)in_key),
CFA_P58_EEM_KEY_MAX_SIZE / (sizeof(u32)),
hcapi_cfa_lkup_lkup3_init_cfg);
return val1;
}
/* Approximation of HCAPI hcapi_cfa_key_hash() */
u64 hcapi_cfa_p58_key_hash(u8 *key_data, u16 bitlen)
{
u32 key0_hash;
u32 key1_hash;
u32 *key_word = (u32 *)key_data;
u32 lk3_key[CFA_P58_EEM_KEY_MAX_SIZE / sizeof(u32)];
u32 i;
/* Init the seeds if needed */
if (!hcapi_cfa_lkup_init)
hcapi_cfa_seeds_init();
key0_hash = hcapi_cfa_crc32_hash(key_data);
	for (i = 0; i < (bitlen / (8 * sizeof(u32))); i++)
lk3_key[i] = swab32(key_word[i]);
key1_hash = hcapi_cfa_lookup3_hash((u8 *)lk3_key);
return ((u64)key0_hash) << 32 | (u64)key1_hash;
}

@@ -0,0 +1,411 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2019-2021 Broadcom
* All rights reserved.
*/
#ifndef _HCAPI_CFA_P58_H_
#define _HCAPI_CFA_P58_H_
#include "cfa_p58_hw.h"
/* EEM Key entry sizes */
#define CFA_P58_EEM_KEY_MAX_SIZE 80
#define CFA_P58_EEM_KEY_RECORD_SIZE 80
#define CFA_P58_EM_FKB_NUM_WORDS 4
#define CFA_P58_EM_FKB_NUM_ENTRIES 64
#define CFA_P58_WC_TCAM_FKB_NUM_WORDS 4
#define CFA_P58_WC_TCAM_FKB_NUM_ENTRIES 64
/* CFA phase 5.8 fix formatted table(layout) ID definition */
enum cfa_p58_tbl_id {
CFA_P58_TBL_ILT = 0,
CFA_P58_TBL_L2CTXT_TCAM,
CFA_P58_TBL_L2CTXT_REMAP,
CFA_P58_TBL_PROF_TCAM,
CFA_P58_TBL_PROF_TCAM_REMAP,
CFA_P58_TBL_WC_TCAM,
CFA_P58_TBL_WC_TCAM_REC,
CFA_P58_TBL_VEB_TCAM,
CFA_P58_TBL_SP_TCAM,
CFA_P58_TBL_PROF_PARIF_DFLT_ACT_REC_PTR, /* Default Profile TCAM/Lookup
* Action Record Ptr Table
*/
CFA_P58_TBL_PROF_PARIF_ERR_ACT_REC_PTR, /* Error Profile TCAM Miss
* Action Record Ptr Table
*/
CFA_P58_TBL_VSPT, /* VNIC/SVIF Props Table */
CFA_P58_TBL_MAX
};
#define CFA_P58_PROF_MAX_KEYS 4
enum cfa_p58_mac_sel_mode {
CFA_P58_MAC_SEL_MODE_FIRST = 0,
CFA_P58_MAC_SEL_MODE_LOWEST = 1,
};
struct cfa_p58_prof_key_cfg {
u8 mac_sel[CFA_P58_PROF_MAX_KEYS];
#define CFA_P58_PROF_MAC_SEL_DMAC0 BIT(0)
#define CFA_P58_PROF_MAC_SEL_T_MAC0 BIT(1)
#define CFA_P58_PROF_MAC_SEL_OUTERMOST_MAC0 BIT(2)
#define CFA_P58_PROF_MAC_SEL_DMAC1 BIT(3)
#define CFA_P58_PROF_MAC_SEL_T_MAC1 BIT(4)
#define CFA_P58_PROF_MAC_OUTERMOST_MAC1 BIT(5)
u8 vlan_sel[CFA_P58_PROF_MAX_KEYS];
#define CFA_P58_PROFILER_VLAN_SEL_INNER_HDR 0
#define CFA_P58_PROFILER_VLAN_SEL_TUNNEL_HDR 1
#define CFA_P58_PROFILER_VLAN_SEL_OUTERMOST_HDR 2
u8 pass_cnt;
enum cfa_p58_mac_sel_mode mode;
};
/* CFA action layout definition */
#define CFA_P58_ACTION_MAX_LAYOUT_SIZE 184
/**
* Action object template structure
*
* Template structure presents data fields that are necessary to know
* at the beginning of Action Builder (AB) processing. Like before the
* AB compilation. One such example could be a template that is
* flexible in size (Encap Record) and the presence of these fields
* allows for determining the template size as well as where the
* fields are located in the record.
*
* The template may also present fields that are not made visible to
* the caller by way of the action fields.
*
* Template fields also allow for additional checking on user visible
* fields. One such example could be the encap pointer behavior on a
* CFA_P58_ACT_OBJ_TYPE_ACT or CFA_P58_ACT_OBJ_TYPE_ACT_SRAM.
*/
struct cfa_p58_action_template {
/** Action Object type
*
* Controls the type of the Action Template
*/
enum {
/** Select this type to build an Action Record Object
*/
CFA_P58_ACT_OBJ_TYPE_ACT,
/** Select this type to build an Action Statistics
* Object
*/
CFA_P58_ACT_OBJ_TYPE_STAT,
/** Select this type to build a SRAM Action Record
* Object.
*/
CFA_P58_ACT_OBJ_TYPE_ACT_SRAM,
/** Select this type to build a SRAM Action
* Encapsulation Object.
*/
CFA_P58_ACT_OBJ_TYPE_ENCAP_SRAM,
/** Select this type to build a SRAM Action Modify
* Object, with IPv4 capability.
*/
/* In case of Stingray the term Modify is used for the 'NAT
* action'. Action builder is leveraged to fill in the NAT
* object which then can be referenced by the action
* record.
*/
CFA_P58_ACT_OBJ_TYPE_MODIFY_IPV4_SRAM,
/** Select this type to build a SRAM Action Source
* Property Object.
*/
/* In case of Stingray this is not a 'pure' action record.
		 * Action builder is leveraged to fill in the Source Property
* object which can then be referenced by the action
* record.
*/
CFA_P58_ACT_OBJ_TYPE_SRC_PROP_SRAM,
/** Select this type to build a SRAM Action Statistics
* Object
*/
CFA_P58_ACT_OBJ_TYPE_STAT_SRAM,
} obj_type;
/** Action Control
*
* Controls the internals of the Action Template
*
* act is valid when:
* (obj_type == CFA_P58_ACT_OBJ_TYPE_ACT)
*/
/* Stat and encap are always inline for EEM as table scope
* allocation does not allow for separate Stats allocation,
* but has the xx_inline flags as to be forward compatible
* with Stingray 2, always treated as TRUE.
*/
struct {
/** Set to CFA_HCAPI_TRUE to enable statistics
*/
u8 stat_enable;
/** Set to CFA_HCAPI_TRUE to enable statistics to be inlined
*/
u8 stat_inline;
/** Set to CFA_HCAPI_TRUE to enable encapsulation
*/
u8 encap_enable;
/** Set to CFA_HCAPI_TRUE to enable encapsulation to be inlined
*/
u8 encap_inline;
} act;
/** Modify Setting
*
* Controls the type of the Modify Action the template is
* describing
*
* modify is valid when:
	 * (obj_type == CFA_P58_ACT_OBJ_TYPE_MODIFY_IPV4_SRAM)
*/
enum {
/** Set to enable Modify of Source IPv4 Address
*/
CFA_P58_MR_REPLACE_SOURCE_IPV4 = 0,
/** Set to enable Modify of Destination IPv4 Address
*/
CFA_P58_MR_REPLACE_DEST_IPV4
} modify;
/** Encap Control
* Controls the type of encapsulation the template is
* describing
*
* encap is valid when:
* ((obj_type == CFA_P58_ACT_OBJ_TYPE_ACT) &&
* act.encap_enable) ||
	 * (obj_type == CFA_P58_ACT_OBJ_TYPE_SRC_PROP_SRAM)
*/
struct {
/* Direction is required as Stingray Encap on RX is
* limited to l2 and VTAG only.
*/
/** Receive or Transmit direction
*/
u8 direction;
/** Set to CFA_HCAPI_TRUE to enable L2 capability in the
* template
*/
u8 l2_enable;
/** vtag controls the Encap Vector - VTAG Encoding, 4 bits
*
*
* CFA_P58_ACT_ENCAP_VTAGS_PUSH_0, default, no VLAN
* Tags applied
* CFA_P58_ACT_ENCAP_VTAGS_PUSH_1, adds capability to
* set 1 VLAN Tag. Action Template compile adds
* the following field to the action object
* ::TF_ER_VLAN1
* CFA_P58_ACT_ENCAP_VTAGS_PUSH_2, adds capability to
* set 2 VLAN Tags. Action Template compile adds
* the following fields to the action object
* ::TF_ER_VLAN1 and ::TF_ER_VLAN2
*
*/
enum { CFA_P58_ACT_ENCAP_VTAGS_PUSH_0 = 0,
CFA_P58_ACT_ENCAP_VTAGS_PUSH_1,
CFA_P58_ACT_ENCAP_VTAGS_PUSH_2 } vtag;
/* The remaining fields are NOT supported when
* direction is RX and ((obj_type ==
* CFA_P58_ACT_OBJ_TYPE_ACT) && act.encap_enable).
* ab_compile_layout will perform the checking and
* skip remaining fields.
*/
/** L3 Encap controls the Encap Vector - L3 Encoding,
* 3 bits. Defines the type of L3 Encapsulation the
* template is describing.
*
* CFA_P58_ACT_ENCAP_L3_NONE, default, no L3
* Encapsulation processing.
* CFA_P58_ACT_ENCAP_L3_IPV4, enables L3 IPv4
* Encapsulation.
* CFA_P58_ACT_ENCAP_L3_IPV6, enables L3 IPv6
* Encapsulation.
* CFA_P58_ACT_ENCAP_L3_MPLS_8847, enables L3 MPLS
* 8847 Encapsulation.
* CFA_P58_ACT_ENCAP_L3_MPLS_8848, enables L3 MPLS
* 8848 Encapsulation.
*
*/
enum {
/** Set to disable any L3 encapsulation
* processing, default
*/
CFA_P58_ACT_ENCAP_L3_NONE = 0,
/** Set to enable L3 IPv4 encapsulation
*/
CFA_P58_ACT_ENCAP_L3_IPV4 = 4,
/** Set to enable L3 IPv6 encapsulation
*/
CFA_P58_ACT_ENCAP_L3_IPV6 = 5,
/** Set to enable L3 MPLS 8847 encapsulation
*/
CFA_P58_ACT_ENCAP_L3_MPLS_8847 = 6,
/** Set to enable L3 MPLS 8848 encapsulation
*/
CFA_P58_ACT_ENCAP_L3_MPLS_8848 = 7
} l3;
#define CFA_P58_ACT_ENCAP_MAX_MPLS_LABELS 8
/** 1-8 labels, valid when
* (l3 == CFA_P58_ACT_ENCAP_L3_MPLS_8847) ||
* (l3 == CFA_P58_ACT_ENCAP_L3_MPLS_8848)
*
* MAX number of MPLS Labels 8.
*/
u8 l3_num_mpls_labels;
/** Set to CFA_HCAPI_TRUE to enable L4 capability in the
* template.
*
* CFA_HCAPI_TRUE adds ::TF_EN_UDP_SRC_PORT and
* ::TF_EN_UDP_DST_PORT to the template.
*/
u8 l4_enable;
/** Tunnel Encap controls the Encap Vector - Tunnel
* Encap, 3 bits. Defines the type of Tunnel
* encapsulation the template is describing
*
* CFA_P58_ACT_ENCAP_TNL_NONE, default, no Tunnel
* Encapsulation processing.
* CFA_P58_ACT_ENCAP_TNL_GENERIC_FULL
* CFA_P58_ACT_ENCAP_TNL_VXLAN. NOTE: Expects
* l4_enable set to CFA_P58_TRUE;
* CFA_P58_ACT_ENCAP_TNL_NGE. NOTE: Expects l4_enable
* set to CFA_P58_TRUE;
* CFA_P58_ACT_ENCAP_TNL_NVGRE. NOTE: only valid if
* l4_enable set to CFA_HCAPI_FALSE.
		 * CFA_P58_ACT_ENCAP_TNL_GRE. NOTE: only valid if
* l4_enable set to CFA_HCAPI_FALSE.
* CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TL4
* CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TNL
*
*/
enum {
/** Set to disable Tunnel header encapsulation
* processing, default
*/
CFA_P58_ACT_ENCAP_TNL_NONE = 0,
/** Set to enable Tunnel Generic Full header
* encapsulation
*/
CFA_P58_ACT_ENCAP_TNL_GENERIC_FULL,
/** Set to enable VXLAN header encapsulation
*/
CFA_P58_ACT_ENCAP_TNL_VXLAN,
/** Set to enable NGE (VXLAN2) header encapsulation
*/
CFA_P58_ACT_ENCAP_TNL_NGE,
/** Set to enable NVGRE header encapsulation
*/
CFA_P58_ACT_ENCAP_TNL_NVGRE,
/** Set to enable GRE header encapsulation
*/
CFA_P58_ACT_ENCAP_TNL_GRE,
/** Set to enable Generic header after Tunnel
* L4 encapsulation
*/
CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TL4,
/** Set to enable Generic header after Tunnel
* encapsulation
*/
CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TNL
} tnl;
/** Number of bytes of generic tunnel header,
* valid when
* (tnl == CFA_P58_ACT_ENCAP_TNL_GENERIC_FULL) ||
* (tnl == CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TL4) ||
* (tnl == CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TNL)
*/
u8 tnl_generic_size;
/** Number of 32b words of nge options,
* valid when
* (tnl == CFA_P58_ACT_ENCAP_TNL_NGE)
*/
u8 tnl_nge_op_len;
/* Currently not planned */
/* Custom Header */
/* u8 custom_enable; */
} encap;
};
/**
* Enumeration of SRAM entry types, used for allocation of
* fixed SRAM entities. The memory model for CFA HCAPI
* determines if an SRAM entry type is supported.
* NOTE: Any additions to this enum must be reflected on FW
* side as well.
*/
enum cfa_p58_action_sram_entry_type {
CFA_P58_ACTION_SRAM_ENTRY_TYPE_ACT, /* SRAM Action Record */
CFA_P58_ACTION_SRAM_ENTRY_TYPE_ENCAP_8B, /* SRAM Action Encap
* 8 Bytes
*/
CFA_P58_ACTION_SRAM_ENTRY_TYPE_ENCAP_16B, /* SRAM Action Encap
* 16 Bytes
*/
CFA_P58_ACTION_SRAM_ENTRY_TYPE_ENCAP_64B, /* SRAM Action Encap
* 64 Bytes
*/
CFA_P58_ACTION_SRAM_ENTRY_TYPE_MODIFY_IPV4_SRC, /* SRAM Action Modify
* IPv4 Source
*/
CFA_P58_ACTION_SRAM_ENTRY_TYPE_MODIFY_IPV4_DEST,/* SRAM Action Modify
* IPv4 Destination
*/
CFA_P58_ACTION_SRAM_ENTRY_TYPE_SP_SMAC, /* SRAM Action Source
* Properties SMAC
*/
CFA_P58_ACTION_SRAM_ENTRY_TYPE_SP_SMAC_IPV4, /* SRAM Action Source
* Props SMAC IPv4
*/
CFA_P58_ACTION_SRAM_ENTRY_TYPE_SP_SMAC_IPV6, /* SRAM Action Source
* Props SMAC IPv6
*/
CFA_P58_ACTION_SRAM_ENTRY_TYPE_STATS_64, /* SRAM Action Stats
* 64 Bits
*/
CFA_P58_ACTION_SRAM_ENTRY_TYPE_MAX
};
/**
* SRAM Action Record structure holding either an action index or an
* action ptr.
* @act_idx: SRAM Action idx specifies the offset of the SRAM
* element within its SRAM Entry Type block. This
 * index can be written into, e.g., an L2 Context. Use
* this type for all SRAM Action Record types except
* SRAM Full Action records. Use act_ptr instead.
* @act_ptr: SRAM Full Action is special in that it needs an
* action record pointer. This pointer can be written
 * into, e.g., a Wildcard TCAM entry.
*/
union cfa_p58_action_sram_act_record {
u16 act_idx;
u32 act_ptr;
};
/**
* cfa_p58_action_param parameter definition
* @dir: receive or transmit direction
* @type: type of the sram allocation type
* @record: action record to set. The 'type' specified lists the
* record definition to use in the passed in record.
* @act_size: number of elements in act_data
* @act_data: ptr to array of action data
*/
struct cfa_p58_action_param {
u8 dir;
enum cfa_p58_action_sram_entry_type type;
union cfa_p58_action_sram_act_record record;
u32 act_size;
u64 *act_data;
};
#endif /* _HCAPI_CFA_P58_H_ */

@@ -0,0 +1,180 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2019-2023 Broadcom
* All rights reserved.
*/
#ifndef _CFA_RESOURCES_H_
#define _CFA_RESOURCES_H_
/**
* @addtogroup CFA_RESC_TYPES CFA Resource Types
* \ingroup CFA_V3
* CFA HW resource types and sub types definition
* @{
*/
/**
* CFA hardware Resource Type
*
* Depending on the type of CFA hardware resource, the resources are divided
* into multiple groups. This group is identified by resource type. The
* following enum defines all the CFA resource types
*/
enum cfa_resource_type {
/** CFA resources using fixed identifiers (IDM)
*/
CFA_RTYPE_IDENT = 0,
/** CFA resources accessed by fixed indices (TBM)
*/
CFA_RTYPE_IDX_TBL,
/** CFA TCAM resources
*/
CFA_RTYPE_TCAM,
/** CFA interface tables (IFM)
*/
CFA_RTYPE_IF_TBL,
/** CFA resources accessed using CFA memory manager index
*/
CFA_RTYPE_CMM,
/** CFA Global fields (e.g. registers which configure global settings)
*/
CFA_RTYPE_GLB_FLD,
/** CFA Firmware internal ONLY definitions reserved starting with 12.
*/
CFA_RTYPE_HW_MAX = 12,
	/** Firmware-only types
*/
/** CFA Firmware Session Manager
*/
CFA_RTYPE_SM = CFA_RTYPE_HW_MAX,
/** CFA Firmware Table Scope Manager
*/
CFA_RTYPE_TSM,
/** CFA Firmware Table Scope Instance Manager
*/
CFA_RTYPE_TIM,
/** CFA Firmware Global Id Manager
*/
CFA_RTYPE_GIM,
CFA_RTYPE_MAX
};
/**
* Resource sub-types for CFA_RTYPE_IDENT
*/
enum cfa_resource_subtype_ident {
CFA_RSUBTYPE_IDENT_L2CTX = 0, /**< Remapped L2 contexts */
CFA_RSUBTYPE_IDENT_PROF_FUNC, /**< Profile functions */
CFA_RSUBTYPE_IDENT_WC_PROF, /**< WC TCAM profile IDs */
CFA_RSUBTYPE_IDENT_EM_PROF, /**< EM profile IDs */
CFA_RSUBTYPE_IDENT_L2_FUNC, /**< L2 functions */
CFA_RSUBTYPE_IDENT_LAG_ID, /**< LAG IDs */
CFA_RSUBTYPE_IDENT_MAX
};
/**
 * Resource sub-types for CFA_RTYPE_IDX_TBL
*/
enum cfa_resource_subtype_idx_tbl {
CFA_RSUBTYPE_IDX_TBL_STAT64 = 0, /**< Statistics */
CFA_RSUBTYPE_IDX_TBL_METER_PROF, /**< Meter profile */
CFA_RSUBTYPE_IDX_TBL_METER_INST, /**< Meter instances */
CFA_RSUBTYPE_IDX_TBL_METER_DROP_CNT, /**< Meter Drop Count */
CFA_RSUBTYPE_IDX_TBL_MIRROR, /**< Mirror table */
/* Metadata mask for profiler block */
CFA_RSUBTYPE_IDX_TBL_METADATA_PROF,
/* Metadata mask for lookup block (for recycling) */
CFA_RSUBTYPE_IDX_TBL_METADATA_LKUP,
/* Metadata mask for action block */
CFA_RSUBTYPE_IDX_TBL_METADATA_ACT,
CFA_RSUBTYPE_IDX_TBL_CT_STATE, /**< Connection tracking */
CFA_RSUBTYPE_IDX_TBL_RANGE_PROF, /**< Range profile */
CFA_RSUBTYPE_IDX_TBL_RANGE_ENTRY, /**< Range entry */
CFA_RSUBTYPE_IDX_TBL_EM_FKB, /**< EM FKB table */
CFA_RSUBTYPE_IDX_TBL_WC_FKB, /**< WC TCAM FKB table */
CFA_RSUBTYPE_IDX_TBL_EM_FKB_MASK, /**< EM FKB Mask table */
CFA_RSUBTYPE_IDX_TBL_MAX
};
/**
* Resource sub-types for CFA_RTYPE_TCAM
*/
enum cfa_resource_subtype_tcam {
CFA_RSUBTYPE_TCAM_L2CTX = 0, /**< L2 contexts TCAM */
CFA_RSUBTYPE_TCAM_PROF_TCAM, /**< Profile TCAM */
CFA_RSUBTYPE_TCAM_WC, /**< WC lookup TCAM */
CFA_RSUBTYPE_TCAM_CT_RULE, /**< Connection tracking TCAM */
CFA_RSUBTYPE_TCAM_VEB, /**< VEB TCAM */
CFA_RSUBTYPE_TCAM_FEATURE_CHAIN, /**< Feature chain TCAM */
CFA_RSUBTYPE_TCAM_MAX
};
/**
* Resource sub-types for CFA_RTYPE_IF_TBL
*/
enum cfa_resource_subtype_if_tbl {
/** ILT table indexed by SVIF
*/
CFA_RSUBTYPE_IF_TBL_ILT = 0,
/** VSPT table
*/
CFA_RSUBTYPE_IF_TBL_VSPT,
/** Profiler partition default action record pointer
*/
CFA_RSUBTYPE_IF_TBL_PROF_PARIF_DFLT_ACT_PTR,
/** Profiler partition error action record pointer
*/
CFA_RSUBTYPE_IF_TBL_PROF_PARIF_ERR_ACT_PTR,
CFA_RSUBTYPE_IF_TBL_EPOCH0, /**< Epoch0 mask table */
CFA_RSUBTYPE_IF_TBL_EPOCH1, /**< Epoch1 mask table */
CFA_RSUBTYPE_IF_TBL_LAG, /**< LAG Table */
CFA_RSUBTYPE_IF_TBL_MAX
};
/**
* Resource sub-types for CFA_RTYPE_CMM
*/
enum cfa_resource_subtype_cmm {
	CFA_RSUBTYPE_CMM_INT_ACT_B0 = 0, /**< SRAM Bank 0 */
	CFA_RSUBTYPE_CMM_INT_ACT_B1, /**< SRAM Bank 1 */
	CFA_RSUBTYPE_CMM_INT_ACT_B2, /**< SRAM Bank 2 */
	CFA_RSUBTYPE_CMM_INT_ACT_B3, /**< SRAM Bank 3 */
CFA_RSUBTYPE_CMM_ACT, /**< Action table */
CFA_RSUBTYPE_CMM_LKUP, /**< EM lookup table */
CFA_RSUBTYPE_CMM_MAX
};
#define CFA_RSUBTYPE_GLB_FLD_MAX 1
#define CFA_RSUBTYPE_SM_MAX 1
#define CFA_RSUBTYPE_TSM_MAX 1
#define CFA_RSUBTYPE_TIM_MAX 1
/**
* Resource sub-types for CFA_RTYPE_GIM
*/
enum cfa_resource_subtype_gim {
CFA_RSUBTYPE_GIM_DOMAIN_0 = 0, /**< Domain 0 */
CFA_RSUBTYPE_GIM_DOMAIN_1, /**< Domain 1 */
CFA_RSUBTYPE_GIM_DOMAIN_2, /**< Domain 2 */
CFA_RSUBTYPE_GIM_DOMAIN_3, /**< Domain 3 */
CFA_RSUBTYPE_GIM_MAX
};
/**
* Total number of resource subtypes
*/
#define CFA_NUM_RSUBTYPES \
(CFA_RSUBTYPE_IDENT_MAX + CFA_RSUBTYPE_IDX_TBL_MAX + \
CFA_RSUBTYPE_TCAM_MAX + CFA_RSUBTYPE_IF_TBL_MAX + \
CFA_RSUBTYPE_CMM_MAX + CFA_RSUBTYPE_GLB_FLD_MAX + \
CFA_RSUBTYPE_SM_MAX + CFA_RSUBTYPE_TSM_MAX + CFA_RSUBTYPE_TIM_MAX + \
CFA_RSUBTYPE_GIM_MAX)
/**
* @}
*/
#endif /* _CFA_RESOURCES_H_ */

@@ -0,0 +1,107 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2019-2023 Broadcom
* All rights reserved.
*/
#ifndef _CFA_TYPES_H_
#define _CFA_TYPES_H_
/*
*
* The primary goal of the CFA common HW access framework is to unify the CFA
* resource management and hardware programming design for different CFA
* applications so the CFA hardware can be properly shared with different
 * entities. This framework is a collection of the following CFA resource
* managers and Libraries listed below:
*
* 1. CFA Memory Manager
* 2. CFA Object Instance Manager
* 3. CFA Session Manager
* 4. CFA TCAM Manager
* 5. CFA Table Scope Manager
* 6. CFA Hardware Access Library
* 7. CFA Builder Library
* 8. CFA Index table manager
* 9. CFA Utilities Library
*
*/
/*
* CFA HW version definition
*/
enum cfa_ver {
CFA_P40 = 0, /* CFA phase 4.0 */
CFA_P45 = 1, /* CFA phase 4.5 */
CFA_P58 = 2, /* CFA phase 5.8 */
CFA_P59 = 3, /* CFA phase 5.9 */
CFA_P70 = 4, /* CFA phase 7.0 */
CFA_PMAX = 5
};
/*
* CFA direction definition
*/
enum cfa_dir {
CFA_DIR_RX = 0, /* Receive */
CFA_DIR_TX = 1, /* Transmit */
CFA_DIR_MAX = 2
};
/*
* CFA Remap Table Type
*/
enum cfa_remap_tbl_type {
CFA_REMAP_TBL_TYPE_NORMAL = 0,
CFA_REMAP_TBL_TYPE_BYPASS,
CFA_REMAP_TBL_TYPE_MAX
};
/*
* CFA tracker types
*/
enum cfa_track_type {
CFA_TRACK_TYPE_INVALID = 0, /* Invalid */
CFA_TRACK_TYPE_SID, /* Tracked by session id */
CFA_TRACK_TYPE_FIRST = CFA_TRACK_TYPE_SID,
CFA_TRACK_TYPE_FID, /* Tracked by function id */
CFA_TRACK_TYPE_MAX
};
/*
* CFA Region Type
*/
enum cfa_region_type {
CFA_REGION_TYPE_LKUP = 0,
CFA_REGION_TYPE_ACT,
CFA_REGION_TYPE_MAX
};
/*
* CFA application type
*/
enum cfa_app_type {
CFA_APP_TYPE_AFM = 0, /* AFM firmware */
CFA_APP_TYPE_TF = 1, /* TruFlow firmware */
CFA_APP_TYPE_MAX = 2,
CFA_APP_TYPE_INVALID = CFA_APP_TYPE_MAX,
};
/*
* CFA FID types
*/
enum cfa_fid_type {
CFA_FID_TYPE_FID = 0, /* General */
CFA_FID_TYPE_RFID = 1, /* Representor */
CFA_FID_TYPE_EFID = 2 /* Endpoint */
};
/*
 * CFA search modes
*/
enum cfa_srch_mode {
CFA_SRCH_MODE_FIRST = 0, /* Start new iteration */
CFA_SRCH_MODE_NEXT, /* Next item in iteration */
CFA_SRCH_MODE_MAX
};
#endif /* _CFA_TYPES_H_ */

@@ -0,0 +1,19 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2019-2023 Broadcom
* All rights reserved.
*/
#ifndef _CFA_UTIL_H_
#define _CFA_UTIL_H_
/*
* CFA specific utility macros
*/
/* Bounds (closed interval) check helper macro */
#define CFA_CHECK_BOUNDS(x, l, h) (((x) >= (l)) && ((x) <= (h)))
#define CFA_CHECK_UPPER_BOUNDS(x, h) ((x) <= (h))
#define CFA_ALIGN_LN2(x) (((x) < 3U) ? (x) : 32U - __builtin_clz((x) - 1U) + 1U)
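/* Worked examples (derived from the definition above):
 * CFA_ALIGN_LN2(1) = 1, CFA_ALIGN_LN2(2) = 2, CFA_ALIGN_LN2(4) = 3,
 * CFA_ALIGN_LN2(8) = 4; for a power of two x this is log2(x) + 1, which
 * cfa_mm.c uses as an index into its per-size block lists.
 */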
#endif /* _CFA_UTIL_H_ */

@@ -0,0 +1,57 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2019-2023 Broadcom
* All rights reserved.
*/
#ifndef _SYS_UTIL_H_
#define _SYS_UTIL_H_
#include "linux/kernel.h"
#define INVALID_U64 U64_MAX
#define INVALID_U32 U32_MAX
#define INVALID_U16 U16_MAX
#define INVALID_U8 U8_MAX
#define ALIGN_256(x) ALIGN(x, 256)
#define ALIGN_128(x) ALIGN(x, 128)
#define ALIGN_64(x) ALIGN(x, 64)
#define ALIGN_32(x) ALIGN(x, 32)
#define ALIGN_16(x) ALIGN(x, 16)
#define ALIGN_8(x) ALIGN(x, 8)
#define ALIGN_4(x) ALIGN(x, 4)
#define NUM_ALIGN_UNITS(x, unit) (((x) + (unit) - (1)) / (unit))
#define NUM_WORDS_ALIGN_32BIT(x) (ALIGN_32(x) / BITS_PER_WORD)
#define NUM_WORDS_ALIGN_64BIT(x) (ALIGN_64(x) / BITS_PER_WORD)
#define NUM_WORDS_ALIGN_128BIT(x) (ALIGN_128(x) / BITS_PER_WORD)
#define NUM_WORDS_ALIGN_256BIT(x) (ALIGN_256(x) / BITS_PER_WORD)
#ifndef MAX
#define MAX(A, B) ((A) > (B) ? (A) : (B))
#endif
#ifndef MIN
#define MIN(A, B) ((A) < (B) ? (A) : (B))
#endif
#ifndef STRINGIFY
#define STRINGIFY(X) #X
#endif
/* Helper macros to get/set/clear Nth bit in a u8 bitmap */
#define BMP_GETBIT(BMP, N) \
((*((u8 *)(BMP) + ((N) / 8)) >> ((N) % 8)) & 0x1)
#define BMP_SETBIT(BMP, N) \
do { \
u32 n = (N); \
*((u8 *)(BMP) + (n / 8)) |= (0x1U << (n % 8)); \
} while (0)
#define BMP_CLRBIT(BMP, N) \
do { \
u32 n = (N); \
*((u8 *)(BMP) + (n / 8)) &= \
(u8)(~(0x1U << (n % 8))); \
} while (0)
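/* Usage sketch (not part of the original header): round-trip one bit
 * through the helpers above on a 32-record bitmap.
 */
static inline bool example_bmp_roundtrip(void)
{
	u8 bmap[4] = { 0 };	/* 4 bytes == 32 record bits */

	BMP_SETBIT(bmap, 10);
	return BMP_GETBIT(bmap, 10) == 0x1;
}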
#endif /* _SYS_UTIL_H_ */

@@ -0,0 +1,673 @@
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright(c) 2019-2023 Broadcom
* All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/string.h>
#include "sys_util.h"
#include "cfa_util.h"
#include "cfa_types.h"
#include "cfa_mm.h"
#include "bnxt_compat.h"
#define CFA_MM_SIGNATURE 0xCFA66C89
#define CFA_MM_INVALID8 U8_MAX
#define CFA_MM_INVALID16 U16_MAX
#define CFA_MM_INVALID32 U32_MAX
#define CFA_MM_INVALID64 U64_MAX
#define CFA_MM_MAX_RECORDS (64 * 1024 * 1024)
#define CFA_MM_MAX_CONTIG_RECORDS 8
#define CFA_MM_RECORDS_PER_BYTE 8
#define CFA_MM_MIN_RECORDS_PER_BLOCK 8
/* CFA Records block
*
* Structure used to store the CFA record block info
*/
struct cfa_mm_blk {
/* Index of the previous block in the list */
u32 prev_blk_idx;
/* Index of the next block in the list */
u32 next_blk_idx;
/* Number of free records available in the block */
u16 num_free_records;
/* Location of first free record in the block */
u16 first_free_record;
/* Number of contiguous records */
u16 num_contig_records;
/* Reserved for future use */
u16 reserved;
};
/* CFA Record block list
*
* Structure used to store CFA Record block list info
*/
struct cfa_mm_blk_list {
/* Index of the first block in the list */
u32 first_blk_idx;
/* Index of the current block having free records */
u32 current_blk_idx;
};
/* CFA memory manager Database
*
* Structure used to store CFA memory manager database info
*/
struct cfa_mm {
/* Signature of the CFA Memory Manager Database */
u32 signature;
/* Maximum number of CFA Records */
u32 max_records;
	/* Number of CFA Records in use */
u32 records_in_use;
/* Number of Records per block */
u16 records_per_block;
/* Maximum number of contiguous records */
u16 max_contig_records;
/**
* Block list table stores the info of lists of blocks
* for various numbers of contiguous records
*/
struct cfa_mm_blk_list *blk_list_tbl;
/**
* Block table stores the info about the blocks of CFA Records
*/
struct cfa_mm_blk *blk_tbl;
/**
* Block bitmap table stores bit maps for the blocks of CFA Records
*/
u8 *blk_bmap_tbl;
};
static void cfa_mm_db_info(u32 max_records, u16 max_contig_records,
u16 *records_per_block, u32 *num_blocks,
u16 *num_lists, u32 *db_size)
{
*records_per_block =
MAX(CFA_MM_MIN_RECORDS_PER_BLOCK, max_contig_records);
*num_blocks = (max_records / (*records_per_block));
*num_lists = CFA_ALIGN_LN2(max_contig_records) + 1;
*db_size = sizeof(struct cfa_mm) +
((*num_blocks) * NUM_ALIGN_UNITS(*records_per_block,
CFA_MM_RECORDS_PER_BYTE)) +
((*num_blocks) * sizeof(struct cfa_mm_blk)) +
((*num_lists) * sizeof(struct cfa_mm_blk_list));
}
int cfa_mm_query(struct cfa_mm_query_parms *parms)
{
u16 max_contig_records, num_lists, records_per_block;
u32 max_records, num_blocks;
if (!parms) {
netdev_dbg(NULL, "parms = %p\n", parms);
return -EINVAL;
}
max_records = parms->max_records;
max_contig_records = (u16)parms->max_contig_records;
if (!(CFA_CHECK_BOUNDS(max_records, 1, CFA_MM_MAX_RECORDS) &&
is_power_of_2(max_contig_records) &&
CFA_CHECK_BOUNDS(max_contig_records, 1,
CFA_MM_MAX_CONTIG_RECORDS))) {
netdev_dbg(NULL, "parms = %p, max_records = %d, max_contig_records = %d\n",
parms, parms->max_records,
parms->max_contig_records);
return -EINVAL;
}
cfa_mm_db_info(max_records, max_contig_records, &records_per_block,
&num_blocks, &num_lists, &parms->db_size);
return 0;
}
int cfa_mm_open(void *cmm, struct cfa_mm_open_parms *parms)
{
u16 max_contig_records, num_lists, records_per_block;
struct cfa_mm *context = (struct cfa_mm *)cmm;
u32 max_records, num_blocks, db_size, i;
if (!cmm || !parms) {
netdev_dbg(NULL, "cmm = %p, parms = %p\n", cmm, parms);
return -EINVAL;
}
max_records = parms->max_records;
max_contig_records = (u16)parms->max_contig_records;
if (!(CFA_CHECK_BOUNDS(max_records, 1, CFA_MM_MAX_RECORDS) &&
is_power_of_2(max_contig_records) &&
CFA_CHECK_BOUNDS(max_contig_records, 1,
CFA_MM_MAX_CONTIG_RECORDS))) {
netdev_dbg(NULL, "cmm = %p, parms = %p, db_mem_size = %d, ",
cmm, parms, parms->db_mem_size);
netdev_dbg(NULL, "max_records = %d max_contig_records = %d\n",
max_records, max_contig_records);
return -EINVAL;
}
cfa_mm_db_info(max_records, max_contig_records, &records_per_block,
&num_blocks, &num_lists, &db_size);
if (parms->db_mem_size < db_size) {
netdev_dbg(NULL, "cmm = %p, parms = %p, db_mem_size = %d, ",
cmm, parms, parms->db_mem_size);
netdev_dbg(NULL, "max_records = %d max_contig_records = %d\n",
max_records, max_contig_records);
return -EINVAL;
}
memset(context, 0, parms->db_mem_size);
context->signature = CFA_MM_SIGNATURE;
context->max_records = max_records;
context->records_in_use = 0;
context->records_per_block = records_per_block;
context->max_contig_records = max_contig_records;
context->blk_list_tbl = (struct cfa_mm_blk_list *)(context + 1);
context->blk_tbl =
(struct cfa_mm_blk *)(context->blk_list_tbl + num_lists);
context->blk_bmap_tbl = (u8 *)(context->blk_tbl + num_blocks);
context->blk_list_tbl[0].first_blk_idx = 0;
context->blk_list_tbl[0].current_blk_idx = 0;
for (i = 1; i < num_lists; i++) {
context->blk_list_tbl[i].first_blk_idx = CFA_MM_INVALID32;
context->blk_list_tbl[i].current_blk_idx = CFA_MM_INVALID32;
}
for (i = 0; i < num_blocks; i++) {
context->blk_tbl[i].prev_blk_idx = i - 1;
context->blk_tbl[i].next_blk_idx = i + 1;
context->blk_tbl[i].num_free_records = records_per_block;
context->blk_tbl[i].first_free_record = 0;
context->blk_tbl[i].num_contig_records = 0;
}
context->blk_tbl[num_blocks - 1].next_blk_idx = CFA_MM_INVALID32;
memset(context->blk_bmap_tbl, 0,
num_blocks * NUM_ALIGN_UNITS(records_per_block,
CFA_MM_RECORDS_PER_BYTE));
return 0;
}
int cfa_mm_close(void *cmm)
{
struct cfa_mm *context = (struct cfa_mm *)cmm;
u16 num_lists, records_per_block;
u32 db_size, num_blocks;
if (!cmm || context->signature != CFA_MM_SIGNATURE) {
netdev_err(NULL, "cmm = %p\n", cmm);
return -EINVAL;
}
cfa_mm_db_info(context->max_records, context->max_contig_records,
&records_per_block, &num_blocks, &num_lists, &db_size);
memset(cmm, 0, db_size);
return 0;
}
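/* Lifecycle sketch (not part of the original file): size the database with
 * cfa_mm_query(), allocate backing memory, then open it. kzalloc()/kfree()
 * assume <linux/slab.h>; the parms fields used here are exactly the ones
 * referenced by the functions above.
 */
static int example_cfa_mm_setup(void **cmm_out)
{
	struct cfa_mm_query_parms qparms = {
		.max_records = 1024,	/* example sizing only */
		.max_contig_records = 8,
	};
	struct cfa_mm_open_parms oparms;
	void *cmm;
	int rc;

	rc = cfa_mm_query(&qparms);
	if (rc)
		return rc;

	cmm = kzalloc(qparms.db_size, GFP_KERNEL);
	if (!cmm)
		return -ENOMEM;

	oparms.db_mem_size = qparms.db_size;
	oparms.max_records = qparms.max_records;
	oparms.max_contig_records = qparms.max_contig_records;

	rc = cfa_mm_open(cmm, &oparms);
	if (rc)
		kfree(cmm);
	else
		*cmm_out = cmm;
	return rc;
}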
static u32 cfa_mm_blk_alloc(struct cfa_mm *context)
{
struct cfa_mm_blk_list *free_list;
u32 blk_idx;
free_list = context->blk_list_tbl;
blk_idx = free_list->first_blk_idx;
if (blk_idx == CFA_MM_INVALID32) {
netdev_err(NULL, "Out of record blocks\n");
return CFA_MM_INVALID32;
}
free_list->first_blk_idx =
context->blk_tbl[free_list->first_blk_idx].next_blk_idx;
free_list->current_blk_idx = free_list->first_blk_idx;
if (free_list->first_blk_idx != CFA_MM_INVALID32) {
context->blk_tbl[free_list->first_blk_idx].prev_blk_idx =
CFA_MM_INVALID32;
}
context->blk_tbl[blk_idx].prev_blk_idx = CFA_MM_INVALID32;
context->blk_tbl[blk_idx].next_blk_idx = CFA_MM_INVALID32;
return blk_idx;
}
static void cfa_mm_blk_free(struct cfa_mm *context, u32 blk_idx)
{
struct cfa_mm_blk_list *free_list = context->blk_list_tbl;
context->blk_tbl[blk_idx].prev_blk_idx = CFA_MM_INVALID32;
context->blk_tbl[blk_idx].next_blk_idx = free_list->first_blk_idx;
context->blk_tbl[blk_idx].num_free_records = context->records_per_block;
context->blk_tbl[blk_idx].first_free_record = 0;
context->blk_tbl[blk_idx].num_contig_records = 0;
if (free_list->first_blk_idx != CFA_MM_INVALID32) {
context->blk_tbl[free_list->first_blk_idx].prev_blk_idx =
blk_idx;
}
free_list->first_blk_idx = blk_idx;
free_list->current_blk_idx = blk_idx;
}
static void cfa_mm_blk_insert(struct cfa_mm *context,
struct cfa_mm_blk_list *blk_list,
u32 blk_idx)
{
if (blk_list->first_blk_idx == CFA_MM_INVALID32) {
blk_list->first_blk_idx = blk_idx;
blk_list->current_blk_idx = blk_idx;
} else {
struct cfa_mm_blk *blk_info = &context->blk_tbl[blk_idx];
blk_info->prev_blk_idx = CFA_MM_INVALID32;
blk_info->next_blk_idx = blk_list->first_blk_idx;
context->blk_tbl[blk_list->first_blk_idx].prev_blk_idx =
blk_idx;
blk_list->first_blk_idx = blk_idx;
blk_list->current_blk_idx = blk_idx;
}
}
static void cfa_mm_blk_delete(struct cfa_mm *context,
struct cfa_mm_blk_list *blk_list,
u32 blk_idx)
{
struct cfa_mm_blk *blk_info = &context->blk_tbl[blk_idx];
if (blk_list->first_blk_idx == CFA_MM_INVALID32)
return;
if (blk_list->first_blk_idx == blk_idx) {
blk_list->first_blk_idx = blk_info->next_blk_idx;
if (blk_list->first_blk_idx != CFA_MM_INVALID32) {
context->blk_tbl[blk_list->first_blk_idx].prev_blk_idx =
CFA_MM_INVALID32;
}
if (blk_list->current_blk_idx == blk_idx)
blk_list->current_blk_idx = blk_list->first_blk_idx;
return;
}
if (blk_info->prev_blk_idx != CFA_MM_INVALID32) {
context->blk_tbl[blk_info->prev_blk_idx].next_blk_idx =
blk_info->next_blk_idx;
}
if (blk_info->next_blk_idx != CFA_MM_INVALID32) {
context->blk_tbl[blk_info->next_blk_idx].prev_blk_idx =
blk_info->prev_blk_idx;
}
if (blk_list->current_blk_idx == blk_idx) {
if (blk_info->next_blk_idx != CFA_MM_INVALID32) {
blk_list->current_blk_idx = blk_info->next_blk_idx;
} else {
if (blk_info->prev_blk_idx != CFA_MM_INVALID32) {
blk_list->current_blk_idx =
blk_info->prev_blk_idx;
} else {
blk_list->current_blk_idx =
blk_list->first_blk_idx;
}
}
}
}
/* Returns true if the bit in the bitmap is set to 'val' else returns false */
static bool cfa_mm_test_bit(u8 *bmap, u16 index, u8 val)
{
u8 shift;
bmap += index / CFA_MM_RECORDS_PER_BYTE;
index %= CFA_MM_RECORDS_PER_BYTE;
shift = CFA_MM_RECORDS_PER_BYTE - (index + 1);
if (val) {
if ((*bmap >> shift) & 0x1)
return true;
} else {
if (!((*bmap >> shift) & 0x1))
return true;
}
return false;
}
static int cfa_mm_test_and_set_bits(u8 *bmap, u16 start,
u16 count, u8 val)
{
u8 mask[NUM_ALIGN_UNITS(CFA_MM_MAX_CONTIG_RECORDS,
CFA_MM_RECORDS_PER_BYTE) + 1];
u16 i, j, nbits;
bmap += start / CFA_MM_RECORDS_PER_BYTE;
start %= CFA_MM_RECORDS_PER_BYTE;
if ((start + count - 1) < CFA_MM_RECORDS_PER_BYTE) {
nbits = CFA_MM_RECORDS_PER_BYTE - (start + count);
mask[0] = (u8)(((u16)1 << count) - 1);
mask[0] <<= nbits;
if (val) {
if (*bmap & mask[0])
return -EINVAL;
*bmap |= mask[0];
} else {
if ((*bmap & mask[0]) != mask[0])
return -EINVAL;
*bmap &= ~(mask[0]);
}
return 0;
}
i = 0;
nbits = CFA_MM_RECORDS_PER_BYTE - start;
mask[i++] = (u8)(((u16)1 << nbits) - 1);
count -= nbits;
while (count > CFA_MM_RECORDS_PER_BYTE) {
count -= CFA_MM_RECORDS_PER_BYTE;
mask[i++] = 0xff;
}
mask[i] = (u8)(((u16)1 << count) - 1);
mask[i++] <<= (CFA_MM_RECORDS_PER_BYTE - count);
for (j = 0; j < i; j++) {
if (val) {
if (bmap[j] & mask[j])
return -EINVAL;
} else {
if ((bmap[j] & mask[j]) != mask[j])
return -EINVAL;
}
}
for (j = 0; j < i; j++) {
if (val)
bmap[j] |= mask[j];
else
bmap[j] &= ~(mask[j]);
}
return 0;
}
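/*
* Worked example (editorial note, assuming CFA_MM_RECORDS_PER_BYTE is 8):
* cfa_mm_test_and_set_bits(bmap, 6, 4, 1) spans two bytes. The first mask
* covers the trailing nbits = 8 - 6 = 2 bits of bmap[0] (mask[0] = 0x03),
* and the remaining count = 2 bits give mask[1] = 0x03 << 6 = 0xc0. All
* affected bytes are tested before any are modified, so an overlap with
* an already-allocated range fails with -EINVAL and leaves the bitmap
* untouched.
*/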
int cfa_mm_alloc(void *cmm, struct cfa_mm_alloc_parms *parms)
{
struct cfa_mm *context = (struct cfa_mm *)cmm;
struct cfa_mm_blk_list *blk_list;
u32 i, cnt, blk_idx, record_idx;
struct cfa_mm_blk *blk_info;
u16 list_idx, num_records;
u8 *blk_bmap;
int ret = 0;
if (!cmm || !parms ||
context->signature != CFA_MM_SIGNATURE) {
netdev_dbg(NULL, "cmm = %p parms = %p\n", cmm, parms);
return -EINVAL;
}
if (!(CFA_CHECK_BOUNDS(parms->num_contig_records, 1,
context->max_contig_records) &&
is_power_of_2(parms->num_contig_records))) {
netdev_dbg(NULL, "cmm = %p parms = %p num_records = %d\n", cmm,
parms, parms->num_contig_records);
return -EINVAL;
}
list_idx = CFA_ALIGN_LN2(parms->num_contig_records);
blk_list = context->blk_list_tbl + list_idx;
num_records = 1 << (list_idx - 1);
if (context->records_in_use + num_records > context->max_records) {
netdev_err(NULL, "Requested number (%d) of records not available\n",
num_records);
ret = -ENOMEM;
goto cfa_mm_alloc_exit;
}
if (blk_list->first_blk_idx == CFA_MM_INVALID32) {
blk_idx = cfa_mm_blk_alloc(context);
if (blk_idx == CFA_MM_INVALID32) {
ret = -ENOMEM;
goto cfa_mm_alloc_exit;
}
cfa_mm_blk_insert(context, blk_list, blk_idx);
blk_info = &context->blk_tbl[blk_idx];
blk_info->num_contig_records = num_records;
} else {
blk_idx = blk_list->current_blk_idx;
blk_info = &context->blk_tbl[blk_idx];
}
while (blk_info->num_free_records < num_records) {
if (blk_info->next_blk_idx == CFA_MM_INVALID32) {
blk_idx = cfa_mm_blk_alloc(context);
if (blk_idx == CFA_MM_INVALID32) {
ret = -ENOMEM;
goto cfa_mm_alloc_exit;
}
cfa_mm_blk_insert(context, blk_list, blk_idx);
blk_info = &context->blk_tbl[blk_idx];
blk_info->num_contig_records = num_records;
} else {
blk_idx = blk_info->next_blk_idx;
blk_info = &context->blk_tbl[blk_idx];
blk_list->current_blk_idx = blk_idx;
}
}
blk_bmap = context->blk_bmap_tbl + blk_idx *
context->records_per_block /
CFA_MM_RECORDS_PER_BYTE;
record_idx = blk_info->first_free_record;
if (cfa_mm_test_and_set_bits(blk_bmap, record_idx, num_records, 1)) {
netdev_dbg(NULL,
"Records are already allocated. record_idx = %d, num_records = %d\n",
record_idx, num_records);
return -EINVAL;
}
parms->record_offset =
(blk_idx * context->records_per_block) + record_idx;
parms->num_contig_records = num_records;
blk_info->num_free_records -= num_records;
if (!blk_info->num_free_records) {
blk_info->first_free_record = context->records_per_block;
} else {
cnt = NUM_ALIGN_UNITS(context->records_per_block,
CFA_MM_RECORDS_PER_BYTE);
for (i = (record_idx + num_records) / CFA_MM_RECORDS_PER_BYTE;
i < cnt; i++) {
if (blk_bmap[i] != 0xff) {
u8 bmap = blk_bmap[i];
blk_info->first_free_record =
i * CFA_MM_RECORDS_PER_BYTE;
while (bmap & 0x80) {
bmap <<= 1;
blk_info->first_free_record++;
}
break;
}
}
}
context->records_in_use += num_records;
ret = 0;
cfa_mm_alloc_exit:
parms->used_count = context->records_in_use;
parms->all_used = (context->records_in_use >= context->max_records);
return ret;
}
int cfa_mm_free(void *cmm, struct cfa_mm_free_parms *parms)
{
struct cfa_mm *context = (struct cfa_mm *)cmm;
struct cfa_mm_blk_list *blk_list;
struct cfa_mm_blk *blk_info;
u16 list_idx, num_records;
u32 blk_idx, record_idx;
u8 *blk_bmap;
if (!cmm || !parms ||
context->signature != CFA_MM_SIGNATURE) {
netdev_err(NULL, "cmm = %p parms = %p\n", cmm, parms);
return -EINVAL;
}
if (!(parms->record_offset < context->max_records &&
CFA_CHECK_BOUNDS(parms->num_contig_records, 1,
context->max_contig_records) &&
is_power_of_2(parms->num_contig_records))) {
netdev_dbg(NULL,
"cmm = %p, parms = %p, record_offset = %d, num_contig_records = %d\n",
cmm, parms, parms->record_offset, parms->num_contig_records);
return -EINVAL;
}
record_idx = parms->record_offset % context->records_per_block;
blk_idx = parms->record_offset / context->records_per_block;
list_idx = CFA_ALIGN_LN2(parms->num_contig_records);
blk_list = &context->blk_list_tbl[list_idx];
if (blk_list->first_blk_idx == CFA_MM_INVALID32) {
netdev_err(NULL, "Records were not allocated\n");
return -EINVAL;
}
num_records = 1 << (list_idx - 1);
blk_info = &context->blk_tbl[blk_idx];
if (blk_info->num_contig_records != num_records) {
netdev_dbg(NULL,
"num_contig_records (%d) and num_records (%d) mismatch\n",
num_records, blk_info->num_contig_records);
return -EINVAL;
}
blk_bmap = context->blk_bmap_tbl + blk_idx *
context->records_per_block /
CFA_MM_RECORDS_PER_BYTE;
if (cfa_mm_test_and_set_bits(blk_bmap, record_idx, num_records, 0)) {
netdev_dbg(NULL, "Records are not allocated. record_idx = %d, num_records = %d\n",
record_idx, num_records);
return -EINVAL;
}
blk_info->num_free_records += num_records;
if (blk_info->num_free_records >= context->records_per_block) {
cfa_mm_blk_delete(context, blk_list, blk_idx);
cfa_mm_blk_free(context, blk_idx);
} else {
if (blk_info->num_free_records == num_records) {
cfa_mm_blk_delete(context, blk_list, blk_idx);
cfa_mm_blk_insert(context, blk_list, blk_idx);
blk_info->first_free_record = record_idx;
} else {
if (record_idx < blk_info->first_free_record)
blk_info->first_free_record = record_idx;
}
}
context->records_in_use -= num_records;
parms->used_count = context->records_in_use;
return 0;
}
int cfa_mm_entry_size_get(void *cmm, u32 entry_id, u8 *size)
{
struct cfa_mm *context = (struct cfa_mm *)cmm;
struct cfa_mm_blk *blk_info;
u32 blk_idx, record_idx;
u8 *blk_bmap;
if (!cmm || !size || context->signature != CFA_MM_SIGNATURE)
return -EINVAL;
if (!(entry_id < context->max_records)) {
netdev_dbg(NULL, "cmm = %p, entry_id = %d\n", cmm, entry_id);
return -EINVAL;
}
blk_idx = entry_id / context->records_per_block;
blk_info = &context->blk_tbl[blk_idx];
record_idx = entry_id % context->records_per_block;
/*
* Block is unused if num contig records is 0 and
* there are no allocated entries in the block
*/
if (blk_info->num_contig_records == 0)
return -ENOENT;
/*
* Check the entry is indeed allocated. Suffices to check if
* the first bit in the bitmap is set.
*/
blk_bmap = context->blk_bmap_tbl + blk_idx * context->records_per_block /
CFA_MM_RECORDS_PER_BYTE;
if (cfa_mm_test_bit(blk_bmap, record_idx, 1)) {
*size = blk_info->num_contig_records;
return 0;
} else {
return -ENOENT;
}
}

View File

@ -0,0 +1,156 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2019-2023 Broadcom
* All rights reserved.
*/
#ifndef _CFA_MM_H_
#define _CFA_MM_H_
/**
* CFA_MM CFA Memory Manager
* A CFA memory manager (Document Control:DCSG00988445) is an object instance
* within the CFA service module that is responsible for managing CFA related
* memories such as Thor2 CFA backing stores, Thor CFA action SRAM, etc. It
* is designed to operate in firmware or as part of the host Truflow stack.
* Each manager instance consists of a number of bank databases with each
* database managing a pool of CFA memory.
*/
/** CFA Memory Manager database query params structure
*
* Structure of database params
* @max_records: [in] Maximum number of CFA records
* @max_contig_records: [in] Max contiguous CFA records per Alloc (Must be a power of 2).
* @db_size: [out] Memory required for Database
*/
struct cfa_mm_query_parms {
u32 max_records;
u32 max_contig_records;
u32 db_size;
};
/** CFA Memory Manager open parameters
*
* Structure to store CFA MM open parameters
* @db_mem_size: [in] Size of memory allocated for CFA MM database
* @max_records: [in] Max number of CFA records
* @max_contig_records: [in] Maximum number of contiguous CFA records
*/
struct cfa_mm_open_parms {
u32 db_mem_size;
u32 max_records;
u16 max_contig_records;
};
/** CFA Memory Manager record alloc parameters
*
* Structure to contain parameters for record alloc
* @num_contig_records - [in] Number of contiguous CFA records
* @record_offset: [out] Offset of the first of the records allocated
* @used_count: [out] Total number of records already allocated
* @all_used: [out] Flag to indicate if all the records are allocated
*/
struct cfa_mm_alloc_parms {
u32 num_contig_records;
u32 record_offset;
u32 used_count;
u32 all_used;
};
/** CFA Memory Manager record free parameters
*
* Structure to contain parameters for record free
* @record_offset: [in] Offset of the first of the records allocated
* @num_contig_records: [in] Number of contiguous CFA records
* @used_count: [out] Total number of records already allocated
*/
struct cfa_mm_free_parms {
u32 record_offset;
u32 num_contig_records;
u32 used_count;
};
/** CFA Memory Manager query API
*
* This API returns the size of memory required for internal data structures to
* manage the pool of CFA Records with given parameters.
*
* @parms: [in,out] CFA Memory manager query data base parameters.
*
* Returns
* - (0) if successful.
* - (-ERRNO) on failure
*/
int cfa_mm_query(struct cfa_mm_query_parms *parms);
/** CFA Memory Manager open API
*
* This API initializes the CFA Memory Manager database
*
* @cmm: [in] Pointer to the memory used for the CFA Memory Manager Database
*
* @parms: [in] CFA Memory manager data base parameters.
*
* Returns
* - (0) if successful.
* - (-ERRNO) on failure
*/
int cfa_mm_open(void *cmm, struct cfa_mm_open_parms *parms);
/** CFA Memory Manager close API
*
* This API frees the CFA Memory Manager database
*
* @cmm: [in] Pointer to the database memory for the record pool
*
* Returns
* - (0) if successful.
* - (-ERRNO) on failure
*/
int cfa_mm_close(void *cmm);
/** CFA Memory Manager Allocate CFA Records API
*
* This API allocates the requested number of contiguous CFA Records
*
* @cmm: [in] Pointer to the database from which to allocate CFA Records
*
* @parms: [in,out] CFA MM alloc records parameters
*
* Returns
* - (0) if successful.
* - (-ERRNO) on failure
*/
int cfa_mm_alloc(void *cmm, struct cfa_mm_alloc_parms *parms);
/** CFA Memory Manager Free CFA Records API
*
* This API frees the requested number of contiguous CFA Records
*
* @cmm: [in] Pointer to the database from which to free CFA Records
*
* @parms: [in,out] CFA MM free records parameters
*
* Returns
* - (0) if successful.
* - (-ERRNO) on failure
*/
int cfa_mm_free(void *cmm, struct cfa_mm_free_parms *parms);
/** CFA Memory Manager Get Entry Size API
*
* This API retrieves the size of an allocated CMM entry.
*
* @cmm: [in] Pointer to the database from which to allocate CFA Records
*
* @entry_id: [in] Index of the allocated entry.
*
* @size: [out] Number of contiguous records in the entry.
*
* Returns
* - (0) if successful.
* - (-ERRNO) on failure
*/
int cfa_mm_entry_size_get(void *cmm, u32 entry_id, u8 *size);
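/*
* Usage sketch (editorial illustration, not part of this header): the
* cfa_mm API follows a query/open/alloc/free/close lifecycle. The sizes
* below are hypothetical, and the caller is assumed to run in process
* context so that kzalloc(..., GFP_KERNEL) is permitted.
*
*	struct cfa_mm_query_parms qparms = {
*		.max_records = 1024,
*		.max_contig_records = 8,
*	};
*	struct cfa_mm_alloc_parms aparms = { .num_contig_records = 4 };
*	struct cfa_mm_free_parms fparms;
*	struct cfa_mm_open_parms oparms;
*	void *cmm;
*	int rc;
*
*	rc = cfa_mm_query(&qparms);
*	if (rc)
*		return rc;
*	cmm = kzalloc(qparms.db_size, GFP_KERNEL);
*	if (!cmm)
*		return -ENOMEM;
*	oparms.db_mem_size = qparms.db_size;
*	oparms.max_records = qparms.max_records;
*	oparms.max_contig_records = qparms.max_contig_records;
*	rc = cfa_mm_open(cmm, &oparms);
*	if (rc)
*		goto out_free;
*	rc = cfa_mm_alloc(cmm, &aparms);
*	if (rc)
*		goto out_close;
*	fparms.record_offset = aparms.record_offset;
*	fparms.num_contig_records = aparms.num_contig_records;
*	rc = cfa_mm_free(cmm, &fparms);
* out_close:
*	cfa_mm_close(cmm);
* out_free:
*	kfree(cmm);
*	return rc;
*/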
#endif /* _CFA_MM_H_ */

View File

@ -0,0 +1,13 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2019-2023 Broadcom
* All rights reserved.
*/
#ifndef _SYS_UTIL_H_
#define _SYS_UTIL_H_
#define NUM_ALIGN_UNITS(x, unit) (((x) + (unit) - (1)) / (unit))
#define IS_POWER_2(x) (((x) != 0) && (((x) & ((x) - (1))) == 0))
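/*
* Worked example (editorial note): NUM_ALIGN_UNITS is a round-up
* division, so a bitmap of 100 single-bit records at 8 records per byte
* needs NUM_ALIGN_UNITS(100, 8) = (100 + 7) / 8 = 13 bytes, while
* IS_POWER_2(64) evaluates true and IS_POWER_2(100) false.
*/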
#endif /* _SYS_UTIL_H_ */

View File

@ -0,0 +1,33 @@
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright(c) 2019-2023 Broadcom
* All rights reserved.
*/
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include "bnxt_compat.h"
#include "sys_util.h"
#include "cfa_types.h"
#include "cfa_bld_p70_mpc.h"
#include "cfa_bld_p70_mpc_defs.h"
#include "cfa_bld_p70_mpcops.h"
int cfa_bld_mpc_bind(enum cfa_ver hw_ver, struct cfa_bld_mpcinfo *mpcinfo)
{
if (!mpcinfo)
return -EINVAL;
switch (hw_ver) {
case CFA_P40:
case CFA_P45:
case CFA_P58:
case CFA_P59:
return -ENOTSUPP;
case CFA_P70:
return cfa_bld_p70_mpc_bind(hw_ver, mpcinfo);
default:
return -EINVAL;
}
}
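/*
* Usage sketch (editorial illustration): bind the MPC builder ops for a
* Thor2 (CFA_P70) device. struct cfa_bld_mpcinfo is declared in
* cfa_bld_p70_mpcops.h (not shown in this diff); zero-initialization
* before the call is assumed to be sufficient.
*
*	struct cfa_bld_mpcinfo mpc_info = { 0 };
*	int rc;
*
*	rc = cfa_bld_mpc_bind(CFA_P70, &mpc_info);
*	if (rc)
*		return rc;
*
* Pre-Thor2 revisions (CFA_P40 through CFA_P59) return -ENOTSUPP and an
* unknown revision returns -EINVAL, so callers can tell unsupported
* hardware apart from bad arguments.
*/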

File diff suppressed because it is too large

View File

@ -0,0 +1,883 @@
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright(c) 2019-2023 Broadcom
* All rights reserved.
*/
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include "bnxt_compat.h"
#include "sys_util.h"
#include "cfa_types.h"
#include "cfa_bld_p70_mpc.h"
#include "cfa_bld_p70_mpc_defs.h"
#include "cfa_p70_mpc_cmds.h"
#include "cfa_p70_mpc_cmpls.h"
/* CFA MPC client ids */
#define MP_CLIENT_TE_CFA READ_CMP_MP_CLIENT_TE_CFA
#define MP_CLIENT_RE_CFA READ_CMP_MP_CLIENT_RE_CFA
/* MPC Client id check in CFA completion messages */
#define ASSERT_CFA_MPC_CLIENT_ID(MPCID) \
do { \
if ((MPCID) != MP_CLIENT_TE_CFA && \
(MPCID) != MP_CLIENT_RE_CFA) { \
netdev_warn(NULL, \
"Unexpected MPC client id in response: %d\n", \
(MPCID)); \
} \
} while (0)
/** Add MPC header information to MPC command message */
static int fill_mpc_header(u8 *cmd, u32 size, u32 opaque_val)
{
struct mpc_header hdr = {
.opaque = opaque_val,
};
if (size < sizeof(struct mpc_header)) {
netdev_dbg(NULL, "%s: invalid parameter: size:%d too small\n", __func__, size);
ASSERT_RTNL();
return -EINVAL;
}
memcpy(cmd, &hdr, sizeof(hdr));
return 0;
}
/** Compose Table read-clear message */
static int compose_mpc_read_clr_msg(u8 *cmd_buff, u32 *cmd_buff_len,
struct cfa_mpc_cache_axs_params *parms)
{
u32 cmd_size = sizeof(struct mpc_header) + TFC_MPC_CMD_TBL_RDCLR_SIZE;
struct cfa_mpc_cache_read_params *rd_parms = &parms->read;
u8 *cmd;
if (parms->data_size != 1) {
netdev_dbg(NULL, "%s: invalid parameter: data_size:%d\n",
__func__, parms->data_size);
ASSERT_RTNL();
return -EINVAL;
}
if (parms->tbl_type >= CFA_HW_TABLE_MAX) {
netdev_dbg(NULL, "%s: invalid parameter: tbl_typed: %d out of range\n",
__func__, parms->tbl_type);
ASSERT_RTNL();
return -EINVAL;
}
if (*cmd_buff_len < cmd_size) {
netdev_dbg(NULL, "%s: invalid parameter: cmd_buff_len too small\n", __func__);
ASSERT_RTNL();
return -EINVAL;
}
cmd = cmd_buff + sizeof(struct mpc_header);
/* Populate CFA MPC command header */
memset(cmd, 0, TFC_MPC_CMD_TBL_RDCLR_SIZE);
TFC_MPC_CMD_TBL_RDCLR_SET_OPCODE(cmd, TFC_MPC_CMD_OPCODE_READ_CLR);
TFC_MPC_CMD_TBL_RDCLR_SET_TABLE_TYPE(cmd, parms->tbl_type);
TFC_MPC_CMD_TBL_RDCLR_SET_TABLE_SCOPE(cmd, parms->tbl_scope);
TFC_MPC_CMD_TBL_RDCLR_SET_DATA_SIZE(cmd, parms->data_size);
TFC_MPC_CMD_TBL_RDCLR_SET_TABLE_INDEX(cmd, parms->tbl_index);
TFC_MPC_CMD_TBL_RDCLR_SET_HOST_ADDRESS_0(cmd, (u32)rd_parms->host_address);
TFC_MPC_CMD_TBL_RDCLR_SET_HOST_ADDRESS_1(cmd, (u32)(rd_parms->host_address >> 32));
switch (rd_parms->mode) {
case CFA_MPC_RD_EVICT:
TFC_MPC_CMD_TBL_RDCLR_SET_CACHE_OPTION(cmd, CACHE_READ_CLR_OPTION_EVICT);
break;
case CFA_MPC_RD_NORMAL:
default:
TFC_MPC_CMD_TBL_RDCLR_SET_CACHE_OPTION(cmd, CACHE_READ_CLR_OPTION_NORMAL);
break;
}
TFC_MPC_CMD_TBL_RDCLR_SET_CLEAR_MASK(cmd, rd_parms->clear_mask);
*cmd_buff_len = cmd_size;
return 0;
}
/** Compose Table read message */
static int compose_mpc_read_msg(u8 *cmd_buff, u32 *cmd_buff_len,
struct cfa_mpc_cache_axs_params *parms)
{
u32 cmd_size = sizeof(struct mpc_header) + TFC_MPC_CMD_TBL_RD_SIZE;
struct cfa_mpc_cache_read_params *rd_parms = &parms->read;
u8 *cmd;
if (parms->data_size < 1 || parms->data_size > 4) {
netdev_dbg(NULL, "%s: invalid parameter: data_size:%d out of range\n",
__func__, parms->data_size);
ASSERT_RTNL();
return -EINVAL;
}
if (parms->tbl_type >= CFA_HW_TABLE_MAX) {
netdev_dbg(NULL, "%s: invalid parameter: tbl_typed: %d out of range\n",
__func__, parms->tbl_type);
ASSERT_RTNL();
return -EINVAL;
}
if (*cmd_buff_len < cmd_size) {
netdev_dbg(NULL, "%s: invalid parameter: cmd_buff_len too small\n", __func__);
ASSERT_RTNL();
return -EINVAL;
}
cmd = (cmd_buff + sizeof(struct mpc_header));
/* Populate CFA MPC command header */
memset(cmd, 0, TFC_MPC_CMD_TBL_RD_SIZE);
TFC_MPC_CMD_TBL_RD_SET_OPCODE(cmd, TFC_MPC_CMD_OPCODE_READ);
TFC_MPC_CMD_TBL_RD_SET_TABLE_TYPE(cmd, parms->tbl_type);
TFC_MPC_CMD_TBL_RD_SET_TABLE_SCOPE(cmd, parms->tbl_scope);
TFC_MPC_CMD_TBL_RD_SET_DATA_SIZE(cmd, parms->data_size);
TFC_MPC_CMD_TBL_RD_SET_TABLE_INDEX(cmd, parms->tbl_index);
TFC_MPC_CMD_TBL_RD_SET_HOST_ADDRESS_0(cmd, (u32)rd_parms->host_address);
TFC_MPC_CMD_TBL_RD_SET_HOST_ADDRESS_1(cmd, (u32)(rd_parms->host_address >> 32));
switch (rd_parms->mode) {
case CFA_MPC_RD_EVICT:
TFC_MPC_CMD_TBL_RD_SET_CACHE_OPTION(cmd, CACHE_READ_OPTION_EVICT);
break;
case CFA_MPC_RD_DEBUG_LINE:
TFC_MPC_CMD_TBL_RD_SET_CACHE_OPTION(cmd, CACHE_READ_OPTION_DEBUG_LINE);
break;
case CFA_MPC_RD_DEBUG_TAG:
TFC_MPC_CMD_TBL_RD_SET_CACHE_OPTION(cmd, CACHE_READ_OPTION_DEBUG_TAG);
break;
case CFA_MPC_RD_NORMAL:
default:
TFC_MPC_CMD_TBL_RD_SET_CACHE_OPTION(cmd, CACHE_READ_OPTION_NORMAL);
break;
}
*cmd_buff_len = cmd_size;
return 0;
}
/** Compose Table write message */
static int compose_mpc_write_msg(u8 *cmd_buff, u32 *cmd_buff_len,
struct cfa_mpc_cache_axs_params *parms)
{
u32 cmd_size = sizeof(struct mpc_header) + TFC_MPC_CMD_TBL_WR_SIZE +
parms->data_size * MPC_CFA_CACHE_ACCESS_UNIT_SIZE;
struct cfa_mpc_cache_write_params *wr_parms = &parms->write;
u8 *cmd;
if (parms->data_size < 1 || parms->data_size > 4) {
ASSERT_RTNL();
return -EINVAL;
}
if (parms->tbl_type >= CFA_HW_TABLE_MAX) {
netdev_dbg(NULL, "%s: invalid parameter: tbl_typed: %d out of range\n",
__func__, parms->tbl_type);
ASSERT_RTNL();
return -EINVAL;
}
if (!parms->write.data_ptr) {
ASSERT_RTNL();
return -EINVAL;
}
if (*cmd_buff_len < cmd_size) {
netdev_dbg(NULL, "%s: invalid parameter: cmd_buff_len too small\n", __func__);
ASSERT_RTNL();
return -EINVAL;
}
cmd = (cmd_buff + sizeof(struct mpc_header));
/* Populate CFA MPC command header */
memset(cmd, 0, TFC_MPC_CMD_TBL_WR_SIZE);
TFC_MPC_CMD_TBL_WR_SET_OPCODE(cmd, TFC_MPC_CMD_OPCODE_WRITE);
TFC_MPC_CMD_TBL_WR_SET_TABLE_TYPE(cmd, parms->tbl_type);
TFC_MPC_CMD_TBL_WR_SET_TABLE_SCOPE(cmd, parms->tbl_scope);
TFC_MPC_CMD_TBL_WR_SET_DATA_SIZE(cmd, parms->data_size);
TFC_MPC_CMD_TBL_WR_SET_TABLE_INDEX(cmd, parms->tbl_index);
switch (wr_parms->mode) {
case CFA_MPC_WR_WRITE_THRU:
TFC_MPC_CMD_TBL_WR_SET_CACHE_OPTION(cmd, CACHE_WRITE_OPTION_WRITE_THRU);
break;
case CFA_MPC_WR_WRITE_BACK:
default:
TFC_MPC_CMD_TBL_WR_SET_CACHE_OPTION(cmd, CACHE_WRITE_OPTION_WRITE_BACK);
break;
}
/* Populate CFA MPC command payload following the header */
memcpy(cmd + TFC_MPC_CMD_TBL_WR_SIZE, wr_parms->data_ptr,
parms->data_size * MPC_CFA_CACHE_ACCESS_UNIT_SIZE);
*cmd_buff_len = cmd_size;
return 0;
}
/** Compose Invalidate message */
static int compose_mpc_evict_msg(u8 *cmd_buff, u32 *cmd_buff_len,
struct cfa_mpc_cache_axs_params *parms)
{
u32 cmd_size = sizeof(struct mpc_header) + TFC_MPC_CMD_TBL_INV_SIZE;
struct cfa_mpc_cache_evict_params *ev_parms = &parms->evict;
u8 *cmd;
if (parms->data_size < 1 || parms->data_size > 4) {
ASSERT_RTNL();
return -EINVAL;
}
if (parms->tbl_type >= CFA_HW_TABLE_MAX) {
netdev_dbg(NULL, "%s: invalid parameter: tbl_typed: %d out of range\n",
__func__, parms->tbl_type);
ASSERT_RTNL();
return -EINVAL;
}
if (*cmd_buff_len < cmd_size) {
netdev_dbg(NULL, "%s: invalid parameter: cmd_buff_len too small\n", __func__);
ASSERT_RTNL();
return -EINVAL;
}
cmd = cmd_buff + sizeof(struct mpc_header);
/* Populate CFA MPC command header */
memset(cmd, 0, TFC_MPC_CMD_TBL_INV_SIZE);
TFC_MPC_CMD_TBL_INV_SET_OPCODE(cmd, TFC_MPC_CMD_OPCODE_INVALIDATE);
TFC_MPC_CMD_TBL_INV_SET_TABLE_TYPE(cmd, parms->tbl_type);
TFC_MPC_CMD_TBL_INV_SET_TABLE_SCOPE(cmd, parms->tbl_scope);
TFC_MPC_CMD_TBL_INV_SET_DATA_SIZE(cmd, parms->data_size);
TFC_MPC_CMD_TBL_INV_SET_TABLE_INDEX(cmd, parms->tbl_index);
switch (ev_parms->mode) {
case CFA_MPC_EV_EVICT_LINE:
TFC_MPC_CMD_TBL_INV_SET_CACHE_OPTION(cmd, CACHE_EVICT_OPTION_LINE);
break;
case CFA_MPC_EV_EVICT_CLEAN_LINES:
TFC_MPC_CMD_TBL_INV_SET_CACHE_OPTION(cmd, CACHE_EVICT_OPTION_CLEAN_LINES);
break;
case CFA_MPC_EV_EVICT_CLEAN_FAST_EVICT_LINES:
TFC_MPC_CMD_TBL_INV_SET_CACHE_OPTION(cmd, CACHE_EVICT_OPTION_CLEAN_FAST_LINES);
break;
case CFA_MPC_EV_EVICT_CLEAN_AND_CLEAN_FAST_EVICT_LINES:
TFC_MPC_CMD_TBL_INV_SET_CACHE_OPTION(cmd,
CACHE_EVICT_OPTION_CLEAN_AND_FAST_LINES);
break;
case CFA_MPC_EV_EVICT_TABLE_SCOPE:
/* Not supported */
ASSERT_RTNL();
return -EOPNOTSUPP;
case CFA_MPC_EV_EVICT_SCOPE_ADDRESS:
default:
TFC_MPC_CMD_TBL_INV_SET_CACHE_OPTION(cmd, CACHE_EVICT_OPTION_SCOPE_ADDRESS);
break;
}
*cmd_buff_len = cmd_size;
return 0;
}
/**
* Build MPC CFA Cache access command
*
* @param [in] opc MPC opcode
*
* @param [out] cmd_buff Command data buffer to write the command to
*
* @param [in/out] cmd_buff_len Pointer to command buffer size param
* Set by caller to indicate the input cmd_buff size.
* Set to the actual size of the command generated by the API.
*
* @param [in] parms Pointer to MPC cache access command parameters
*
* @return 0 on Success, negative errno on failure
*/
int cfa_mpc_build_cache_axs_cmd(enum cfa_mpc_opcode opc, u8 *cmd_buff,
u32 *cmd_buff_len,
struct cfa_mpc_cache_axs_params *parms)
{
int rc;
if (!cmd_buff || !cmd_buff_len || *cmd_buff_len == 0 || !parms) {
netdev_dbg(NULL, "%s: invalid parameter: cmd_buff_len too small\n", __func__);
ASSERT_RTNL();
return -EINVAL;
}
rc = fill_mpc_header(cmd_buff, *cmd_buff_len, parms->opaque);
if (rc)
return rc;
switch (opc) {
case CFA_MPC_READ_CLR:
return compose_mpc_read_clr_msg(cmd_buff, cmd_buff_len, parms);
case CFA_MPC_READ:
return compose_mpc_read_msg(cmd_buff, cmd_buff_len, parms);
case CFA_MPC_WRITE:
return compose_mpc_write_msg(cmd_buff, cmd_buff_len, parms);
case CFA_MPC_INVALIDATE:
return compose_mpc_evict_msg(cmd_buff, cmd_buff_len, parms);
default:
ASSERT_RTNL();
return -EOPNOTSUPP;
}
}
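/*
* Usage sketch (editorial illustration): compose a one-unit table read.
* The buffer size, table type value, and dma_addr below are
* placeholders; table types come from cfa_types.h and dma_addr must be
* a DMA-mapped host buffer the hardware can write the record to.
*
*	u8 cmd[64];
*	u32 len = sizeof(cmd);
*	struct cfa_mpc_cache_axs_params p = {
*		.opaque = 0x1234,
*		.tbl_type = tbl_type,
*		.tbl_scope = 0,
*		.tbl_index = 42,
*		.data_size = 1,
*		.read = {
*			.host_address = dma_addr,
*			.mode = CFA_MPC_RD_NORMAL,
*		},
*	};
*	rc = cfa_mpc_build_cache_axs_cmd(CFA_MPC_READ, cmd, &len, &p);
*
* On success, len is rewritten with the exact command size to submit.
*/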
/** Compose EM Search message */
static int compose_mpc_em_search_msg(u8 *cmd_buff, u32 *cmd_buff_len,
struct cfa_mpc_em_op_params *parms)
{
struct cfa_mpc_em_search_params *e = &parms->search;
u8 *cmd;
u32 cmd_size = 0;
cmd_size = sizeof(struct mpc_header) + TFC_MPC_CMD_EM_SEARCH_SIZE +
e->data_size * MPC_CFA_CACHE_ACCESS_UNIT_SIZE;
if (e->data_size < 1 || e->data_size > 4) {
ASSERT_RTNL();
return -EINVAL;
}
if (*cmd_buff_len < cmd_size) {
netdev_dbg(NULL, "%s: invalid parameter: cmd_buff_len too small\n", __func__);
ASSERT_RTNL();
return -EINVAL;
}
if (!e->em_entry) {
ASSERT_RTNL();
return -EINVAL;
}
cmd = cmd_buff + sizeof(struct mpc_header);
/* Populate CFA MPC command header */
memset(cmd, 0, TFC_MPC_CMD_EM_SEARCH_SIZE);
TFC_MPC_CMD_EM_SEARCH_SET_OPCODE(cmd, TFC_MPC_CMD_OPCODE_EM_SEARCH);
TFC_MPC_CMD_EM_SEARCH_SET_TABLE_SCOPE(cmd, parms->tbl_scope);
TFC_MPC_CMD_EM_SEARCH_SET_DATA_SIZE(cmd, e->data_size);
/* Default to normal read cache option for EM search */
TFC_MPC_CMD_EM_SEARCH_SET_CACHE_OPTION(cmd, CACHE_READ_OPTION_NORMAL);
/* Populate CFA MPC command payload following the header */
memcpy(cmd + TFC_MPC_CMD_EM_SEARCH_SIZE, e->em_entry,
e->data_size * MPC_CFA_CACHE_ACCESS_UNIT_SIZE);
*cmd_buff_len = cmd_size;
return 0;
}
/** Compose EM Insert message */
static int compose_mpc_em_insert_msg(u8 *cmd_buff, u32 *cmd_buff_len,
struct cfa_mpc_em_op_params *parms)
{
struct cfa_mpc_em_insert_params *e = &parms->insert;
u8 *cmd;
u32 cmd_size = 0;
cmd_size = sizeof(struct mpc_header) + TFC_MPC_CMD_EM_INSERT_SIZE +
e->data_size * MPC_CFA_CACHE_ACCESS_UNIT_SIZE;
if (e->data_size < 1 || e->data_size > 4) {
ASSERT_RTNL();
return -EINVAL;
}
if (*cmd_buff_len < cmd_size) {
netdev_dbg(NULL, "%s: invalid parameter: cmd_buff_len too small\n", __func__);
ASSERT_RTNL();
return -EINVAL;
}
if (!e->em_entry) {
ASSERT_RTNL();
return -EINVAL;
}
cmd = (cmd_buff + sizeof(struct mpc_header));
/* Populate CFA MPC command header */
memset(cmd, 0, TFC_MPC_CMD_EM_INSERT_SIZE);
TFC_MPC_CMD_EM_INSERT_SET_OPCODE(cmd, TFC_MPC_CMD_OPCODE_EM_INSERT);
TFC_MPC_CMD_EM_INSERT_SET_WRITE_THROUGH(cmd, 1);
TFC_MPC_CMD_EM_INSERT_SET_TABLE_SCOPE(cmd, parms->tbl_scope);
TFC_MPC_CMD_EM_INSERT_SET_DATA_SIZE(cmd, e->data_size);
TFC_MPC_CMD_EM_INSERT_SET_REPLACE(cmd, e->replace);
TFC_MPC_CMD_EM_INSERT_SET_TABLE_INDEX(cmd, e->entry_idx);
TFC_MPC_CMD_EM_INSERT_SET_TABLE_INDEX2(cmd, e->bucket_idx);
/* Default to normal read cache option for EM insert */
TFC_MPC_CMD_EM_INSERT_SET_CACHE_OPTION(cmd, CACHE_READ_OPTION_NORMAL);
/* Default to write through cache write option for EM insert */
TFC_MPC_CMD_EM_INSERT_SET_CACHE_OPTION2(cmd, CACHE_WRITE_OPTION_WRITE_THRU);
/* Populate CFA MPC command payload following the header */
memcpy(cmd + TFC_MPC_CMD_EM_INSERT_SIZE, e->em_entry,
e->data_size * MPC_CFA_CACHE_ACCESS_UNIT_SIZE);
*cmd_buff_len = cmd_size;
return 0;
}
/** Compose EM Delete message */
static int compose_mpc_em_delete_msg(u8 *cmd_buff, u32 *cmd_buff_len,
struct cfa_mpc_em_op_params *parms)
{
u32 cmd_size = sizeof(struct mpc_header) + TFC_MPC_CMD_EM_DELETE_SIZE;
struct cfa_mpc_em_delete_params *e = &parms->del;
u8 *cmd;
if (*cmd_buff_len < cmd_size) {
netdev_dbg(NULL, "%s: invalid parameter: cmd_buff_len too small\n", __func__);
ASSERT_RTNL();
return -EINVAL;
}
/* Populate CFA MPC command header */
cmd = cmd_buff + sizeof(struct mpc_header);
memset(cmd, 0, TFC_MPC_CMD_EM_DELETE_SIZE);
TFC_MPC_CMD_EM_DELETE_SET_OPCODE(cmd, TFC_MPC_CMD_OPCODE_EM_DELETE);
TFC_MPC_CMD_EM_DELETE_SET_TABLE_SCOPE(cmd, parms->tbl_scope);
TFC_MPC_CMD_EM_DELETE_SET_TABLE_INDEX(cmd, e->entry_idx);
TFC_MPC_CMD_EM_DELETE_SET_TABLE_INDEX2(cmd, e->bucket_idx);
/* Default to normal read cache option for EM delete */
TFC_MPC_CMD_EM_DELETE_SET_CACHE_OPTION(cmd, CACHE_READ_OPTION_NORMAL);
/* Default to write through cache write option for EM delete */
TFC_MPC_CMD_EM_DELETE_SET_CACHE_OPTION2(cmd, CACHE_WRITE_OPTION_WRITE_THRU);
*cmd_buff_len = cmd_size;
return 0;
}
/** Compose EM Chain message */
static int compose_mpc_em_chain_msg(u8 *cmd_buff, u32 *cmd_buff_len,
struct cfa_mpc_em_op_params *parms)
{
u32 cmd_size = sizeof(struct mpc_header) + TFC_MPC_CMD_EM_MATCH_CHAIN_SIZE;
struct cfa_mpc_em_chain_params *e = &parms->chain;
u8 *cmd;
if (*cmd_buff_len < cmd_size) {
netdev_dbg(NULL, "%s: invalid parameter: cmd_buff_len too small\n", __func__);
ASSERT_RTNL();
return -EINVAL;
}
/* Populate CFA MPC command header */
cmd = cmd_buff + sizeof(struct mpc_header);
memset(cmd, 0, TFC_MPC_CMD_EM_MATCH_CHAIN_SIZE);
TFC_MPC_CMD_EM_MATCH_CHAIN_SET_OPCODE(cmd, TFC_MPC_CMD_OPCODE_EM_CHAIN);
TFC_MPC_CMD_EM_MATCH_CHAIN_SET_TABLE_SCOPE(cmd, parms->tbl_scope);
TFC_MPC_CMD_EM_MATCH_CHAIN_SET_TABLE_INDEX(cmd, e->entry_idx);
TFC_MPC_CMD_EM_MATCH_CHAIN_SET_TABLE_INDEX2(cmd, e->bucket_idx);
/* Default to normal read cache option for EM chain */
TFC_MPC_CMD_EM_MATCH_CHAIN_SET_CACHE_OPTION(cmd, CACHE_READ_OPTION_NORMAL);
/* Default to write through cache write option for EM chain */
TFC_MPC_CMD_EM_MATCH_CHAIN_SET_CACHE_OPTION2(cmd, CACHE_WRITE_OPTION_WRITE_THRU);
*cmd_buff_len = cmd_size;
return 0;
}
/**
* Build MPC CFA EM operation command
*
* @param [in] opc MPC EM opcode
*
* @param [out] cmd_buff Command data buffer to write the command to
*
* @param [in/out] cmd_buff_len Pointer to command buffer size param
* Set by caller to indicate the input cmd_buff size.
* Set to the actual size of the command generated by the API.
*
* @param [in] parms Pointer to MPC cache access command parameters
*
* @return 0 on Success, negative errno on failure
*/
int cfa_mpc_build_em_op_cmd(enum cfa_mpc_opcode opc, u8 *cmd_buff, u32 *cmd_buff_len,
struct cfa_mpc_em_op_params *parms)
{
int rc;
if (!cmd_buff || !cmd_buff_len || *cmd_buff_len == 0 || !parms) {
netdev_dbg(NULL, "%s: invalid parameter: cmd_buff_len too small\n", __func__);
ASSERT_RTNL();
return -EINVAL;
}
rc = fill_mpc_header(cmd_buff, *cmd_buff_len, parms->opaque);
if (rc)
return rc;
switch (opc) {
case CFA_MPC_EM_SEARCH:
return compose_mpc_em_search_msg(cmd_buff, cmd_buff_len, parms);
case CFA_MPC_EM_INSERT:
return compose_mpc_em_insert_msg(cmd_buff, cmd_buff_len, parms);
case CFA_MPC_EM_DELETE:
return compose_mpc_em_delete_msg(cmd_buff, cmd_buff_len, parms);
case CFA_MPC_EM_CHAIN:
return compose_mpc_em_chain_msg(cmd_buff, cmd_buff_len, parms);
default:
ASSERT_RTNL();
return -EOPNOTSUPP;
}
return 0;
}
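/*
* Usage sketch (editorial illustration): compose an EM insert for a
* two-unit entry. em_entry, entry_idx, and bucket_idx are placeholders
* supplied by the caller's lookup-table management, and the buffer size
* is hypothetical.
*
*	u8 cmd[192];
*	u32 len = sizeof(cmd);
*	struct cfa_mpc_em_op_params p = {
*		.opaque = 0x55,
*		.tbl_scope = 0,
*		.insert = {
*			.em_entry = em_entry,
*			.data_size = 2,
*			.entry_idx = entry_idx,
*			.bucket_idx = bucket_idx,
*			.replace = 0,
*		},
*	};
*	rc = cfa_mpc_build_em_op_cmd(CFA_MPC_EM_INSERT, cmd, &len, &p);
*/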
/** Parse MPC read clear completion */
static int parse_mpc_read_clr_result(u8 *resp_buff, u32 resp_buff_len,
struct cfa_mpc_cache_axs_result *result)
{
u8 *cmp;
u32 resp_size, rd_size;
u8 *rd_data;
/* Minimum data size = 1 32B unit */
rd_size = MPC_CFA_CACHE_ACCESS_UNIT_SIZE;
resp_size = sizeof(struct mpc_header) +
TFC_MPC_TBL_RDCLR_CMPL_SIZE +
sizeof(struct mpc_cr_short_dma_data) + rd_size;
cmp = resp_buff + sizeof(struct mpc_header);
if (resp_buff_len < resp_size ||
result->data_len < rd_size ||
!result->rd_data) {
ASSERT_RTNL();
return -EINVAL;
}
ASSERT_CFA_MPC_CLIENT_ID((int)TFC_MPC_TBL_RDCLR_CMPL_GET_MP_CLIENT(cmp));
result->status = TFC_MPC_TBL_RDCLR_CMPL_GET_STATUS(cmp);
result->error_data = TFC_MPC_TBL_RDCLR_CMPL_GET_HASH_MSB(cmp);
result->opaque = TFC_MPC_TBL_RDCLR_CMPL_GET_OPAQUE(cmp);
/* No data to copy if there was an error, return early */
if (result->status != TFC_MPC_TBL_RDCLR_CMPL_STATUS_OK)
return 0;
/* Copy the read data - starting at the end of the completion header including dma data */
rd_data = resp_buff + sizeof(struct mpc_header) +
TFC_MPC_TBL_RDCLR_CMPL_SIZE +
sizeof(struct mpc_cr_short_dma_data);
memcpy(result->rd_data, rd_data, rd_size);
return 0;
}
/** Parse MPC table read completion */
static int parse_mpc_read_result(u8 *resp_buff, u32 resp_buff_len,
struct cfa_mpc_cache_axs_result *result)
{
u8 *cmp;
u32 resp_size, rd_size;
u8 *rd_data;
/* Minimum data size = 1 32B unit */
rd_size = MPC_CFA_CACHE_ACCESS_UNIT_SIZE;
resp_size = sizeof(struct mpc_header) +
TFC_MPC_TBL_RD_CMPL_SIZE +
sizeof(struct mpc_cr_short_dma_data) + rd_size;
cmp = (resp_buff + sizeof(struct mpc_header));
if (resp_buff_len < resp_size ||
result->data_len < rd_size ||
!result->rd_data) {
ASSERT_RTNL();
return -EINVAL;
}
ASSERT_CFA_MPC_CLIENT_ID((int)TFC_MPC_TBL_RD_CMPL_GET_MP_CLIENT(cmp));
result->status = TFC_MPC_TBL_RD_CMPL_GET_STATUS(cmp);
result->error_data = TFC_MPC_TBL_RD_CMPL_GET_HASH_MSB(cmp);
result->opaque = TFC_MPC_TBL_RD_CMPL_GET_OPAQUE(cmp);
/* No data to copy if there was an error, return early */
if (result->status != TFC_MPC_TBL_RD_CMPL_STATUS_OK)
return 0;
/* Copy max of 4 32B words that can fit into the return buffer */
rd_size = min_t(u32, 4 * MPC_CFA_CACHE_ACCESS_UNIT_SIZE, result->data_len);
/* Copy the read data - starting at the end of the completion header */
rd_data = resp_buff + sizeof(struct mpc_header) +
TFC_MPC_TBL_RD_CMPL_SIZE +
sizeof(struct mpc_cr_short_dma_data);
memcpy(result->rd_data, rd_data, rd_size);
return 0;
}
/** Parse MPC table write completion */
static int parse_mpc_write_result(u8 *resp_buff, u32 resp_buff_len,
struct cfa_mpc_cache_axs_result *result)
{
u32 resp_size;
u8 *cmp;
resp_size = sizeof(struct mpc_header) + TFC_MPC_TBL_WR_CMPL_SIZE;
cmp = (resp_buff + sizeof(struct mpc_header));
if (resp_buff_len < resp_size) {
ASSERT_RTNL();
return -EINVAL;
}
ASSERT_CFA_MPC_CLIENT_ID((int)TFC_MPC_TBL_WR_CMPL_GET_MP_CLIENT(cmp));
result->status = TFC_MPC_TBL_WR_CMPL_GET_STATUS(cmp);
result->error_data = TFC_MPC_TBL_WR_CMPL_GET_HASH_MSB(cmp);
result->opaque = TFC_MPC_TBL_WR_CMPL_GET_OPAQUE(cmp);
return 0;
}
/** Parse MPC table evict completion */
static int parse_mpc_evict_result(u8 *resp_buff, u32 resp_buff_len,
struct cfa_mpc_cache_axs_result *result)
{
u8 *cmp;
u32 resp_size;
resp_size = sizeof(struct mpc_header) +
TFC_MPC_TBL_INV_CMPL_SIZE;
cmp = resp_buff + sizeof(struct mpc_header);
if (resp_buff_len < resp_size) {
ASSERT_RTNL();
return -EINVAL;
}
ASSERT_CFA_MPC_CLIENT_ID((int)TFC_MPC_TBL_INV_CMPL_GET_MP_CLIENT(cmp));
result->status = TFC_MPC_TBL_INV_CMPL_GET_STATUS(cmp);
result->error_data = TFC_MPC_TBL_INV_CMPL_GET_HASH_MSB(cmp);
result->opaque = TFC_MPC_TBL_INV_CMPL_GET_OPAQUE(cmp);
return 0;
}
/**
* Parse MPC CFA Cache access command completion result
*
* @param [in] opc MPC cache access opcode
*
* @param [in] resp_buff Data buffer containing the response to parse
*
* @param [in] resp_buff_len Response buffer size
*
* @param [out] result Pointer to MPC cache access result object. This
* object will contain the fields parsed and extracted from the
* response buffer.
*
* @return 0 on Success, negative errno on failure
*/
int cfa_mpc_parse_cache_axs_resp(enum cfa_mpc_opcode opc, u8 *resp_buff,
u32 resp_buff_len,
struct cfa_mpc_cache_axs_result *result)
{
if (!resp_buff || resp_buff_len == 0 || !result) {
ASSERT_RTNL();
return -EINVAL;
}
switch (opc) {
case CFA_MPC_READ_CLR:
return parse_mpc_read_clr_result(resp_buff, resp_buff_len,
result);
case CFA_MPC_READ:
return parse_mpc_read_result(resp_buff, resp_buff_len, result);
case CFA_MPC_WRITE:
return parse_mpc_write_result(resp_buff, resp_buff_len, result);
case CFA_MPC_INVALIDATE:
return parse_mpc_evict_result(resp_buff, resp_buff_len, result);
default:
ASSERT_RTNL();
return -EOPNOTSUPP;
}
}
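/*
* Usage sketch (editorial illustration): parse the completion of a
* previously issued write. resp/resp_len are assumed to hold the raw
* completion copied from the hardware completion ring. For READ and
* READ_CLR the caller must additionally point result.rd_data at a
* buffer and set result.data_len to its size before calling.
*
*	struct cfa_mpc_cache_axs_result result = { 0 };
*
*	rc = cfa_mpc_parse_cache_axs_resp(CFA_MPC_WRITE, resp, resp_len,
*					  &result);
*	if (!rc && result.status != 0)
*		netdev_dbg(NULL, "MPC write failed, status %d\n",
*			   result.status);
*/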
/** Parse MPC EM Search completion */
static int parse_mpc_em_search_result(u8 *resp_buff,
u32 resp_buff_len,
struct cfa_mpc_em_op_result *result)
{
u8 *cmp;
u32 resp_size;
cmp = resp_buff + sizeof(struct mpc_header);
resp_size = sizeof(struct mpc_header) +
TFC_MPC_TBL_EM_SEARCH_CMPL_SIZE;
if (resp_buff_len < resp_size) {
ASSERT_RTNL();
return -EINVAL;
}
ASSERT_CFA_MPC_CLIENT_ID((int)TFC_MPC_TBL_EM_SEARCH_CMPL_GET_MP_CLIENT(cmp));
result->status = TFC_MPC_TBL_EM_SEARCH_CMPL_GET_STATUS(cmp);
result->error_data = result->status != CFA_MPC_OK ?
TFC_MPC_TBL_EM_SEARCH_CMPL_GET_HASH_MSB(cmp) : 0;
result->opaque = TFC_MPC_TBL_EM_SEARCH_CMPL_GET_OPAQUE(cmp);
result->search.bucket_num = TFC_MPC_TBL_EM_SEARCH_CMPL_GET_BKT_NUM(cmp);
result->search.num_entries = TFC_MPC_TBL_EM_SEARCH_CMPL_GET_NUM_ENTRIES(cmp);
result->search.hash_msb = TFC_MPC_TBL_EM_SEARCH_CMPL_GET_HASH_MSB(cmp);
result->search.match_idx = TFC_MPC_TBL_EM_SEARCH_CMPL_GET_TABLE_INDEX(cmp);
result->search.bucket_idx = TFC_MPC_TBL_EM_SEARCH_CMPL_GET_TABLE_INDEX2(cmp);
return 0;
}
/** Parse MPC EM Insert completion */
static int parse_mpc_em_insert_result(u8 *resp_buff,
u32 resp_buff_len,
struct cfa_mpc_em_op_result *result)
{
u8 *cmp;
u32 resp_size;
cmp = resp_buff + sizeof(struct mpc_header);
resp_size = sizeof(struct mpc_header) + TFC_MPC_TBL_EM_INSERT_CMPL_SIZE;
if (resp_buff_len < resp_size) {
ASSERT_RTNL();
return -EINVAL;
}
ASSERT_CFA_MPC_CLIENT_ID((int)TFC_MPC_TBL_EM_INSERT_CMPL_GET_MP_CLIENT(cmp));
result->status = TFC_MPC_TBL_EM_INSERT_CMPL_GET_STATUS(cmp);
result->error_data = (result->status != TFC_MPC_TBL_EM_INSERT_CMPL_STATUS_OK) ?
(u32)TFC_MPC_TBL_EM_INSERT_CMPL_GET_HASH_MSB(cmp) : 0UL;
result->opaque = TFC_MPC_TBL_EM_INSERT_CMPL_GET_OPAQUE(cmp);
result->insert.bucket_num = TFC_MPC_TBL_EM_INSERT_CMPL_GET_BKT_NUM(cmp);
result->insert.num_entries = TFC_MPC_TBL_EM_INSERT_CMPL_GET_NUM_ENTRIES(cmp);
result->insert.hash_msb = TFC_MPC_TBL_EM_INSERT_CMPL_GET_HASH_MSB(cmp);
result->insert.match_idx = TFC_MPC_TBL_EM_INSERT_CMPL_GET_TABLE_INDEX4(cmp);
result->insert.bucket_idx = TFC_MPC_TBL_EM_INSERT_CMPL_GET_TABLE_INDEX3(cmp);
result->insert.replaced = TFC_MPC_TBL_EM_INSERT_CMPL_GET_REPLACED_ENTRY(cmp);
result->insert.chain_update = TFC_MPC_TBL_EM_INSERT_CMPL_GET_CHAIN_UPD(cmp);
return 0;
}
/** Parse MPC EM Delete completion */
static int parse_mpc_em_delete_result(u8 *resp_buff,
u32 resp_buff_len,
struct cfa_mpc_em_op_result *result)
{
u8 *cmp;
u32 resp_size;
cmp = resp_buff + sizeof(struct mpc_header);
resp_size = sizeof(struct mpc_header) +
TFC_MPC_TBL_EM_DELETE_CMPL_SIZE;
if (resp_buff_len < resp_size) {
ASSERT_RTNL();
return -EINVAL;
}
ASSERT_CFA_MPC_CLIENT_ID((int)TFC_MPC_TBL_EM_DELETE_CMPL_GET_MP_CLIENT(cmp));
result->status = TFC_MPC_TBL_EM_DELETE_CMPL_GET_STATUS(cmp);
result->error_data = TFC_MPC_TBL_EM_DELETE_CMPL_GET_HASH_MSB(cmp);
result->opaque = TFC_MPC_TBL_EM_DELETE_CMPL_GET_OPAQUE(cmp);
result->del.bucket_num = TFC_MPC_TBL_EM_DELETE_CMPL_GET_BKT_NUM(cmp);
result->del.num_entries = TFC_MPC_TBL_EM_DELETE_CMPL_GET_NUM_ENTRIES(cmp);
result->del.prev_tail = TFC_MPC_TBL_EM_DELETE_CMPL_GET_TABLE_INDEX3(cmp);
result->del.new_tail = TFC_MPC_TBL_EM_DELETE_CMPL_GET_TABLE_INDEX4(cmp);
result->del.chain_update = TFC_MPC_TBL_EM_DELETE_CMPL_GET_CHAIN_UPD(cmp);
return 0;
}
/** Parse MPC EM Chain completion */
static int parse_mpc_em_chain_result(u8 *resp_buff, u32 resp_buff_len,
struct cfa_mpc_em_op_result *result)
{
u8 *cmp;
u32 resp_size;
cmp = resp_buff + sizeof(struct mpc_header);
resp_size =
sizeof(struct mpc_header) + TFC_MPC_TBL_EM_CHAIN_CMPL_SIZE;
if (resp_buff_len < resp_size) {
ASSERT_RTNL();
return -EINVAL;
}
ASSERT_CFA_MPC_CLIENT_ID((int)TFC_MPC_TBL_EM_CHAIN_CMPL_GET_MP_CLIENT(cmp));
result->status = TFC_MPC_TBL_EM_CHAIN_CMPL_GET_STATUS(cmp);
result->error_data = TFC_MPC_TBL_EM_CHAIN_CMPL_GET_HASH_MSB(cmp);
result->opaque = TFC_MPC_TBL_EM_CHAIN_CMPL_GET_OPAQUE(cmp);
result->chain.bucket_num = TFC_MPC_TBL_EM_CHAIN_CMPL_GET_BKT_NUM(cmp);
result->chain.num_entries = TFC_MPC_TBL_EM_CHAIN_CMPL_GET_NUM_ENTRIES(cmp);
return 0;
}
/**
* Parse MPC CFA EM operation command completion result
*
* @param [in] opc MPC EM operation opcode
*
* @param [in] resp_buff Data buffer containing the response to parse
*
* @param [in] resp_buff_len Response buffer size
*
* @param [out] result Pointer to MPC EM operation result object. This
* object will contain the fields parsed and extracted from the
* response buffer.
*
* @return 0 on Success, negative errno on failure
*/
int cfa_mpc_parse_em_op_resp(enum cfa_mpc_opcode opc, u8 *resp_buff,
u32 resp_buff_len,
struct cfa_mpc_em_op_result *result)
{
if (!resp_buff || resp_buff_len == 0 || !result) {
ASSERT_RTNL();
return -EINVAL;
}
switch (opc) {
case CFA_MPC_EM_SEARCH:
return parse_mpc_em_search_result(resp_buff, resp_buff_len,
result);
case CFA_MPC_EM_INSERT:
return parse_mpc_em_insert_result(resp_buff, resp_buff_len,
result);
case CFA_MPC_EM_DELETE:
return parse_mpc_em_delete_result(resp_buff, resp_buff_len,
result);
case CFA_MPC_EM_CHAIN:
return parse_mpc_em_chain_result(resp_buff, resp_buff_len,
result);
default:
ASSERT_RTNL();
return -EOPNOTSUPP;
}
}
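/*
* End-to-end sketch (editorial illustration): delete an EM entry and
* check the completion. send_mpc_cmd() is hypothetical shorthand for
* the driver's MPC transmit path, which submits the command and copies
* the raw completion back; that path is not part of this file.
*
*	struct cfa_mpc_em_op_params parms = {
*		.opaque = 0xcafe,
*		.tbl_scope = tbl_scope,
*		.del = {
*			.entry_idx = entry_idx,
*			.bucket_idx = bucket_idx,
*		},
*	};
*	struct cfa_mpc_em_op_result result = { 0 };
*	u8 cmd[128], resp[128];
*	u32 cmd_len = sizeof(cmd), resp_len = sizeof(resp);
*	int rc;
*
*	rc = cfa_mpc_build_em_op_cmd(CFA_MPC_EM_DELETE, cmd, &cmd_len,
*				     &parms);
*	if (rc)
*		return rc;
*	rc = send_mpc_cmd(cmd, cmd_len, resp, resp_len);
*	if (rc)
*		return rc;
*	rc = cfa_mpc_parse_em_op_resp(CFA_MPC_EM_DELETE, resp, resp_len,
*				      &result);
*	if (rc)
*		return rc;
*	return result.status ? -EIO : 0;
*/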

Some files were not shown because too many files have changed in this diff