Merge branches 'tracing/ftrace', 'tracing/kprobes', 'tracing/tasks' and 'linus' into tracing/core

This commit is contained in:
Ingo Molnar 2009-03-20 10:14:53 +01:00
commit 22de89b371
91 changed files with 1402 additions and 13239 deletions

View File

@ -567,6 +567,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
# disable pointer signed / unsigned warnings in gcc 4.0
KBUILD_CFLAGS += $(call cc-option,-Wno-pointer-sign,)
# disable invalid "can't wrap" optimzations for signed / pointers
KBUILD_CFLAGS += $(call cc-option,-fwrapv)
# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
# But warn user when we do so
warn-assign = \
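
(Aside, illustration only — not part of the commit: -fwrapv tells GCC that signed overflow wraps in two's complement, so it may no longer delete overflow checks on the assumption that signed overflow never happens. A minimal compile-and-run demo:)

/* Build with and without -fwrapv to compare. */
#include <limits.h>
#include <stdio.h>

static int increments(int a)
{
	return a + 1 > a;	/* with -fwrapv this is 0 for a == INT_MAX */
}

int main(void)
{
	printf("%d\n", increments(INT_MAX));
	return 0;
}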

View File

@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.27-rc3
# Wed Aug 20 08:16:53 2008
# Linux kernel version: 2.6.29-rc8
# Fri Mar 13 09:28:45 2009
#
CONFIG_PPC64=y
@ -16,13 +16,14 @@ CONFIG_PPC_FPU=y
CONFIG_ALTIVEC=y
# CONFIG_VSX is not set
CONFIG_PPC_STD_MMU=y
CONFIG_PPC_STD_MMU_64=y
CONFIG_PPC_MM_SLICES=y
CONFIG_VIRT_CPU_ACCOUNTING=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_64BIT=y
CONFIG_WORD_SIZE=64
CONFIG_PPC_MERGE=y
CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
CONFIG_MMU=y
CONFIG_GENERIC_CMOS_UPDATE=y
CONFIG_GENERIC_TIME=y
@ -46,7 +47,7 @@ CONFIG_PPC=y
CONFIG_EARLY_PRINTK=y
CONFIG_COMPAT=y
CONFIG_SYSVIPC_COMPAT=y
CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
CONFIG_SCHED_OMIT_FRAME_POINTER=y
CONFIG_ARCH_MAY_HAVE_PC_FDC=y
CONFIG_PPC_OF=y
CONFIG_OF=y
@ -74,10 +75,19 @@ CONFIG_POSIX_MQUEUE=y
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
# CONFIG_AUDIT is not set
#
# RCU Subsystem
#
CONFIG_CLASSIC_RCU=y
# CONFIG_TREE_RCU is not set
# CONFIG_PREEMPT_RCU is not set
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
# CONFIG_CGROUPS is not set
# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_RELAY is not set
@ -86,11 +96,13 @@ CONFIG_NAMESPACES=y
# CONFIG_IPC_NS is not set
# CONFIG_USER_NS is not set
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
# CONFIG_EMBEDDED is not set
CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
@ -99,37 +111,36 @@ CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
# CONFIG_COMPAT_BRK is not set
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
CONFIG_VM_EVENT_COUNTERS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
# CONFIG_MARKERS is not set
CONFIG_TRACEPOINTS=y
CONFIG_MARKERS=y
CONFIG_OPROFILE=m
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
CONFIG_HAVE_SYSCALL_WRAPPERS=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_USE_GENERIC_SMP_HELPERS=y
# CONFIG_HAVE_CLK is not set
CONFIG_PROC_PAGE_MONITOR=y
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
# CONFIG_MODULE_FORCE_LOAD is not set
@ -137,7 +148,6 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_KMOD=y
CONFIG_STOP_MACHINE=y
CONFIG_BLOCK=y
# CONFIG_BLK_DEV_IO_TRACE is not set
@ -157,7 +167,7 @@ CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="anticipatory"
CONFIG_CLASSIC_RCU=y
# CONFIG_FREEZER is not set
#
# Platform support
@ -183,18 +193,20 @@ CONFIG_PS3_STORAGE=y
CONFIG_PS3_DISK=y
CONFIG_PS3_ROM=y
CONFIG_PS3_FLASH=y
CONFIG_OPROFILE_PS3=y
CONFIG_PS3_VRAM=m
CONFIG_PS3_LPM=m
CONFIG_PPC_CELL=y
# CONFIG_PPC_CELL_NATIVE is not set
# CONFIG_PPC_IBM_CELL_BLADE is not set
# CONFIG_PPC_CELLEB is not set
# CONFIG_PPC_CELL_QPACE is not set
#
# Cell Broadband Engine options
#
CONFIG_SPU_FS=y
CONFIG_SPU_FS_64K_LS=y
# CONFIG_SPU_TRACE is not set
CONFIG_SPU_BASE=y
# CONFIG_PQ2ADS is not set
# CONFIG_IPIC is not set
@ -210,6 +222,7 @@ CONFIG_SPU_BASE=y
# CONFIG_GENERIC_IOMAP is not set
# CONFIG_CPU_FREQ is not set
# CONFIG_FSL_ULI1575 is not set
# CONFIG_SIMPLE_GPIO is not set
#
# Kernel options
@ -229,6 +242,8 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT is not set
CONFIG_BINFMT_ELF=y
CONFIG_COMPAT_BINFMT_ELF=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_HAVE_AOUT is not set
CONFIG_BINFMT_MISC=y
CONFIG_HUGETLB_PAGE_SIZE_VARIABLE=y
# CONFIG_IOMMU_VMERGE is not set
@ -251,7 +266,6 @@ CONFIG_SELECT_MEMORY_MODEL=y
CONFIG_SPARSEMEM_MANUAL=y
CONFIG_SPARSEMEM=y
CONFIG_HAVE_MEMORY_PRESENT=y
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SPARSEMEM_EXTREME=y
CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
# CONFIG_SPARSEMEM_VMEMMAP is not set
@ -261,11 +275,14 @@ CONFIG_MEMORY_HOTPLUG_SPARSE=y
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
CONFIG_RESOURCES_64BIT=y
CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
CONFIG_UNEVICTABLE_LRU=y
CONFIG_ARCH_MEMORY_PROBE=y
CONFIG_PPC_HAS_HASH_64K=y
CONFIG_PPC_4K_PAGES=y
# CONFIG_PPC_16K_PAGES is not set
# CONFIG_PPC_64K_PAGES is not set
CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_SCHED_SMT=y
@ -299,6 +316,7 @@ CONFIG_NET=y
#
# Networking options
#
CONFIG_COMPAT_NET_DEV_OPS=y
CONFIG_PACKET=y
CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
@ -361,6 +379,7 @@ CONFIG_IPV6_NDISC_NODETYPE=y
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
# CONFIG_NET_DSA is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
@ -371,6 +390,7 @@ CONFIG_IPV6_NDISC_NODETYPE=y
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
# CONFIG_NET_SCHED is not set
# CONFIG_DCB is not set
#
# Network testing
@ -392,39 +412,37 @@ CONFIG_BT_HIDP=m
#
# Bluetooth device drivers
#
CONFIG_BT_HCIUSB=m
CONFIG_BT_HCIUSB_SCO=y
CONFIG_BT_HCIBTUSB=m
# CONFIG_BT_HCIUART is not set
# CONFIG_BT_HCIBCM203X is not set
# CONFIG_BT_HCIBPA10X is not set
# CONFIG_BT_HCIBFUSB is not set
# CONFIG_BT_HCIVHCI is not set
# CONFIG_AF_RXRPC is not set
#
# Wireless
#
# CONFIG_PHONET is not set
CONFIG_WIRELESS=y
CONFIG_CFG80211=m
# CONFIG_CFG80211_REG_DEBUG is not set
CONFIG_NL80211=y
# CONFIG_WIRELESS_OLD_REGULATORY is not set
CONFIG_WIRELESS_EXT=y
# CONFIG_WIRELESS_EXT_SYSFS is not set
# CONFIG_LIB80211 is not set
CONFIG_MAC80211=m
#
# Rate control algorithm selection
#
CONFIG_MAC80211_RC_PID=y
# CONFIG_MAC80211_RC_MINSTREL is not set
CONFIG_MAC80211_RC_DEFAULT_PID=y
# CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set
CONFIG_MAC80211_RC_DEFAULT="pid"
# CONFIG_MAC80211_MESH is not set
# CONFIG_MAC80211_LEDS is not set
# CONFIG_MAC80211_DEBUGFS is not set
# CONFIG_MAC80211_DEBUG_MENU is not set
CONFIG_IEEE80211=m
# CONFIG_IEEE80211_DEBUG is not set
CONFIG_IEEE80211_CRYPT_WEP=m
CONFIG_IEEE80211_CRYPT_CCMP=m
CONFIG_IEEE80211_CRYPT_TKIP=m
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@ -450,6 +468,7 @@ CONFIG_MTD_DEBUG=y
CONFIG_MTD_DEBUG_VERBOSE=0
# CONFIG_MTD_CONCAT is not set
# CONFIG_MTD_PARTITIONS is not set
# CONFIG_MTD_TESTS is not set
#
# User Modules And Translation Layers
@ -494,7 +513,6 @@ CONFIG_MTD_CFI_I2=y
#
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
CONFIG_MTD_PS3VRAM=y
# CONFIG_MTD_MTDRAM is not set
# CONFIG_MTD_BLOCK2MTD is not set
@ -507,6 +525,11 @@ CONFIG_MTD_PS3VRAM=y
# CONFIG_MTD_NAND is not set
# CONFIG_MTD_ONENAND is not set
#
# LPDDR flash memory drivers
#
# CONFIG_MTD_LPDDR is not set
#
# UBI - Unsorted block images
#
@ -528,8 +551,13 @@ CONFIG_BLK_DEV_RAM_SIZE=65535
# CONFIG_ATA_OVER_ETH is not set
# CONFIG_BLK_DEV_HD is not set
CONFIG_MISC_DEVICES=y
# CONFIG_EEPROM_93CX6 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_C2PORT is not set
#
# EEPROM support
#
# CONFIG_EEPROM_93CX6 is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
@ -575,7 +603,17 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_LOWLEVEL is not set
# CONFIG_SCSI_DH is not set
# CONFIG_ATA is not set
# CONFIG_MD is not set
CONFIG_MD=y
# CONFIG_BLK_DEV_MD is not set
CONFIG_BLK_DEV_DM=m
# CONFIG_DM_DEBUG is not set
# CONFIG_DM_CRYPT is not set
# CONFIG_DM_SNAPSHOT is not set
# CONFIG_DM_MIRROR is not set
# CONFIG_DM_ZERO is not set
# CONFIG_DM_MULTIPATH is not set
# CONFIG_DM_DELAY is not set
# CONFIG_DM_UEVENT is not set
# CONFIG_MACINTOSH_DRIVERS is not set
CONFIG_NETDEVICES=y
# CONFIG_DUMMY is not set
@ -591,6 +629,9 @@ CONFIG_MII=m
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
CONFIG_NETDEV_1000=y
CONFIG_GELIC_NET=y
@ -604,6 +645,7 @@ CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE=y
# CONFIG_WLAN_PRE80211 is not set
CONFIG_WLAN_80211=y
# CONFIG_LIBERTAS is not set
# CONFIG_LIBERTAS_THINFIRM is not set
# CONFIG_USB_ZD1201 is not set
# CONFIG_USB_NET_RNDIS_WLAN is not set
# CONFIG_RTL8187 is not set
@ -615,13 +657,11 @@ CONFIG_WLAN_80211=y
# CONFIG_B43LEGACY is not set
CONFIG_ZD1211RW=m
# CONFIG_ZD1211RW_DEBUG is not set
CONFIG_RT2X00=m
CONFIG_RT2X00_LIB=m
CONFIG_RT2X00_LIB_USB=m
CONFIG_RT2X00_LIB_FIRMWARE=y
# CONFIG_RT2500USB is not set
CONFIG_RT73USB=m
# CONFIG_RT2X00_DEBUG is not set
# CONFIG_RT2X00 is not set
#
# Enable WiMAX (Networking options) to see the WiMAX drivers
#
#
# USB Network Adapters
@ -634,6 +674,7 @@ CONFIG_USB_USBNET=m
CONFIG_USB_NET_AX8817X=m
# CONFIG_USB_NET_CDCETHER is not set
# CONFIG_USB_NET_DM9601 is not set
# CONFIG_USB_NET_SMSC95XX is not set
# CONFIG_USB_NET_GL620A is not set
# CONFIG_USB_NET_NET1080 is not set
# CONFIG_USB_NET_PLUSB is not set
@ -664,7 +705,7 @@ CONFIG_SLHC=m
# Input device support
#
CONFIG_INPUT=y
# CONFIG_INPUT_FF_MEMLESS is not set
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_POLLDEV is not set
#
@ -735,8 +776,10 @@ CONFIG_DEVKMEM=y
# Non-8250 serial port support
#
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=16
# CONFIG_HVC_UDBG is not set
# CONFIG_IPMI_HANDLER is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_R3964 is not set
@ -753,11 +796,11 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
# CONFIG_THERMAL is not set
# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
CONFIG_SSB_POSSIBLE=y
# CONFIG_SSB is not set
#
@ -767,6 +810,7 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_REGULATOR is not set
#
# Multimedia devices
@ -792,6 +836,7 @@ CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
# CONFIG_FIRMWARE_EDID is not set
# CONFIG_FB_DDC is not set
# CONFIG_FB_BOOT_VESA_SUPPORT is not set
# CONFIG_FB_CFB_FILLRECT is not set
# CONFIG_FB_CFB_COPYAREA is not set
# CONFIG_FB_CFB_IMAGEBLIT is not set
@ -817,6 +862,8 @@ CONFIG_FB_SYS_FOPS=y
CONFIG_FB_PS3=y
CONFIG_FB_PS3_DEFAULT_SIZE_M=9
# CONFIG_FB_VIRTUAL is not set
# CONFIG_FB_METRONOME is not set
# CONFIG_FB_MB862XX is not set
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
@ -841,6 +888,7 @@ CONFIG_FB_LOGO_EXTRA=y
# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_LOGO_LINUX_CLUT224=y
CONFIG_SOUND=m
# CONFIG_SOUND_OSS_CORE is not set
CONFIG_SND=m
CONFIG_SND_TIMER=m
CONFIG_SND_PCM=m
@ -849,6 +897,7 @@ CONFIG_SND_RAWMIDI=m
# CONFIG_SND_SEQUENCER is not set
# CONFIG_SND_MIXER_OSS is not set
# CONFIG_SND_PCM_OSS is not set
# CONFIG_SND_HRTIMER is not set
# CONFIG_SND_DYNAMIC_MINORS is not set
CONFIG_SND_SUPPORT_OLD_API=y
CONFIG_SND_VERBOSE_PROCFS=y
@ -873,15 +922,40 @@ CONFIG_HIDRAW=y
# USB Input Devices
#
CONFIG_USB_HID=m
# CONFIG_USB_HIDINPUT_POWERBOOK is not set
# CONFIG_HID_FF is not set
# CONFIG_USB_HIDDEV is not set
# CONFIG_HID_PID is not set
CONFIG_USB_HIDDEV=y
#
# USB HID Boot Protocol drivers
#
# CONFIG_USB_KBD is not set
# CONFIG_USB_MOUSE is not set
#
# Special HID drivers
#
# CONFIG_HID_COMPAT is not set
# CONFIG_HID_A4TECH is not set
# CONFIG_HID_APPLE is not set
# CONFIG_HID_BELKIN is not set
# CONFIG_HID_CHERRY is not set
# CONFIG_HID_CHICONY is not set
# CONFIG_HID_CYPRESS is not set
# CONFIG_HID_EZKEY is not set
# CONFIG_HID_GYRATION is not set
# CONFIG_HID_LOGITECH is not set
# CONFIG_HID_MICROSOFT is not set
# CONFIG_HID_MONTEREY is not set
# CONFIG_HID_NTRIG is not set
# CONFIG_HID_PANTHERLORD is not set
# CONFIG_HID_PETALYNX is not set
# CONFIG_HID_SAMSUNG is not set
# CONFIG_HID_SONY is not set
# CONFIG_HID_SUNPLUS is not set
# CONFIG_GREENASIA_FF is not set
# CONFIG_HID_TOPSEED is not set
# CONFIG_THRUSTMASTER_FF is not set
# CONFIG_ZEROPLUS_FF is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
@ -898,7 +972,11 @@ CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DYNAMIC_MINORS is not set
CONFIG_USB_SUSPEND=y
# CONFIG_USB_OTG is not set
CONFIG_USB_MON=y
# CONFIG_USB_OTG_WHITELIST is not set
# CONFIG_USB_OTG_BLACKLIST_HUB is not set
CONFIG_USB_MON=m
# CONFIG_USB_WUSB is not set
# CONFIG_USB_WUSB_CBAF is not set
#
# USB Host Controller Drivers
@ -909,6 +987,7 @@ CONFIG_USB_EHCI_HCD=m
# CONFIG_USB_EHCI_TT_NEWSCHED is not set
CONFIG_USB_EHCI_BIG_ENDIAN_MMIO=y
# CONFIG_USB_EHCI_HCD_PPC_OF is not set
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
CONFIG_USB_OHCI_HCD=m
@ -918,6 +997,7 @@ CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y
CONFIG_USB_OHCI_LITTLE_ENDIAN=y
# CONFIG_USB_SL811_HCD is not set
# CONFIG_USB_R8A66597_HCD is not set
# CONFIG_USB_HWA_HCD is not set
#
# Enable Host or Gadget support to see Inventra options
@ -929,20 +1009,20 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
# CONFIG_USB_ACM is not set
# CONFIG_USB_PRINTER is not set
# CONFIG_USB_WDM is not set
# CONFIG_USB_TMC is not set
#
# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
#
#
# may also be needed; see USB_STORAGE Help for more information
# see USB_STORAGE Help for more information
#
CONFIG_USB_STORAGE=m
# CONFIG_USB_STORAGE_DEBUG is not set
# CONFIG_USB_STORAGE_DATAFAB is not set
# CONFIG_USB_STORAGE_FREECOM is not set
# CONFIG_USB_STORAGE_ISD200 is not set
# CONFIG_USB_STORAGE_DPCM is not set
# CONFIG_USB_STORAGE_USBAT is not set
# CONFIG_USB_STORAGE_SDDR09 is not set
# CONFIG_USB_STORAGE_SDDR55 is not set
@ -950,7 +1030,6 @@ CONFIG_USB_STORAGE=m
# CONFIG_USB_STORAGE_ALAUDA is not set
# CONFIG_USB_STORAGE_ONETOUCH is not set
# CONFIG_USB_STORAGE_KARMA is not set
# CONFIG_USB_STORAGE_SIERRA is not set
# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
# CONFIG_USB_LIBUSUAL is not set
@ -971,6 +1050,7 @@ CONFIG_USB_STORAGE=m
# CONFIG_USB_EMI62 is not set
# CONFIG_USB_EMI26 is not set
# CONFIG_USB_ADUTUX is not set
# CONFIG_USB_SEVSEG is not set
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
@ -988,7 +1068,12 @@ CONFIG_USB_STORAGE=m
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
#
# OTG and related infrastructure
#
# CONFIG_MMC is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
@ -1014,12 +1099,15 @@ CONFIG_RTC_INTF_DEV=y
# Platform RTC drivers
#
# CONFIG_RTC_DRV_CMOS is not set
# CONFIG_RTC_DRV_DS1286 is not set
# CONFIG_RTC_DRV_DS1511 is not set
# CONFIG_RTC_DRV_DS1553 is not set
# CONFIG_RTC_DRV_DS1742 is not set
# CONFIG_RTC_DRV_STK17TA8 is not set
# CONFIG_RTC_DRV_M48T86 is not set
# CONFIG_RTC_DRV_M48T35 is not set
# CONFIG_RTC_DRV_M48T59 is not set
# CONFIG_RTC_DRV_BQ4802 is not set
# CONFIG_RTC_DRV_V3020 is not set
#
@ -1028,6 +1116,7 @@ CONFIG_RTC_INTF_DEV=y
CONFIG_RTC_DRV_PPC=m
# CONFIG_DMADEVICES is not set
# CONFIG_UIO is not set
# CONFIG_STAGING is not set
#
# File systems
@ -1035,26 +1124,35 @@ CONFIG_RTC_DRV_PPC=m
CONFIG_EXT2_FS=m
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS=m
CONFIG_EXT3_FS_XATTR=y
# CONFIG_EXT3_FS_POSIX_ACL is not set
# CONFIG_EXT3_FS_SECURITY is not set
# CONFIG_EXT4DEV_FS is not set
CONFIG_JBD=y
CONFIG_EXT4_FS=y
# CONFIG_EXT4DEV_COMPAT is not set
CONFIG_EXT4_FS_XATTR=y
# CONFIG_EXT4_FS_POSIX_ACL is not set
# CONFIG_EXT4_FS_SECURITY is not set
CONFIG_JBD=m
# CONFIG_JBD_DEBUG is not set
CONFIG_JBD2=y
# CONFIG_JBD2_DEBUG is not set
CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
CONFIG_QUOTA=y
# CONFIG_QUOTA_NETLINK_INTERFACE is not set
CONFIG_PRINT_QUOTA_WARNING=y
CONFIG_QUOTA_TREE=y
# CONFIG_QFMT_V1 is not set
CONFIG_QFMT_V2=y
CONFIG_QUOTACTL=y
@ -1087,16 +1185,14 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_PROC_SYSCTL=y
CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
CONFIG_HUGETLBFS=y
CONFIG_HUGETLB_PAGE=y
# CONFIG_CONFIGFS_FS is not set
#
# Miscellaneous filesystems
#
CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@ -1106,6 +1202,7 @@ CONFIG_HUGETLB_PAGE=y
# CONFIG_EFS_FS is not set
# CONFIG_JFFS2_FS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_OMFS_FS is not set
@ -1126,6 +1223,7 @@ CONFIG_LOCKD_V4=y
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
CONFIG_SUNRPC_GSS=y
# CONFIG_SUNRPC_REGISTER_V4 is not set
CONFIG_RPCSEC_GSS_KRB5=y
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
@ -1190,9 +1288,9 @@ CONFIG_NLS_ISO8859_1=y
# Library routines
#
CONFIG_BITREVERSE=y
# CONFIG_GENERIC_FIND_FIRST_BIT is not set
CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
# CONFIG_CRC16 is not set
CONFIG_CRC16=y
CONFIG_CRC_T10DIF=y
CONFIG_CRC_ITU_T=m
CONFIG_CRC32=y
@ -1250,27 +1348,44 @@ CONFIG_DEBUG_WRITECOUNT=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_LIST=y
# CONFIG_DEBUG_SG is not set
CONFIG_FRAME_POINTER=y
# CONFIG_DEBUG_NOTIFIERS is not set
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_HAVE_FTRACE=y
CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
# CONFIG_FTRACE is not set
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_RING_BUFFER=y
CONFIG_TRACING=y
#
# Tracers
#
# CONFIG_FUNCTION_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_CONTEXT_SWITCH_TRACER is not set
# CONFIG_BOOT_TRACER is not set
# CONFIG_TRACE_BRANCH_PROFILING is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_FTRACE_STARTUP_TEST is not set
# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
CONFIG_PRINT_STACK_DEPTH=64
CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_DEBUG_PAGEALLOC is not set
# CONFIG_CODE_PATCHING_SELFTEST is not set
# CONFIG_FTR_FIXUP_SELFTEST is not set
# CONFIG_MSI_BITMAP_SELFTEST is not set
# CONFIG_XMON is not set
CONFIG_IRQSTACKS=y
# CONFIG_VIRQ_DEBUG is not set
@ -1282,16 +1397,26 @@ CONFIG_IRQSTACKS=y
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
CONFIG_CRYPTO=y
#
# Crypto core or helper
#
# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=m
CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=y
CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=y
CONFIG_CRYPTO_HASH2=y
CONFIG_CRYPTO_RNG=m
CONFIG_CRYPTO_RNG2=y
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_MANAGER2=y
CONFIG_CRYPTO_GF128MUL=m
# CONFIG_CRYPTO_NULL is not set
# CONFIG_CRYPTO_CRYPTD is not set
@ -1363,6 +1488,11 @@ CONFIG_CRYPTO_SALSA20=m
#
# CONFIG_CRYPTO_DEFLATE is not set
CONFIG_CRYPTO_LZO=m
#
# Random Number Generation
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
# CONFIG_PPC_CLOCK is not set
# CONFIG_VIRTUALIZATION is not set

View File

@ -241,9 +241,11 @@ extern const char *powerpc_base_platform;
/* We need to mark all pages as being coherent if we're SMP or we have a
* 74[45]x and an MPC107 host bridge. Also 83xx and PowerQUICC II
* require it for PCI "streaming/prefetch" to work properly.
* This is also required by the 52xx family.
*/
#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE) \
|| defined(CONFIG_PPC_83xx) || defined(CONFIG_8260)
|| defined(CONFIG_PPC_83xx) || defined(CONFIG_8260) \
|| defined(CONFIG_PPC_MPC52xx)
#define CPU_FTR_COMMON CPU_FTR_NEED_COHERENT
#else
#define CPU_FTR_COMMON 0
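
(Aside, illustration only: instantiating the conditional shows what the MPC52xx addition changes. The feature-bit value below is a placeholder, not the real cputable.h assignment.)

#include <stdio.h>

#define CPU_FTR_NEED_COHERENT	0x00000001UL	/* placeholder value */

#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE) \
	|| defined(CONFIG_PPC_83xx) || defined(CONFIG_8260) \
	|| defined(CONFIG_PPC_MPC52xx)
#define CPU_FTR_COMMON	CPU_FTR_NEED_COHERENT
#else
#define CPU_FTR_COMMON	0
#endif

int main(void)
{
	/* build with -DCONFIG_PPC_MPC52xx to see the coherent case */
	printf("CPU_FTR_COMMON = %#lx\n", (unsigned long)CPU_FTR_COMMON);
	return 0;
}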

View File

@ -511,7 +511,7 @@ InstructionTLBMiss:
and r1,r1,r2 /* writable if _RW and _DIRTY */
rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */
rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */
ori r1,r1,0xe14 /* clear out reserved bits and M */
ori r1,r1,0xe04 /* clear out reserved bits */
andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */
mtspr SPRN_RPA,r1
mfspr r3,SPRN_IMISS
@ -585,7 +585,7 @@ DataLoadTLBMiss:
and r1,r1,r2 /* writable if _RW and _DIRTY */
rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */
rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */
ori r1,r1,0xe14 /* clear out reserved bits and M */
ori r1,r1,0xe04 /* clear out reserved bits */
andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */
mtspr SPRN_RPA,r1
mfspr r3,SPRN_DMISS
@ -653,7 +653,7 @@ DataStoreTLBMiss:
stw r3,0(r2) /* update PTE (accessed/dirty bits) */
/* Convert linux-style PTE to low word of PPC-style PTE */
rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */
li r1,0xe15 /* clear out reserved bits and M */
li r1,0xe05 /* clear out reserved bits & PP lsb */
andc r1,r3,r1 /* PP = user? 2: 0 */
mtspr SPRN_RPA,r1
mfspr r3,SPRN_DMISS
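
(Aside, illustration only: the single bit dropped from each mask is 0x10, which — going by the removed "and M" wording in the comments — is the M memory-coherence bit, so the coherence setting from the Linux PTE now reaches the RPA instead of being forced off. A quick arithmetic check:)

#include <stdio.h>

int main(void)
{
	printf("%#x\n", 0xe14 & ~0xe04);	/* 0x10: the M bit */
	printf("%#x\n", 0xe15 & ~0xe05);	/* 0x10 here too */
	return 0;
}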

View File

@ -128,6 +128,13 @@ config PS3_FLASH
be disabled on the kernel command line using "ps3flash=off", to
not allocate this fixed buffer.
config PS3_VRAM
tristate "PS3 Video RAM Storage Driver"
depends on FB_PS3=y && BLOCK && m
help
This driver allows you to use excess PS3 video RAM as volatile
storage or system swap.
config PS3_LPM
tristate "PS3 Logical Performance Monitor support"
depends on PPC_PS3

View File

@ -22,4 +22,9 @@
#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */
#if defined(__KERNEL__) && !defined(__ASSEMBLY__) && defined(CONFIG_64BIT)
int s390_mmap_check(unsigned long addr, unsigned long len);
#define arch_mmap_check(addr,len,flags) s390_mmap_check(addr,len)
#endif
#endif /* __S390_MMAN_H__ */
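
(Aside, a userspace model, illustration only: an arch_mmap_check()-style hook runs before any mapping work and can veto the request or trigger a fixup such as the page-table upgrade used later in this series. Names below are hypothetical.)

#include <stdio.h>

static int s390_mmap_check_model(unsigned long addr, unsigned long len)
{
	(void)addr;
	(void)len;
	return 0;	/* 0 = proceed; nonzero = error returned to the caller */
}

static long do_mmap_model(unsigned long addr, unsigned long len)
{
	long error = s390_mmap_check_model(addr, len);

	if (error)
		return error;
	return (long)addr;	/* stand-in for the real mapping work */
}

int main(void)
{
	printf("%ld\n", do_mmap_model(0x10000, 4096));
	return 0;
}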

View File

@ -61,7 +61,7 @@ extern void print_cpu_info(struct cpuinfo_S390 *);
extern int get_cpu_capability(unsigned int *);
/*
* User space process size: 2GB for 31 bit, 4TB for 64 bit.
* User space process size: 2GB for 31 bit, 4TB or 8PB for 64 bit.
*/
#ifndef __s390x__
@ -70,8 +70,7 @@ extern int get_cpu_capability(unsigned int *);
#else /* __s390x__ */
#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk,TIF_31BIT) ? \
(1UL << 31) : (1UL << 53))
#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit)
#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
(1UL << 30) : (1UL << 41))
#define TASK_SIZE TASK_SIZE_OF(current)

View File

@ -30,6 +30,8 @@ static inline void s390_init_cpu_topology(void)
};
#endif
#define SD_MC_INIT SD_CPU_INIT
#include <asm-generic/topology.h>
#endif /* _ASM_S390_TOPOLOGY_H */

View File

@ -5,6 +5,8 @@
*
*/
#include <asm/asm-offsets.h>
#ifndef CONFIG_64BIT
.globl _mcount
_mcount:
@ -14,7 +16,7 @@ _mcount:
ahi %r15,-96
l %r3,100(%r15)
la %r2,0(%r14)
st %r1,0(%r15)
st %r1,__SF_BACKCHAIN(%r15)
la %r3,0(%r3)
bras %r14,0f
.long ftrace_trace_function
@ -38,7 +40,7 @@ _mcount:
stg %r14,112(%r15)
lgr %r1,%r15
aghi %r15,-160
stg %r1,0(%r15)
stg %r1,__SF_BACKCHAIN(%r15)
lgr %r2,%r14
lg %r3,168(%r15)
larl %r14,ftrace_trace_function

View File

@ -61,7 +61,7 @@ static uint32_t __div64_31(uint64_t *n, uint32_t base)
" clr %0,%3\n"
" jl 0f\n"
" slr %0,%3\n"
" alr %1,%2\n"
" ahi %1,1\n"
"0:\n"
: "+d" (reg2), "+d" (reg3), "=d" (tmp)
: "d" (base), "2" (1UL) : "cc" );

View File

@ -119,8 +119,6 @@ retry:
goto fault;
pfn = pte_pfn(*pte);
if (!pfn_valid(pfn))
goto out;
offset = uaddr & (PAGE_SIZE - 1);
size = min(n - done, PAGE_SIZE - offset);
@ -135,7 +133,6 @@ retry:
done += size;
uaddr += size;
} while (done < n);
out:
spin_unlock(&mm->page_table_lock);
return n - done;
fault:
@ -163,9 +160,6 @@ retry:
goto fault;
pfn = pte_pfn(*pte);
if (!pfn_valid(pfn))
goto out;
ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
out:
return ret;
@ -244,11 +238,6 @@ retry:
goto fault;
pfn = pte_pfn(*pte);
if (!pfn_valid(pfn)) {
done = -1;
goto out;
}
offset = uaddr & (PAGE_SIZE-1);
addr = (char *)(pfn << PAGE_SHIFT) + offset;
len = min(count - done, PAGE_SIZE - offset);
@ -256,7 +245,6 @@ retry:
done += len_str;
uaddr += len_str;
} while ((len_str == len) && (done < count));
out:
spin_unlock(&mm->page_table_lock);
return done + 1;
fault:
@ -325,12 +313,7 @@ retry:
}
pfn_from = pte_pfn(*pte_from);
if (!pfn_valid(pfn_from))
goto out;
pfn_to = pte_pfn(*pte_to);
if (!pfn_valid(pfn_to))
goto out;
offset_from = uaddr_from & (PAGE_SIZE-1);
offset_to = uaddr_from & (PAGE_SIZE-1);
offset_max = max(offset_from, offset_to);
@ -342,7 +325,6 @@ retry:
uaddr_from += size;
uaddr_to += size;
} while (done < n);
out:
spin_unlock(&mm->page_table_lock);
return n - done;
fault:

View File

@ -35,7 +35,7 @@
* Leave an at least ~128 MB hole.
*/
#define MIN_GAP (128*1024*1024)
#define MAX_GAP (TASK_SIZE/6*5)
#define MAX_GAP (STACK_TOP/6*5)
static inline unsigned long mmap_base(void)
{
@ -46,7 +46,7 @@ static inline unsigned long mmap_base(void)
else if (gap > MAX_GAP)
gap = MAX_GAP;
return TASK_SIZE - (gap & PAGE_MASK);
return STACK_TOP - (gap & PAGE_MASK);
}
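
(Aside, a userspace model, illustration only: the gap reserved at the top of the address space is clamped to [MIN_GAP, MAX_GAP] and subtracted from STACK_TOP, which stays fixed even now that TASK_SIZE follows mm->context.asce_limit. The STACK_TOP value below is a placeholder.)

#include <stdio.h>

#define PAGE_MASK	(~0xfffUL)
#define STACK_TOP	(1UL << 42)	/* placeholder, not the real s390 value */
#define MIN_GAP		(128UL * 1024 * 1024)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base_model(unsigned long stack_rlimit)
{
	unsigned long gap = stack_rlimit;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	return STACK_TOP - (gap & PAGE_MASK);
}

int main(void)
{
	printf("%#lx\n", mmap_base_model(8UL * 1024 * 1024));
	return 0;
}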
static inline int mmap_is_legacy(void)
@ -89,42 +89,58 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
#else
int s390_mmap_check(unsigned long addr, unsigned long len)
{
if (!test_thread_flag(TIF_31BIT) &&
len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
return crst_table_upgrade(current->mm, 1UL << 53);
return 0;
}
static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
unsigned long area;
int rc;
addr = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
if (addr & ~PAGE_MASK)
return addr;
if (unlikely(mm->context.asce_limit < addr + len)) {
rc = crst_table_upgrade(mm, addr + len);
area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
if (!(area & ~PAGE_MASK))
return area;
if (area == -ENOMEM &&
!test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) {
/* Upgrade the page table to 4 levels and retry. */
rc = crst_table_upgrade(mm, 1UL << 53);
if (rc)
return (unsigned long) rc;
area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
}
return addr;
return area;
}
static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
unsigned long area;
int rc;
addr = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
if (addr & ~PAGE_MASK)
return addr;
if (unlikely(mm->context.asce_limit < addr + len)) {
rc = crst_table_upgrade(mm, addr + len);
area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
if (!(area & ~PAGE_MASK))
return area;
if (area == -ENOMEM &&
!test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) {
/* Upgrade the page table to 4 levels and retry. */
rc = crst_table_upgrade(mm, 1UL << 53);
if (rc)
return (unsigned long) rc;
area = arch_get_unmapped_area_topdown(filp, addr, len,
pgoff, flags);
}
return addr;
return area;
}
/*
* This function, called very early during the creation of a new

View File

@ -117,6 +117,7 @@ repeat:
crst_table_init(table, entry);
pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
mm->pgd = (pgd_t *) table;
mm->task_size = mm->context.asce_limit;
table = NULL;
}
spin_unlock(&mm->page_table_lock);
@ -154,6 +155,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
BUG();
}
mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
mm->task_size = mm->context.asce_limit;
crst_table_free(mm, (unsigned long *) pgd);
}
update_mm(mm, current);

View File

@ -9,6 +9,7 @@ obj-$(CONFIG_MAC_FLOPPY) += swim3.o
obj-$(CONFIG_BLK_DEV_FD) += floppy.o
obj-$(CONFIG_AMIGA_FLOPPY) += amiflop.o
obj-$(CONFIG_PS3_DISK) += ps3disk.o
obj-$(CONFIG_PS3_VRAM) += ps3vram.o
obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o
obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
obj-$(CONFIG_BLK_DEV_RAM) += brd.o

drivers/block/ps3vram.c — new file, 865 lines
View File

@ -0,0 +1,865 @@
/*
* ps3vram - Use extra PS3 video ram as MTD block device.
*
* Copyright 2009 Sony Corporation
*
* Based on the MTD ps3vram driver, which is
* Copyright (c) 2007-2008 Jim Paris <jim@jtan.com>
* RSX DMA support added by Vivien Chappelier <vivien.chappelier@free.fr>
*/
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#include <asm/ps3.h>
#define DEVICE_NAME "ps3vram"
#define XDR_BUF_SIZE (2 * 1024 * 1024) /* XDR buffer (must be 1MiB aligned) */
#define XDR_IOIF 0x0c000000
#define FIFO_BASE XDR_IOIF
#define FIFO_SIZE (64 * 1024)
#define DMA_PAGE_SIZE (4 * 1024)
#define CACHE_PAGE_SIZE (256 * 1024)
#define CACHE_PAGE_COUNT ((XDR_BUF_SIZE - FIFO_SIZE) / CACHE_PAGE_SIZE)
#define CACHE_OFFSET CACHE_PAGE_SIZE
#define FIFO_OFFSET 0
#define CTRL_PUT 0x10
#define CTRL_GET 0x11
#define CTRL_TOP 0x15
#define UPLOAD_SUBCH 1
#define DOWNLOAD_SUBCH 2
#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104
#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT 0x601
#define CACHE_PAGE_PRESENT 1
#define CACHE_PAGE_DIRTY 2
struct ps3vram_tag {
unsigned int address;
unsigned int flags;
};
struct ps3vram_cache {
unsigned int page_count;
unsigned int page_size;
struct ps3vram_tag *tags;
unsigned int hit;
unsigned int miss;
};
struct ps3vram_priv {
struct request_queue *queue;
struct gendisk *gendisk;
u64 size;
u64 memory_handle;
u64 context_handle;
u32 *ctrl;
u32 *reports;
u8 __iomem *ddr_base;
u8 *xdr_buf;
u32 *fifo_base;
u32 *fifo_ptr;
struct ps3vram_cache cache;
/* Used to serialize cache/DMA operations */
struct mutex lock;
};
static int ps3vram_major;
static struct block_device_operations ps3vram_fops = {
.owner = THIS_MODULE,
};
#define DMA_NOTIFIER_HANDLE_BASE 0x66604200 /* first DMA notifier handle */
#define DMA_NOTIFIER_OFFSET_BASE 0x1000 /* first DMA notifier offset */
#define DMA_NOTIFIER_SIZE 0x40
#define NOTIFIER 7 /* notifier used for completion report */
static char *size = "256M";
module_param(size, charp, 0);
MODULE_PARM_DESC(size, "memory size");
static u32 *ps3vram_get_notifier(u32 *reports, int notifier)
{
return (void *)reports + DMA_NOTIFIER_OFFSET_BASE +
DMA_NOTIFIER_SIZE * notifier;
}
static void ps3vram_notifier_reset(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = dev->core.driver_data;
u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER);
int i;
for (i = 0; i < 4; i++)
notify[i] = 0xffffffff;
}
static int ps3vram_notifier_wait(struct ps3_system_bus_device *dev,
unsigned int timeout_ms)
{
struct ps3vram_priv *priv = dev->core.driver_data;
u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER);
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
do {
if (!notify[3])
return 0;
msleep(1);
} while (time_before(jiffies, timeout));
return -ETIMEDOUT;
}
static void ps3vram_init_ring(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = dev->core.driver_data;
priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET;
priv->ctrl[CTRL_GET] = FIFO_BASE + FIFO_OFFSET;
}
static int ps3vram_wait_ring(struct ps3_system_bus_device *dev,
unsigned int timeout_ms)
{
struct ps3vram_priv *priv = dev->core.driver_data;
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
do {
if (priv->ctrl[CTRL_PUT] == priv->ctrl[CTRL_GET])
return 0;
msleep(1);
} while (time_before(jiffies, timeout));
dev_warn(&dev->core, "FIFO timeout (%08x/%08x/%08x)\n",
priv->ctrl[CTRL_PUT], priv->ctrl[CTRL_GET],
priv->ctrl[CTRL_TOP]);
return -ETIMEDOUT;
}
static void ps3vram_out_ring(struct ps3vram_priv *priv, u32 data)
{
*(priv->fifo_ptr)++ = data;
}
static void ps3vram_begin_ring(struct ps3vram_priv *priv, u32 chan, u32 tag,
u32 size)
{
ps3vram_out_ring(priv, (size << 18) | (chan << 13) | tag);
}
static void ps3vram_rewind_ring(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = dev->core.driver_data;
int status;
ps3vram_out_ring(priv, 0x20000000 | (FIFO_BASE + FIFO_OFFSET));
priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET;
/* asking the HV for a blit will kick the FIFO */
status = lv1_gpu_context_attribute(priv->context_handle,
L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, 0,
0, 0, 0);
if (status)
dev_err(&dev->core,
"%s: lv1_gpu_context_attribute failed %d\n", __func__,
status);
priv->fifo_ptr = priv->fifo_base;
}
static void ps3vram_fire_ring(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = dev->core.driver_data;
int status;
mutex_lock(&ps3_gpu_mutex);
priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET +
(priv->fifo_ptr - priv->fifo_base) * sizeof(u32);
/* asking the HV for a blit will kick the FIFO */
status = lv1_gpu_context_attribute(priv->context_handle,
L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, 0,
0, 0, 0);
if (status)
dev_err(&dev->core,
"%s: lv1_gpu_context_attribute failed %d\n", __func__,
status);
if ((priv->fifo_ptr - priv->fifo_base) * sizeof(u32) >
FIFO_SIZE - 1024) {
dev_dbg(&dev->core, "FIFO full, rewinding\n");
ps3vram_wait_ring(dev, 200);
ps3vram_rewind_ring(dev);
}
mutex_unlock(&ps3_gpu_mutex);
}
static void ps3vram_bind(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = dev->core.driver_data;
ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0, 1);
ps3vram_out_ring(priv, 0x31337303);
ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x180, 3);
ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER);
ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */
ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */
ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0, 1);
ps3vram_out_ring(priv, 0x3137c0de);
ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x180, 3);
ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER);
ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */
ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */
ps3vram_fire_ring(dev);
}
static int ps3vram_upload(struct ps3_system_bus_device *dev,
unsigned int src_offset, unsigned int dst_offset,
int len, int count)
{
struct ps3vram_priv *priv = dev->core.driver_data;
ps3vram_begin_ring(priv, UPLOAD_SUBCH,
NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
ps3vram_out_ring(priv, XDR_IOIF + src_offset);
ps3vram_out_ring(priv, dst_offset);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, count);
ps3vram_out_ring(priv, (1 << 8) | 1);
ps3vram_out_ring(priv, 0);
ps3vram_notifier_reset(dev);
ps3vram_begin_ring(priv, UPLOAD_SUBCH,
NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1);
ps3vram_out_ring(priv, 0);
ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x100, 1);
ps3vram_out_ring(priv, 0);
ps3vram_fire_ring(dev);
if (ps3vram_notifier_wait(dev, 200) < 0) {
dev_warn(&dev->core, "%s: Notifier timeout\n", __func__);
return -1;
}
return 0;
}
static int ps3vram_download(struct ps3_system_bus_device *dev,
unsigned int src_offset, unsigned int dst_offset,
int len, int count)
{
struct ps3vram_priv *priv = dev->core.driver_data;
ps3vram_begin_ring(priv, DOWNLOAD_SUBCH,
NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
ps3vram_out_ring(priv, src_offset);
ps3vram_out_ring(priv, XDR_IOIF + dst_offset);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, count);
ps3vram_out_ring(priv, (1 << 8) | 1);
ps3vram_out_ring(priv, 0);
ps3vram_notifier_reset(dev);
ps3vram_begin_ring(priv, DOWNLOAD_SUBCH,
NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1);
ps3vram_out_ring(priv, 0);
ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x100, 1);
ps3vram_out_ring(priv, 0);
ps3vram_fire_ring(dev);
if (ps3vram_notifier_wait(dev, 200) < 0) {
dev_warn(&dev->core, "%s: Notifier timeout\n", __func__);
return -1;
}
return 0;
}
static void ps3vram_cache_evict(struct ps3_system_bus_device *dev, int entry)
{
struct ps3vram_priv *priv = dev->core.driver_data;
struct ps3vram_cache *cache = &priv->cache;
if (!(cache->tags[entry].flags & CACHE_PAGE_DIRTY))
return;
dev_dbg(&dev->core, "Flushing %d: 0x%08x\n", entry,
cache->tags[entry].address);
if (ps3vram_upload(dev, CACHE_OFFSET + entry * cache->page_size,
cache->tags[entry].address, DMA_PAGE_SIZE,
cache->page_size / DMA_PAGE_SIZE) < 0) {
dev_err(&dev->core,
"Failed to upload from 0x%x to " "0x%x size 0x%x\n",
entry * cache->page_size, cache->tags[entry].address,
cache->page_size);
}
cache->tags[entry].flags &= ~CACHE_PAGE_DIRTY;
}
static void ps3vram_cache_load(struct ps3_system_bus_device *dev, int entry,
unsigned int address)
{
struct ps3vram_priv *priv = dev->core.driver_data;
struct ps3vram_cache *cache = &priv->cache;
dev_dbg(&dev->core, "Fetching %d: 0x%08x\n", entry, address);
if (ps3vram_download(dev, address,
CACHE_OFFSET + entry * cache->page_size,
DMA_PAGE_SIZE,
cache->page_size / DMA_PAGE_SIZE) < 0) {
dev_err(&dev->core,
"Failed to download from 0x%x to 0x%x size 0x%x\n",
address, entry * cache->page_size, cache->page_size);
}
cache->tags[entry].address = address;
cache->tags[entry].flags |= CACHE_PAGE_PRESENT;
}
static void ps3vram_cache_flush(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = dev->core.driver_data;
struct ps3vram_cache *cache = &priv->cache;
int i;
dev_dbg(&dev->core, "FLUSH\n");
for (i = 0; i < cache->page_count; i++) {
ps3vram_cache_evict(dev, i);
cache->tags[i].flags = 0;
}
}
static unsigned int ps3vram_cache_match(struct ps3_system_bus_device *dev,
loff_t address)
{
struct ps3vram_priv *priv = dev->core.driver_data;
struct ps3vram_cache *cache = &priv->cache;
unsigned int base;
unsigned int offset;
int i;
static int counter;
offset = (unsigned int) (address & (cache->page_size - 1));
base = (unsigned int) (address - offset);
/* fully associative check */
for (i = 0; i < cache->page_count; i++) {
if ((cache->tags[i].flags & CACHE_PAGE_PRESENT) &&
cache->tags[i].address == base) {
cache->hit++;
dev_dbg(&dev->core, "Found entry %d: 0x%08x\n", i,
cache->tags[i].address);
return i;
}
}
/* choose a random entry */
i = (jiffies + (counter++)) % cache->page_count;
dev_dbg(&dev->core, "Using entry %d\n", i);
ps3vram_cache_evict(dev, i);
ps3vram_cache_load(dev, i, base);
cache->miss++;
return i;
}
static int ps3vram_cache_init(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = dev->core.driver_data;
priv->cache.page_count = CACHE_PAGE_COUNT;
priv->cache.page_size = CACHE_PAGE_SIZE;
priv->cache.tags = kzalloc(sizeof(struct ps3vram_tag) *
CACHE_PAGE_COUNT, GFP_KERNEL);
if (priv->cache.tags == NULL) {
dev_err(&dev->core, "Could not allocate cache tags\n");
return -ENOMEM;
}
dev_info(&dev->core, "Created ram cache: %d entries, %d KiB each\n",
CACHE_PAGE_COUNT, CACHE_PAGE_SIZE / 1024);
return 0;
}
static void ps3vram_cache_cleanup(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = dev->core.driver_data;
ps3vram_cache_flush(dev);
kfree(priv->cache.tags);
}
static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct ps3vram_priv *priv = dev->core.driver_data;
unsigned int cached, count;
dev_dbg(&dev->core, "%s: from=0x%08x len=0x%zx\n", __func__,
(unsigned int)from, len);
if (from >= priv->size)
return -EIO;
if (len > priv->size - from)
len = priv->size - from;
/* Copy from vram to buf */
count = len;
while (count) {
unsigned int offset, avail;
unsigned int entry;
offset = (unsigned int) (from & (priv->cache.page_size - 1));
avail = priv->cache.page_size - offset;
mutex_lock(&priv->lock);
entry = ps3vram_cache_match(dev, from);
cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;
dev_dbg(&dev->core, "%s: from=%08x cached=%08x offset=%08x "
"avail=%08x count=%08x\n", __func__,
(unsigned int)from, cached, offset, avail, count);
if (avail > count)
avail = count;
memcpy(buf, priv->xdr_buf + cached, avail);
mutex_unlock(&priv->lock);
buf += avail;
count -= avail;
from += avail;
}
*retlen = len;
return 0;
}
static int ps3vram_write(struct ps3_system_bus_device *dev, loff_t to,
size_t len, size_t *retlen, const u_char *buf)
{
struct ps3vram_priv *priv = dev->core.driver_data;
unsigned int cached, count;
if (to >= priv->size)
return -EIO;
if (len > priv->size - to)
len = priv->size - to;
/* Copy from buf to vram */
count = len;
while (count) {
unsigned int offset, avail;
unsigned int entry;
offset = (unsigned int) (to & (priv->cache.page_size - 1));
avail = priv->cache.page_size - offset;
mutex_lock(&priv->lock);
entry = ps3vram_cache_match(dev, to);
cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;
dev_dbg(&dev->core, "%s: to=%08x cached=%08x offset=%08x "
"avail=%08x count=%08x\n", __func__, (unsigned int)to,
cached, offset, avail, count);
if (avail > count)
avail = count;
memcpy(priv->xdr_buf + cached, buf, avail);
priv->cache.tags[entry].flags |= CACHE_PAGE_DIRTY;
mutex_unlock(&priv->lock);
buf += avail;
count -= avail;
to += avail;
}
*retlen = len;
return 0;
}
static int ps3vram_proc_show(struct seq_file *m, void *v)
{
struct ps3vram_priv *priv = m->private;
seq_printf(m, "hit:%u\nmiss:%u\n", priv->cache.hit, priv->cache.miss);
return 0;
}
static int ps3vram_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, ps3vram_proc_show, PDE(inode)->data);
}
static const struct file_operations ps3vram_proc_fops = {
.owner = THIS_MODULE,
.open = ps3vram_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void __devinit ps3vram_proc_init(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = dev->core.driver_data;
struct proc_dir_entry *pde;
pde = proc_create(DEVICE_NAME, 0444, NULL, &ps3vram_proc_fops);
if (!pde) {
dev_warn(&dev->core, "failed to create /proc entry\n");
return;
}
pde->owner = THIS_MODULE;
pde->data = priv;
}
static int ps3vram_make_request(struct request_queue *q, struct bio *bio)
{
struct ps3_system_bus_device *dev = q->queuedata;
int write = bio_data_dir(bio) == WRITE;
const char *op = write ? "write" : "read";
loff_t offset = bio->bi_sector << 9;
int error = 0;
struct bio_vec *bvec;
unsigned int i;
dev_dbg(&dev->core, "%s\n", __func__);
bio_for_each_segment(bvec, bio, i) {
/* PS3 is ppc64, so we don't handle highmem */
char *ptr = page_address(bvec->bv_page) + bvec->bv_offset;
size_t len = bvec->bv_len, retlen;
dev_dbg(&dev->core, " %s %zu bytes at offset %llu\n", op,
len, offset);
if (write)
error = ps3vram_write(dev, offset, len, &retlen, ptr);
else
error = ps3vram_read(dev, offset, len, &retlen, ptr);
if (error) {
dev_err(&dev->core, "%s failed\n", op);
goto out;
}
if (retlen != len) {
dev_err(&dev->core, "Short %s\n", op);
goto out;
}
offset += len;
}
dev_dbg(&dev->core, "%s completed\n", op);
out:
bio_endio(bio, error);
return 0;
}
static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv;
int error, status;
struct request_queue *queue;
struct gendisk *gendisk;
u64 ddr_lpar, ctrl_lpar, info_lpar, reports_lpar, ddr_size,
reports_size;
char *rest;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
error = -ENOMEM;
goto fail;
}
mutex_init(&priv->lock);
dev->core.driver_data = priv;
priv = dev->core.driver_data;
/* Allocate XDR buffer (1MiB aligned) */
priv->xdr_buf = (void *)__get_free_pages(GFP_KERNEL,
get_order(XDR_BUF_SIZE));
if (priv->xdr_buf == NULL) {
dev_err(&dev->core, "Could not allocate XDR buffer\n");
error = -ENOMEM;
goto fail_free_priv;
}
/* Put FIFO at beginning of XDR buffer */
priv->fifo_base = (u32 *) (priv->xdr_buf + FIFO_OFFSET);
priv->fifo_ptr = priv->fifo_base;
/* XXX: Need to open GPU, in case ps3fb or snd_ps3 aren't loaded */
if (ps3_open_hv_device(dev)) {
dev_err(&dev->core, "ps3_open_hv_device failed\n");
error = -EAGAIN;
goto out_free_xdr_buf;	/* device was not opened; skip the close */
}
/* Request memory */
status = -1;
ddr_size = ALIGN(memparse(size, &rest), 1024*1024);
if (!ddr_size) {
dev_err(&dev->core, "Specified size is too small\n");
error = -EINVAL;
goto out_close_gpu;
}
while (ddr_size > 0) {
status = lv1_gpu_memory_allocate(ddr_size, 0, 0, 0, 0,
&priv->memory_handle,
&ddr_lpar);
if (!status)
break;
ddr_size -= 1024*1024;
}
if (status) {
dev_err(&dev->core, "lv1_gpu_memory_allocate failed %d\n",
status);
error = -ENOMEM;
goto out_close_gpu;	/* device was opened above; close it too */
}
/* Request context */
status = lv1_gpu_context_allocate(priv->memory_handle, 0,
&priv->context_handle, &ctrl_lpar,
&info_lpar, &reports_lpar,
&reports_size);
if (status) {
dev_err(&dev->core, "lv1_gpu_context_allocate failed %d\n",
status);
error = -ENOMEM;
goto out_free_memory;
}
/* Map XDR buffer to RSX */
status = lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF,
ps3_mm_phys_to_lpar(__pa(priv->xdr_buf)),
XDR_BUF_SIZE, 0);
if (status) {
dev_err(&dev->core, "lv1_gpu_context_iomap failed %d\n",
status);
error = -ENOMEM;
goto out_free_context;
}
priv->ddr_base = ioremap_flags(ddr_lpar, ddr_size, _PAGE_NO_CACHE);
if (!priv->ddr_base) {
dev_err(&dev->core, "ioremap DDR failed\n");
error = -ENOMEM;
goto out_free_context;
}
priv->ctrl = ioremap(ctrl_lpar, 64 * 1024);
if (!priv->ctrl) {
dev_err(&dev->core, "ioremap CTRL failed\n");
error = -ENOMEM;
goto out_unmap_vram;
}
priv->reports = ioremap(reports_lpar, reports_size);
if (!priv->reports) {
dev_err(&dev->core, "ioremap REPORTS failed\n");
error = -ENOMEM;
goto out_unmap_ctrl;
}
mutex_lock(&ps3_gpu_mutex);
ps3vram_init_ring(dev);
mutex_unlock(&ps3_gpu_mutex);
priv->size = ddr_size;
ps3vram_bind(dev);
mutex_lock(&ps3_gpu_mutex);
error = ps3vram_wait_ring(dev, 100);
mutex_unlock(&ps3_gpu_mutex);
if (error < 0) {
dev_err(&dev->core, "Failed to initialize channels\n");
error = -ETIMEDOUT;
goto out_unmap_reports;
}
ps3vram_cache_init(dev);
ps3vram_proc_init(dev);
queue = blk_alloc_queue(GFP_KERNEL);
if (!queue) {
dev_err(&dev->core, "blk_alloc_queue failed\n");
error = -ENOMEM;
goto out_cache_cleanup;
}
priv->queue = queue;
queue->queuedata = dev;
blk_queue_make_request(queue, ps3vram_make_request);
blk_queue_max_phys_segments(queue, MAX_PHYS_SEGMENTS);
blk_queue_max_hw_segments(queue, MAX_HW_SEGMENTS);
blk_queue_max_segment_size(queue, MAX_SEGMENT_SIZE);
blk_queue_max_sectors(queue, SAFE_MAX_SECTORS);
gendisk = alloc_disk(1);
if (!gendisk) {
dev_err(&dev->core, "alloc_disk failed\n");
error = -ENOMEM;
goto fail_cleanup_queue;
}
priv->gendisk = gendisk;
gendisk->major = ps3vram_major;
gendisk->first_minor = 0;
gendisk->fops = &ps3vram_fops;
gendisk->queue = queue;
gendisk->private_data = dev;
gendisk->driverfs_dev = &dev->core;
strlcpy(gendisk->disk_name, DEVICE_NAME, sizeof(gendisk->disk_name));
set_capacity(gendisk, priv->size >> 9);
dev_info(&dev->core, "%s: Using %lu MiB of GPU memory\n",
gendisk->disk_name, get_capacity(gendisk) >> 11);
add_disk(gendisk);
return 0;
fail_cleanup_queue:
blk_cleanup_queue(queue);
out_cache_cleanup:
remove_proc_entry(DEVICE_NAME, NULL);
ps3vram_cache_cleanup(dev);
out_unmap_reports:
iounmap(priv->reports);
out_unmap_ctrl:
iounmap(priv->ctrl);
out_unmap_vram:
iounmap(priv->ddr_base);
out_free_context:
lv1_gpu_context_free(priv->context_handle);
out_free_memory:
lv1_gpu_memory_free(priv->memory_handle);
out_close_gpu:
ps3_close_hv_device(dev);
out_free_xdr_buf:
free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE));
fail_free_priv:
kfree(priv);
dev->core.driver_data = NULL;
fail:
return error;
}
static int ps3vram_remove(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = dev->core.driver_data;
del_gendisk(priv->gendisk);
put_disk(priv->gendisk);
blk_cleanup_queue(priv->queue);
remove_proc_entry(DEVICE_NAME, NULL);
ps3vram_cache_cleanup(dev);
iounmap(priv->reports);
iounmap(priv->ctrl);
iounmap(priv->ddr_base);
lv1_gpu_context_free(priv->context_handle);
lv1_gpu_memory_free(priv->memory_handle);
ps3_close_hv_device(dev);
free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE));
kfree(priv);
dev->core.driver_data = NULL;
return 0;
}
static struct ps3_system_bus_driver ps3vram = {
.match_id = PS3_MATCH_ID_GPU,
.match_sub_id = PS3_MATCH_SUB_ID_GPU_RAMDISK,
.core.name = DEVICE_NAME,
.core.owner = THIS_MODULE,
.probe = ps3vram_probe,
.remove = ps3vram_remove,
.shutdown = ps3vram_remove,
};
static int __init ps3vram_init(void)
{
int error;
if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
return -ENODEV;
error = register_blkdev(0, DEVICE_NAME);
if (error <= 0) {
pr_err("%s: register_blkdev failed %d\n", DEVICE_NAME, error);
return error;
}
ps3vram_major = error;
pr_info("%s: registered block device major %d\n", DEVICE_NAME,
ps3vram_major);
error = ps3_system_bus_driver_register(&ps3vram);
if (error)
unregister_blkdev(ps3vram_major, DEVICE_NAME);
return error;
}
static void __exit ps3vram_exit(void)
{
ps3_system_bus_driver_unregister(&ps3vram);
unregister_blkdev(ps3vram_major, DEVICE_NAME);
}
module_init(ps3vram_init);
module_exit(ps3vram_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PS3 Video RAM Storage Driver");
MODULE_AUTHOR("Sony Corporation");
MODULE_ALIAS(PS3_MODULE_ALIAS_GPU_RAMDISK);

View File

@ -306,7 +306,7 @@ static int hiddev_open(struct inode *inode, struct file *file)
return 0;
bail:
file->private_data = NULL;
kfree(list->hiddev);
kfree(list);
return res;
}
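
(Aside, a distilled illustration with hypothetical types: the error path must free the object this function allocated, not a shared object it merely points at — which is exactly what the kfree() change above corrects.)

#include <stdlib.h>

struct dev;				/* owned by the caller */

struct list_model {
	struct dev *dev;		/* borrowed reference */
};

static int open_model(struct dev *dev, int setup_fails)
{
	struct list_model *list = calloc(1, sizeof(*list));

	if (!list)
		return -1;
	list->dev = dev;
	if (setup_fails) {
		free(list);		/* right: frees our allocation */
		/* free(list->dev) here would free the shared device */
		return -1;
	}
	free(list);			/* normal teardown elided */
	return 0;
}

int main(void)
{
	return open_model(0, 1) == -1 ? 0 : 1;
}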
@ -323,7 +323,7 @@ static ssize_t hiddev_write(struct file * file, const char __user * buffer, size
*/
static ssize_t hiddev_read(struct file * file, char __user * buffer, size_t count, loff_t *ppos)
{
DECLARE_WAITQUEUE(wait, current);
DEFINE_WAIT(wait);
struct hiddev_list *list = file->private_data;
int event_size;
int retval;
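
(Aside, a sketch of the kernel idiom DEFINE_WAIT() belongs to — prepare_to_wait()/finish_wait() — as opposed to the older DECLARE_WAITQUEUE() plus add_wait_queue()/remove_wait_queue() dance. The wait-queue field name below is an assumption, not verified against hiddev:)

	DEFINE_WAIT(wait);

	while (list->head == list->tail) {
		prepare_to_wait(&list->hiddev->wait, &wait, TASK_INTERRUPTIBLE);
		if (list->head == list->tail)
			schedule();
		finish_wait(&list->hiddev->wait, &wait);
		if (signal_pending(current))
			return -ERESTARTSYS;
	}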

View File

@ -120,13 +120,6 @@ config MTD_PHRAM
doesn't have access to, memory beyond the mem=xxx limit, nvram,
memory on the video card, etc...
config MTD_PS3VRAM
tristate "PS3 video RAM"
depends on FB_PS3
help
This driver allows you to use excess PS3 video RAM as volatile
storage or system swap.
config MTD_LART
tristate "28F160xx flash driver for LART"
depends on SA1100_LART

View File

@ -16,4 +16,3 @@ obj-$(CONFIG_MTD_LART) += lart.o
obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
obj-$(CONFIG_MTD_M25P80) += m25p80.o
obj-$(CONFIG_MTD_PS3VRAM) += ps3vram.o

View File

@ -1,768 +0,0 @@
/**
* ps3vram - Use extra PS3 video ram as MTD block device.
*
* Copyright (c) 2007-2008 Jim Paris <jim@jtan.com>
* RSX DMA support added by Vivien Chappelier <vivien.chappelier@free.fr>
*/
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
#include <asm/lv1call.h>
#include <asm/ps3.h>
#define DEVICE_NAME "ps3vram"
#define XDR_BUF_SIZE (2 * 1024 * 1024) /* XDR buffer (must be 1MiB aligned) */
#define XDR_IOIF 0x0c000000
#define FIFO_BASE XDR_IOIF
#define FIFO_SIZE (64 * 1024)
#define DMA_PAGE_SIZE (4 * 1024)
#define CACHE_PAGE_SIZE (256 * 1024)
#define CACHE_PAGE_COUNT ((XDR_BUF_SIZE - FIFO_SIZE) / CACHE_PAGE_SIZE)
#define CACHE_OFFSET CACHE_PAGE_SIZE
#define FIFO_OFFSET 0
#define CTRL_PUT 0x10
#define CTRL_GET 0x11
#define CTRL_TOP 0x15
#define UPLOAD_SUBCH 1
#define DOWNLOAD_SUBCH 2
#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104
#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT 0x601
struct mtd_info ps3vram_mtd;
#define CACHE_PAGE_PRESENT 1
#define CACHE_PAGE_DIRTY 2
struct ps3vram_tag {
unsigned int address;
unsigned int flags;
};
struct ps3vram_cache {
unsigned int page_count;
unsigned int page_size;
struct ps3vram_tag *tags;
};
struct ps3vram_priv {
u64 memory_handle;
u64 context_handle;
u32 *ctrl;
u32 *reports;
u8 __iomem *ddr_base;
u8 *xdr_buf;
u32 *fifo_base;
u32 *fifo_ptr;
struct device *dev;
struct ps3vram_cache cache;
/* Used to serialize cache/DMA operations */
struct mutex lock;
};
#define DMA_NOTIFIER_HANDLE_BASE 0x66604200 /* first DMA notifier handle */
#define DMA_NOTIFIER_OFFSET_BASE 0x1000 /* first DMA notifier offset */
#define DMA_NOTIFIER_SIZE 0x40
#define NOTIFIER 7 /* notifier used for completion report */
/* A trailing '-' means to subtract off ps3fb_videomemory.size */
char *size = "256M-";
module_param(size, charp, 0);
MODULE_PARM_DESC(size, "memory size");
static u32 *ps3vram_get_notifier(u32 *reports, int notifier)
{
return (void *) reports +
DMA_NOTIFIER_OFFSET_BASE +
DMA_NOTIFIER_SIZE * notifier;
}
static void ps3vram_notifier_reset(struct mtd_info *mtd)
{
int i;
struct ps3vram_priv *priv = mtd->priv;
u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER);
for (i = 0; i < 4; i++)
notify[i] = 0xffffffff;
}
static int ps3vram_notifier_wait(struct mtd_info *mtd, unsigned int timeout_ms)
{
struct ps3vram_priv *priv = mtd->priv;
u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER);
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
do {
if (!notify[3])
return 0;
msleep(1);
} while (time_before(jiffies, timeout));
return -ETIMEDOUT;
}
static void ps3vram_init_ring(struct mtd_info *mtd)
{
struct ps3vram_priv *priv = mtd->priv;
priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET;
priv->ctrl[CTRL_GET] = FIFO_BASE + FIFO_OFFSET;
}
static int ps3vram_wait_ring(struct mtd_info *mtd, unsigned int timeout_ms)
{
struct ps3vram_priv *priv = mtd->priv;
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
do {
if (priv->ctrl[CTRL_PUT] == priv->ctrl[CTRL_GET])
return 0;
msleep(1);
} while (time_before(jiffies, timeout));
dev_dbg(priv->dev, "%s:%d: FIFO timeout (%08x/%08x/%08x)\n", __func__,
__LINE__, priv->ctrl[CTRL_PUT], priv->ctrl[CTRL_GET],
priv->ctrl[CTRL_TOP]);
return -ETIMEDOUT;
}
static void ps3vram_out_ring(struct ps3vram_priv *priv, u32 data)
{
*(priv->fifo_ptr)++ = data;
}
static void ps3vram_begin_ring(struct ps3vram_priv *priv, u32 chan,
u32 tag, u32 size)
{
ps3vram_out_ring(priv, (size << 18) | (chan << 13) | tag);
}
static void ps3vram_rewind_ring(struct mtd_info *mtd)
{
struct ps3vram_priv *priv = mtd->priv;
u64 status;
ps3vram_out_ring(priv, 0x20000000 | (FIFO_BASE + FIFO_OFFSET));
priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET;
/* asking the HV for a blit will kick the fifo */
status = lv1_gpu_context_attribute(priv->context_handle,
L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT,
0, 0, 0, 0);
if (status)
dev_err(priv->dev, "%s:%d: lv1_gpu_context_attribute failed\n",
__func__, __LINE__);
priv->fifo_ptr = priv->fifo_base;
}
static void ps3vram_fire_ring(struct mtd_info *mtd)
{
struct ps3vram_priv *priv = mtd->priv;
u64 status;
mutex_lock(&ps3_gpu_mutex);
priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET +
(priv->fifo_ptr - priv->fifo_base) * sizeof(u32);
/* asking the HV for a blit will kick the fifo */
status = lv1_gpu_context_attribute(priv->context_handle,
L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT,
0, 0, 0, 0);
if (status)
dev_err(priv->dev, "%s:%d: lv1_gpu_context_attribute failed\n",
__func__, __LINE__);
if ((priv->fifo_ptr - priv->fifo_base) * sizeof(u32) >
FIFO_SIZE - 1024) {
dev_dbg(priv->dev, "%s:%d: fifo full, rewinding\n", __func__,
__LINE__);
ps3vram_wait_ring(mtd, 200);
ps3vram_rewind_ring(mtd);
}
mutex_unlock(&ps3_gpu_mutex);
}
static void ps3vram_bind(struct mtd_info *mtd)
{
struct ps3vram_priv *priv = mtd->priv;
ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0, 1);
ps3vram_out_ring(priv, 0x31337303);
ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x180, 3);
ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER);
ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */
ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */
ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0, 1);
ps3vram_out_ring(priv, 0x3137c0de);
ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x180, 3);
ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER);
ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */
ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */
ps3vram_fire_ring(mtd);
}
static int ps3vram_upload(struct mtd_info *mtd, unsigned int src_offset,
unsigned int dst_offset, int len, int count)
{
struct ps3vram_priv *priv = mtd->priv;
ps3vram_begin_ring(priv, UPLOAD_SUBCH,
NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
ps3vram_out_ring(priv, XDR_IOIF + src_offset);
ps3vram_out_ring(priv, dst_offset);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, count);
ps3vram_out_ring(priv, (1 << 8) | 1);
ps3vram_out_ring(priv, 0);
ps3vram_notifier_reset(mtd);
ps3vram_begin_ring(priv, UPLOAD_SUBCH,
NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1);
ps3vram_out_ring(priv, 0);
ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x100, 1);
ps3vram_out_ring(priv, 0);
ps3vram_fire_ring(mtd);
if (ps3vram_notifier_wait(mtd, 200) < 0) {
dev_dbg(priv->dev, "%s:%d: notifier timeout\n", __func__,
__LINE__);
return -ETIMEDOUT;
}
return 0;
}
static int ps3vram_download(struct mtd_info *mtd, unsigned int src_offset,
unsigned int dst_offset, int len, int count)
{
struct ps3vram_priv *priv = mtd->priv;
ps3vram_begin_ring(priv, DOWNLOAD_SUBCH,
NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
ps3vram_out_ring(priv, src_offset);
ps3vram_out_ring(priv, XDR_IOIF + dst_offset);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, len);
ps3vram_out_ring(priv, count);
ps3vram_out_ring(priv, (1 << 8) | 1);
ps3vram_out_ring(priv, 0);
ps3vram_notifier_reset(mtd);
ps3vram_begin_ring(priv, DOWNLOAD_SUBCH,
NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1);
ps3vram_out_ring(priv, 0);
ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x100, 1);
ps3vram_out_ring(priv, 0);
ps3vram_fire_ring(mtd);
if (ps3vram_notifier_wait(mtd, 200) < 0) {
dev_dbg(priv->dev, "%s:%d: notifier timeout\n", __func__,
__LINE__);
return -ETIMEDOUT;
}
return 0;
}
static void ps3vram_cache_evict(struct mtd_info *mtd, int entry)
{
struct ps3vram_priv *priv = mtd->priv;
struct ps3vram_cache *cache = &priv->cache;
if (cache->tags[entry].flags & CACHE_PAGE_DIRTY) {
dev_dbg(priv->dev, "%s:%d: flushing %d : 0x%08x\n", __func__,
__LINE__, entry, cache->tags[entry].address);
if (ps3vram_upload(mtd,
CACHE_OFFSET + entry * cache->page_size,
cache->tags[entry].address,
DMA_PAGE_SIZE,
cache->page_size / DMA_PAGE_SIZE) < 0) {
dev_dbg(priv->dev, "%s:%d: failed to upload from "
"0x%x to 0x%x size 0x%x\n", __func__, __LINE__,
entry * cache->page_size,
cache->tags[entry].address, cache->page_size);
}
cache->tags[entry].flags &= ~CACHE_PAGE_DIRTY;
}
}
static void ps3vram_cache_load(struct mtd_info *mtd, int entry,
unsigned int address)
{
struct ps3vram_priv *priv = mtd->priv;
struct ps3vram_cache *cache = &priv->cache;
dev_dbg(priv->dev, "%s:%d: fetching %d : 0x%08x\n", __func__, __LINE__,
entry, address);
if (ps3vram_download(mtd,
address,
CACHE_OFFSET + entry * cache->page_size,
DMA_PAGE_SIZE,
cache->page_size / DMA_PAGE_SIZE) < 0) {
dev_err(priv->dev, "%s:%d: failed to download from "
"0x%x to 0x%x size 0x%x\n", __func__, __LINE__, address,
entry * cache->page_size, cache->page_size);
}
cache->tags[entry].address = address;
cache->tags[entry].flags |= CACHE_PAGE_PRESENT;
}
static void ps3vram_cache_flush(struct mtd_info *mtd)
{
struct ps3vram_priv *priv = mtd->priv;
struct ps3vram_cache *cache = &priv->cache;
int i;
dev_dbg(priv->dev, "%s:%d: FLUSH\n", __func__, __LINE__);
for (i = 0; i < cache->page_count; i++) {
ps3vram_cache_evict(mtd, i);
cache->tags[i].flags = 0;
}
}
static unsigned int ps3vram_cache_match(struct mtd_info *mtd, loff_t address)
{
struct ps3vram_priv *priv = mtd->priv;
struct ps3vram_cache *cache = &priv->cache;
unsigned int base;
unsigned int offset;
int i;
static int counter;
offset = (unsigned int) (address & (cache->page_size - 1));
base = (unsigned int) (address - offset);
/* fully associative check */
for (i = 0; i < cache->page_count; i++) {
if ((cache->tags[i].flags & CACHE_PAGE_PRESENT) &&
cache->tags[i].address == base) {
dev_dbg(priv->dev, "%s:%d: found entry %d : 0x%08x\n",
__func__, __LINE__, i, cache->tags[i].address);
return i;
}
}
/* choose a random entry */
i = (jiffies + (counter++)) % cache->page_count;
dev_dbg(priv->dev, "%s:%d: using entry %d\n", __func__, __LINE__, i);
ps3vram_cache_evict(mtd, i);
ps3vram_cache_load(mtd, i, base);
return i;
}
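/*
* Example of the base/offset split in ps3vram_cache_match(), assuming a
* hypothetical 256 KiB cache page size (the real value is set elsewhere
* in this file): for address 0x12345678, offset = 0x12345678 & 0x3ffff =
* 0x05678 and base = 0x12340000, so any access within that 256 KiB page
* maps to the same cache entry.
*/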
static int ps3vram_cache_init(struct mtd_info *mtd)
{
struct ps3vram_priv *priv = mtd->priv;
priv->cache.page_count = CACHE_PAGE_COUNT;
priv->cache.page_size = CACHE_PAGE_SIZE;
priv->cache.tags = kzalloc(sizeof(struct ps3vram_tag) *
CACHE_PAGE_COUNT, GFP_KERNEL);
if (priv->cache.tags == NULL) {
dev_err(priv->dev, "%s:%d: could not allocate cache tags\n",
__func__, __LINE__);
return -ENOMEM;
}
dev_info(priv->dev, "created ram cache: %d entries, %d KiB each\n",
CACHE_PAGE_COUNT, CACHE_PAGE_SIZE / 1024);
return 0;
}
static void ps3vram_cache_cleanup(struct mtd_info *mtd)
{
struct ps3vram_priv *priv = mtd->priv;
ps3vram_cache_flush(mtd);
kfree(priv->cache.tags);
}
static int ps3vram_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct ps3vram_priv *priv = mtd->priv;
if (instr->addr + instr->len > mtd->size)
return -EINVAL;
mutex_lock(&priv->lock);
ps3vram_cache_flush(mtd);
/* Set bytes to 0xFF */
memset_io(priv->ddr_base + instr->addr, 0xFF, instr->len);
mutex_unlock(&priv->lock);
instr->state = MTD_ERASE_DONE;
mtd_erase_callback(instr);
return 0;
}
static int ps3vram_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct ps3vram_priv *priv = mtd->priv;
unsigned int cached, count;
dev_dbg(priv->dev, "%s:%d: from=0x%08x len=0x%zx\n", __func__, __LINE__,
(unsigned int)from, len);
if (from >= mtd->size)
return -EINVAL;
if (len > mtd->size - from)
len = mtd->size - from;
/* Copy from vram to buf */
count = len;
while (count) {
unsigned int offset, avail;
unsigned int entry;
offset = (unsigned int) (from & (priv->cache.page_size - 1));
avail = priv->cache.page_size - offset;
mutex_lock(&priv->lock);
entry = ps3vram_cache_match(mtd, from);
cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;
dev_dbg(priv->dev, "%s:%d: from=%08x cached=%08x offset=%08x "
"avail=%08x count=%08x\n", __func__, __LINE__,
(unsigned int)from, cached, offset, avail, count);
if (avail > count)
avail = count;
memcpy(buf, priv->xdr_buf + cached, avail);
mutex_unlock(&priv->lock);
buf += avail;
count -= avail;
from += avail;
}
*retlen = len;
return 0;
}
static int ps3vram_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct ps3vram_priv *priv = mtd->priv;
unsigned int cached, count;
if (to >= mtd->size)
return -EINVAL;
if (len > mtd->size - to)
len = mtd->size - to;
/* Copy from buf to vram */
count = len;
while (count) {
unsigned int offset, avail;
unsigned int entry;
offset = (unsigned int) (to & (priv->cache.page_size - 1));
avail = priv->cache.page_size - offset;
mutex_lock(&priv->lock);
entry = ps3vram_cache_match(mtd, to);
cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;
dev_dbg(priv->dev, "%s:%d: to=%08x cached=%08x offset=%08x "
"avail=%08x count=%08x\n", __func__, __LINE__,
(unsigned int)to, cached, offset, avail, count);
if (avail > count)
avail = count;
memcpy(priv->xdr_buf + cached, buf, avail);
priv->cache.tags[entry].flags |= CACHE_PAGE_DIRTY;
mutex_unlock(&priv->lock);
buf += avail;
count -= avail;
to += avail;
}
*retlen = len;
return 0;
}
static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv;
int status;
u64 ddr_lpar;
u64 ctrl_lpar;
u64 info_lpar;
u64 reports_lpar;
u64 ddr_size;
u64 reports_size;
int ret = -EIO;
char *rest;
ps3vram_mtd.priv = kzalloc(sizeof(struct ps3vram_priv), GFP_KERNEL);
if (!ps3vram_mtd.priv)
goto out;
priv = ps3vram_mtd.priv;
mutex_init(&priv->lock);
priv->dev = &dev->core;
/* Allocate XDR buffer (1MiB aligned) */
priv->xdr_buf = (void *)__get_free_pages(GFP_KERNEL,
get_order(XDR_BUF_SIZE));
if (priv->xdr_buf == NULL) {
dev_dbg(&dev->core, "%s:%d: could not allocate XDR buffer\n",
__func__, __LINE__);
ret = -ENOMEM;
goto out_free_priv;
}
/* Put FIFO at beginning of XDR buffer */
priv->fifo_base = (u32 *) (priv->xdr_buf + FIFO_OFFSET);
priv->fifo_ptr = priv->fifo_base;
/* XXX: Need to open GPU, in case ps3fb or snd_ps3 aren't loaded */
if (ps3_open_hv_device(dev)) {
dev_err(&dev->core, "%s:%d: ps3_open_hv_device failed\n",
__func__, __LINE__);
ret = -EAGAIN;
goto out_free_xdr_buf;
}
/* Request memory */
status = -1;
ddr_size = memparse(size, &rest);
if (*rest == '-')
ddr_size -= ps3fb_videomemory.size;
ddr_size = ALIGN(ddr_size, 1024*1024);
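/*
* The string parsed by memparse() above (presumably a "size" module
* parameter defined earlier in this file) accepts the usual K/M/G
* suffixes, e.g. size=256M requests 256 MiB; a trailing '-' subtracts
* the ps3fb video memory allocation from the request.
*/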
if (!ddr_size) {
dev_err(&dev->core, "%s:%d: specified size is too small\n",
__func__, __LINE__);
ret = -EINVAL;
goto out_close_gpu;
}
while (ddr_size > 0) {
status = lv1_gpu_memory_allocate(ddr_size, 0, 0, 0, 0,
&priv->memory_handle,
&ddr_lpar);
if (!status)
break;
ddr_size -= 1024*1024;
}
if (status || !ddr_size) {
dev_err(&dev->core, "%s:%d: lv1_gpu_memory_allocate failed\n",
__func__, __LINE__);
ret = -ENOMEM;
goto out_close_gpu;
}
/* Request context */
status = lv1_gpu_context_allocate(priv->memory_handle,
0,
&priv->context_handle,
&ctrl_lpar,
&info_lpar,
&reports_lpar,
&reports_size);
if (status) {
dev_err(&dev->core, "%s:%d: lv1_gpu_context_allocate failed\n",
__func__, __LINE__);
ret = -ENOMEM;
goto out_free_memory;
}
/* Map XDR buffer to RSX */
status = lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF,
ps3_mm_phys_to_lpar(__pa(priv->xdr_buf)),
XDR_BUF_SIZE, 0);
if (status) {
dev_err(&dev->core, "%s:%d: lv1_gpu_context_iomap failed\n",
__func__, __LINE__);
ret = -ENOMEM;
goto out_free_context;
}
priv->ddr_base = ioremap_flags(ddr_lpar, ddr_size, _PAGE_NO_CACHE);
if (!priv->ddr_base) {
dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__,
__LINE__);
ret = -ENOMEM;
goto out_free_context;
}
priv->ctrl = ioremap(ctrl_lpar, 64 * 1024);
if (!priv->ctrl) {
dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__,
__LINE__);
ret = -ENOMEM;
goto out_unmap_vram;
}
priv->reports = ioremap(reports_lpar, reports_size);
if (!priv->reports) {
dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__,
__LINE__);
ret = -ENOMEM;
goto out_unmap_ctrl;
}
mutex_lock(&ps3_gpu_mutex);
ps3vram_init_ring(&ps3vram_mtd);
mutex_unlock(&ps3_gpu_mutex);
ps3vram_mtd.name = "ps3vram";
ps3vram_mtd.size = ddr_size;
ps3vram_mtd.flags = MTD_CAP_RAM;
ps3vram_mtd.erase = ps3vram_erase;
ps3vram_mtd.point = NULL;
ps3vram_mtd.unpoint = NULL;
ps3vram_mtd.read = ps3vram_read;
ps3vram_mtd.write = ps3vram_write;
ps3vram_mtd.owner = THIS_MODULE;
ps3vram_mtd.type = MTD_RAM;
ps3vram_mtd.erasesize = CACHE_PAGE_SIZE;
ps3vram_mtd.writesize = 1;
ps3vram_bind(&ps3vram_mtd);
mutex_lock(&ps3_gpu_mutex);
ret = ps3vram_wait_ring(&ps3vram_mtd, 100);
mutex_unlock(&ps3_gpu_mutex);
if (ret < 0) {
dev_err(&dev->core, "%s:%d: failed to initialize channels\n",
__func__, __LINE__);
ret = -ETIMEDOUT;
goto out_unmap_reports;
}
ret = ps3vram_cache_init(&ps3vram_mtd);
if (ret < 0)
goto out_unmap_reports;
if (add_mtd_device(&ps3vram_mtd)) {
dev_err(&dev->core, "%s:%d: add_mtd_device failed\n",
__func__, __LINE__);
ret = -EAGAIN;
goto out_cache_cleanup;
}
dev_info(&dev->core, "reserved %u MiB of gpu memory\n",
(unsigned int)(ddr_size / 1024 / 1024));
return 0;
out_cache_cleanup:
ps3vram_cache_cleanup(&ps3vram_mtd);
out_unmap_reports:
iounmap(priv->reports);
out_unmap_ctrl:
iounmap(priv->ctrl);
out_unmap_vram:
iounmap(priv->ddr_base);
out_free_context:
lv1_gpu_context_free(priv->context_handle);
out_free_memory:
lv1_gpu_memory_free(priv->memory_handle);
out_close_gpu:
ps3_close_hv_device(dev);
out_free_xdr_buf:
free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE));
out_free_priv:
kfree(ps3vram_mtd.priv);
ps3vram_mtd.priv = NULL;
out:
return ret;
}
static int ps3vram_shutdown(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv;
priv = ps3vram_mtd.priv;
del_mtd_device(&ps3vram_mtd);
ps3vram_cache_cleanup(&ps3vram_mtd);
iounmap(priv->reports);
iounmap(priv->ctrl);
iounmap(priv->ddr_base);
lv1_gpu_context_free(priv->context_handle);
lv1_gpu_memory_free(priv->memory_handle);
ps3_close_hv_device(dev);
free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE));
kfree(priv);
return 0;
}
static struct ps3_system_bus_driver ps3vram_driver = {
.match_id = PS3_MATCH_ID_GPU,
.match_sub_id = PS3_MATCH_SUB_ID_GPU_RAMDISK,
.core.name = DEVICE_NAME,
.core.owner = THIS_MODULE,
.probe = ps3vram_probe,
.remove = ps3vram_shutdown,
.shutdown = ps3vram_shutdown,
};
static int __init ps3vram_init(void)
{
return ps3_system_bus_driver_register(&ps3vram_driver);
}
static void __exit ps3vram_exit(void)
{
ps3_system_bus_driver_unregister(&ps3vram_driver);
}
module_init(ps3vram_init);
module_exit(ps3vram_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jim Paris <jim@jtan.com>");
MODULE_DESCRIPTION("MTD driver for PS3 video RAM");
MODULE_ALIAS(PS3_MODULE_ALIAS_GPU_RAMDISK);


@@ -73,8 +73,6 @@ source "drivers/staging/rt2860/Kconfig"
source "drivers/staging/rt2870/Kconfig"
source "drivers/staging/benet/Kconfig"
source "drivers/staging/comedi/Kconfig"
source "drivers/staging/asus_oled/Kconfig"


@@ -19,7 +19,6 @@ obj-$(CONFIG_AGNX) += agnx/
obj-$(CONFIG_OTUS) += otus/
obj-$(CONFIG_RT2860) += rt2860/
obj-$(CONFIG_RT2870) += rt2870/
obj-$(CONFIG_BENET) += benet/
obj-$(CONFIG_COMEDI) += comedi/
obj-$(CONFIG_ASUS_OLED) += asus_oled/
obj-$(CONFIG_PANEL) += panel/


@@ -1,7 +0,0 @@
config BENET
tristate "ServerEngines 10Gb NIC - BladeEngine"
depends on PCI && INET
select INET_LRO
help
This driver implements the NIC functionality for ServerEngines
10Gb network adapter BladeEngine (EC 3210).


@@ -1,6 +0,0 @@
SERVER ENGINES 10Gbe NIC - BLADE-ENGINE
P: Subbu Seetharaman
M: subbus@serverengines.com
L: netdev@vger.kernel.org
W: http://www.serverengines.com
S: Supported


@@ -1,14 +0,0 @@
#
# Makefile to build the network driver for ServerEngine's BladeEngine
#
obj-$(CONFIG_BENET) += benet.o
benet-y := be_init.o \
be_int.o \
be_netif.o \
be_ethtool.o \
funcobj.o \
cq.o \
eq.o \
mpu.o \
eth.o


@@ -1,6 +0,0 @@
TODO:
- remove wrappers around common iowrite functions
- full netdev audit of common problems/issues
Please send all patches and questions to Subbu Seetharaman
<subbus@serverengines.com> and Greg Kroah-Hartman <greg@kroah.com>


@@ -1,82 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __asyncmesg_amap_h__
#define __asyncmesg_amap_h__
#include "fwcmd_common.h"
/* --- ASYNC_EVENT_CODES --- */
#define ASYNC_EVENT_CODE_LINK_STATE (1)
#define ASYNC_EVENT_CODE_ISCSI (2)
/* --- ASYNC_LINK_STATES --- */
#define ASYNC_EVENT_LINK_DOWN (0) /* Link Down on a port */
#define ASYNC_EVENT_LINK_UP (1) /* Link Up on a port */
/*
* The last 4 bytes of the async events have this common format. It allows
* the driver to distinguish [link]MCC_CQ_ENTRY[/link] structs from
* asynchronous events. Both arrive on the same completion queue. This
* structure also contains the common fields used to decode the async event.
*/
struct BE_ASYNC_EVENT_TRAILER_AMAP {
u8 rsvd0[8]; /* DWORD 0 */
u8 event_code[8]; /* DWORD 0 */
u8 event_type[8]; /* DWORD 0 */
u8 rsvd1[6]; /* DWORD 0 */
u8 async_event; /* DWORD 0 */
u8 valid; /* DWORD 0 */
} __packed;
struct ASYNC_EVENT_TRAILER_AMAP {
u32 dw[1];
};
/*
* Applicable in Initiator, Target and NIC modes.
* A link state async event is seen by all device drivers as soon as they
* create an MCC ring. Thereafter, anytime the link status changes the
* drivers will receive a link state async event. Notifications continue to
* be sent until a driver destroys its MCC ring. A link down event is
* reported when either port loses link. A link up event is reported
* when either port regains link. When BE's failover mechanism is enabled, a
* link down on the active port causes traffic to be diverted to the standby
* port by the BE's ARM firmware (assuming the standby port has link). In
* this case, the standby port assumes the active status. Note: when link is
* restored on the failed port, traffic continues on the currently active
* port. The ARM firmware does not attempt to 'fail back' traffic to
* the restored port.
*/
struct BE_ASYNC_EVENT_LINK_STATE_AMAP {
u8 port0_link_status[8];
u8 port1_link_status[8];
u8 active_port[8];
u8 rsvd0[8]; /* DWORD 0 */
u8 port0_duplex[8];
u8 port0_speed[8];
u8 port1_duplex[8];
u8 port1_speed[8];
u8 port0_fault[8];
u8 port1_fault[8];
u8 rsvd1[2][8]; /* DWORD 2 */
struct BE_ASYNC_EVENT_TRAILER_AMAP trailer;
} __packed;
struct ASYNC_EVENT_LINK_STATE_AMAP {
u32 dw[4];
};
#endif /* __asyncmesg_amap_h__ */


@@ -1,134 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __be_cm_amap_h__
#define __be_cm_amap_h__
#include "be_common.h"
#include "etx_context.h"
#include "mpu_context.h"
/*
* --- CEV_WATERMARK_ENUM ---
* CQ/EQ Watermark Encodings. Encoded as number of free entries in
* Queue when Watermark is reached.
*/
#define CEV_WMARK_0 (0) /* Watermark when Queue full */
#define CEV_WMARK_16 (1) /* Watermark at 16 free entries */
#define CEV_WMARK_32 (2) /* Watermark at 32 free entries */
#define CEV_WMARK_48 (3) /* Watermark at 48 free entries */
#define CEV_WMARK_64 (4) /* Watermark at 64 free entries */
#define CEV_WMARK_80 (5) /* Watermark at 80 free entries */
#define CEV_WMARK_96 (6) /* Watermark at 96 free entries */
#define CEV_WMARK_112 (7) /* Watermark at 112 free entries */
#define CEV_WMARK_128 (8) /* Watermark at 128 free entries */
#define CEV_WMARK_144 (9) /* Watermark at 144 free entries */
#define CEV_WMARK_160 (10) /* Watermark at 160 free entries */
#define CEV_WMARK_176 (11) /* Watermark at 176 free entries */
#define CEV_WMARK_192 (12) /* Watermark at 192 free entries */
#define CEV_WMARK_208 (13) /* Watermark at 208 free entries */
#define CEV_WMARK_224 (14) /* Watermark at 224 free entries */
#define CEV_WMARK_240 (15) /* Watermark at 240 free entries */
/*
* --- CQ_CNT_ENUM ---
* Completion Queue Count Encodings.
*/
#define CEV_CQ_CNT_256 (0) /* CQ has 256 entries */
#define CEV_CQ_CNT_512 (1) /* CQ has 512 entries */
#define CEV_CQ_CNT_1024 (2) /* CQ has 1024 entries */
/*
* --- EQ_CNT_ENUM ---
* Event Queue Count Encodings.
*/
#define CEV_EQ_CNT_256 (0) /* EQ has 256 entries (16-byte EQEs only) */
#define CEV_EQ_CNT_512 (1) /* EQ has 512 entries (16-byte EQEs only) */
#define CEV_EQ_CNT_1024 (2) /* EQ has 1024 entries (4-byte or */
/* 16-byte EQEs only) */
#define CEV_EQ_CNT_2048 (3) /* EQ has 2048 entries (4-byte or */
/* 16-byte EQEs only) */
#define CEV_EQ_CNT_4096 (4) /* EQ has 4096 entries (4-byte EQEs only) */
/*
* --- EQ_SIZE_ENUM ---
* Event Queue Entry Size Encoding.
*/
#define CEV_EQ_SIZE_4 (0) /* EQE is 4 bytes */
#define CEV_EQ_SIZE_16 (1) /* EQE is 16 bytes */
/*
* Completion Queue Context Table Entry. Contains the state of a CQ.
* Located in RAM within the CEV block.
*/
struct BE_CQ_CONTEXT_AMAP {
u8 Cidx[11]; /* DWORD 0 */
u8 Watermark[4]; /* DWORD 0 */
u8 NoDelay; /* DWORD 0 */
u8 EPIdx[11]; /* DWORD 0 */
u8 Count[2]; /* DWORD 0 */
u8 valid; /* DWORD 0 */
u8 SolEvent; /* DWORD 0 */
u8 Eventable; /* DWORD 0 */
u8 Pidx[11]; /* DWORD 1 */
u8 PD[10]; /* DWORD 1 */
u8 EQID[7]; /* DWORD 1 */
u8 Func; /* DWORD 1 */
u8 WME; /* DWORD 1 */
u8 Stalled; /* DWORD 1 */
u8 Armed; /* DWORD 1 */
} __packed;
struct CQ_CONTEXT_AMAP {
u32 dw[2];
};
/*
* Event Queue Context Table Entry. Contains the state of an EQ.
* Located in RAM in the CEV block.
*/
struct BE_EQ_CONTEXT_AMAP {
u8 Cidx[13]; /* DWORD 0 */
u8 rsvd0[2]; /* DWORD 0 */
u8 Func; /* DWORD 0 */
u8 EPIdx[13]; /* DWORD 0 */
u8 valid; /* DWORD 0 */
u8 rsvd1; /* DWORD 0 */
u8 Size; /* DWORD 0 */
u8 Pidx[13]; /* DWORD 1 */
u8 rsvd2[3]; /* DWORD 1 */
u8 PD[10]; /* DWORD 1 */
u8 Count[3]; /* DWORD 1 */
u8 SolEvent; /* DWORD 1 */
u8 Stalled; /* DWORD 1 */
u8 Armed; /* DWORD 1 */
u8 Watermark[4]; /* DWORD 2 */
u8 WME; /* DWORD 2 */
u8 rsvd3[3]; /* DWORD 2 */
u8 EventVect[6]; /* DWORD 2 */
u8 rsvd4[2]; /* DWORD 2 */
u8 Delay[8]; /* DWORD 2 */
u8 rsvd5[6]; /* DWORD 2 */
u8 TMR; /* DWORD 2 */
u8 rsvd6; /* DWORD 2 */
u8 rsvd7[32]; /* DWORD 3 */
} __packed;
struct EQ_CONTEXT_AMAP {
u32 dw[4];
};
#endif /* __be_cm_amap_h__ */


@@ -1,53 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __be_common_amap_h__
#define __be_common_amap_h__
/* Physical Address. */
struct BE_PHYS_ADDR_AMAP {
u8 lo[32]; /* DWORD 0 */
u8 hi[32]; /* DWORD 1 */
} __packed;
struct PHYS_ADDR_AMAP {
u32 dw[2];
};
/* Virtual Address. */
struct BE_VIRT_ADDR_AMAP {
u8 lo[32]; /* DWORD 0 */
u8 hi[32]; /* DWORD 1 */
} __packed;
struct VIRT_ADDR_AMAP {
u32 dw[2];
};
/* Scatter gather element. */
struct BE_SGE_AMAP {
u8 addr_hi[32]; /* DWORD 0 */
u8 addr_lo[32]; /* DWORD 1 */
u8 rsvd0[32]; /* DWORD 2 */
u8 len[16]; /* DWORD 3 */
u8 rsvd1[16]; /* DWORD 3 */
} __packed;
struct SGE_AMAP {
u32 dw[4];
};
#endif /* __be_common_amap_h__ */


@@ -1,348 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* be_ethtool.c
*
* This file contains various functions that ethtool can use
* to talk to the driver and the BE H/W.
*/
#include "benet.h"
#include <linux/ethtool.h>
static const char benet_gstrings_stats[][ETH_GSTRING_LEN] = {
/* net_device_stats */
"rx_packets",
"tx_packets",
"rx_bytes",
"tx_bytes",
"rx_errors",
"tx_errors",
"rx_dropped",
"tx_dropped",
"multicast",
"collisions",
"rx_length_errors",
"rx_over_errors",
"rx_crc_errors",
"rx_frame_errors",
"rx_fifo_errors",
"rx_missed_errors",
"tx_aborted_errors",
"tx_carrier_errors",
"tx_fifo_errors",
"tx_heartbeat_errors",
"tx_window_errors",
"rx_compressed",
"tc_compressed",
/* BE driver Stats */
"bes_tx_reqs",
"bes_tx_fails",
"bes_fwd_reqs",
"bes_tx_wrbs",
"bes_interrupts",
"bes_events",
"bes_tx_events",
"bes_rx_events",
"bes_tx_compl",
"bes_rx_compl",
"bes_ethrx_post_fail",
"bes_802_3_dropped_frames",
"bes_802_3_malformed_frames",
"bes_rx_misc_pkts",
"bes_eth_tx_rate",
"bes_eth_rx_rate",
"Num Packets collected",
"Num Times Flushed",
};
#define NET_DEV_STATS_LEN \
(sizeof(struct net_device_stats)/sizeof(unsigned long))
#define BENET_STATS_LEN ARRAY_SIZE(benet_gstrings_stats)
static void
be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct be_net_object *pnob = netdev_priv(netdev);
struct be_adapter *adapter = pnob->adapter;
strncpy(drvinfo->driver, be_driver_name, 32);
strncpy(drvinfo->version, be_drvr_ver, 32);
strncpy(drvinfo->fw_version, be_fw_ver, 32);
strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = 0;
drvinfo->eedump_len = 0;
}
static int
be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
struct be_net_object *pnob = netdev_priv(netdev);
struct be_adapter *adapter = pnob->adapter;
coalesce->rx_max_coalesced_frames = adapter->max_rx_coal;
coalesce->rx_coalesce_usecs = adapter->cur_eqd;
coalesce->rx_coalesce_usecs_high = adapter->max_eqd;
coalesce->rx_coalesce_usecs_low = adapter->min_eqd;
coalesce->tx_coalesce_usecs = adapter->cur_eqd;
coalesce->tx_coalesce_usecs_high = adapter->max_eqd;
coalesce->tx_coalesce_usecs_low = adapter->min_eqd;
coalesce->use_adaptive_rx_coalesce = adapter->enable_aic;
coalesce->use_adaptive_tx_coalesce = adapter->enable_aic;
return 0;
}
/*
* This routine is used to set interrupt coalescing delay *as well as*
* the number of pkts to coalesce for LRO.
*/
static int
be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
struct be_net_object *pnob = netdev_priv(netdev);
struct be_adapter *adapter = pnob->adapter;
struct be_eq_object *eq_objectp;
u32 max, min, cur;
int status;
adapter->max_rx_coal = coalesce->rx_max_coalesced_frames;
if (adapter->max_rx_coal >= BE_LRO_MAX_PKTS)
adapter->max_rx_coal = BE_LRO_MAX_PKTS;
if (adapter->enable_aic == 0 &&
coalesce->use_adaptive_rx_coalesce == 1) {
/* if AIC is being turned on now, start with an EQD of 0 */
adapter->cur_eqd = 0;
}
adapter->enable_aic = coalesce->use_adaptive_rx_coalesce;
/* round off to nearest multiple of 8 */
max = (((coalesce->rx_coalesce_usecs_high + 4) >> 3) << 3);
min = (((coalesce->rx_coalesce_usecs_low + 4) >> 3) << 3);
cur = (((coalesce->rx_coalesce_usecs + 4) >> 3) << 3);
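/*
* Worked example of the rounding above: 27 becomes ((27 + 4) >> 3) << 3 = 24
* and 29 becomes ((29 + 4) >> 3) << 3 = 32, i.e. each value is rounded to
* the nearest multiple of 8.
*/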
if (adapter->enable_aic) {
/* accept low and high if AIC is enabled */
if (max > MAX_EQD)
max = MAX_EQD;
if (min > max)
min = max;
adapter->max_eqd = max;
adapter->min_eqd = min;
if (adapter->cur_eqd > max)
adapter->cur_eqd = max;
if (adapter->cur_eqd < min)
adapter->cur_eqd = min;
} else {
/* accept specified coalesce_usecs only if AIC is disabled */
if (cur > MAX_EQD)
cur = MAX_EQD;
eq_objectp = &pnob->event_q_obj;
status =
be_eq_modify_delay(&pnob->fn_obj, 1, &eq_objectp, &cur,
NULL, NULL, NULL);
if (status == BE_SUCCESS)
adapter->cur_eqd = cur;
}
return 0;
}
static u32 be_get_rx_csum(struct net_device *netdev)
{
struct be_net_object *pnob = netdev_priv(netdev);
struct be_adapter *adapter = pnob->adapter;
return adapter->rx_csum;
}
static int be_set_rx_csum(struct net_device *netdev, uint32_t data)
{
struct be_net_object *pnob = netdev_priv(netdev);
struct be_adapter *adapter = pnob->adapter;
if (data)
adapter->rx_csum = 1;
else
adapter->rx_csum = 0;
return 0;
}
static void
be_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
{
switch (stringset) {
case ETH_SS_STATS:
memcpy(data, *benet_gstrings_stats,
sizeof(benet_gstrings_stats));
break;
}
}
static int be_get_stats_count(struct net_device *netdev)
{
return BENET_STATS_LEN;
}
static void
be_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, uint64_t *data)
{
struct be_net_object *pnob = netdev_priv(netdev);
struct be_adapter *adapter = pnob->adapter;
int i;
benet_get_stats(netdev);
for (i = 0; i < NET_DEV_STATS_LEN; i++)
data[i] = ((unsigned long *)&adapter->benet_stats)[i];
data[i++] = adapter->be_stat.bes_tx_reqs;
data[i++] = adapter->be_stat.bes_tx_fails;
data[i++] = adapter->be_stat.bes_fwd_reqs;
data[i++] = adapter->be_stat.bes_tx_wrbs;
data[i++] = adapter->be_stat.bes_ints;
data[i++] = adapter->be_stat.bes_events;
data[i++] = adapter->be_stat.bes_tx_events;
data[i++] = adapter->be_stat.bes_rx_events;
data[i++] = adapter->be_stat.bes_tx_compl;
data[i++] = adapter->be_stat.bes_rx_compl;
data[i++] = adapter->be_stat.bes_ethrx_post_fail;
data[i++] = adapter->be_stat.bes_802_3_dropped_frames;
data[i++] = adapter->be_stat.bes_802_3_malformed_frames;
data[i++] = adapter->be_stat.bes_rx_misc_pkts;
data[i++] = adapter->be_stat.bes_eth_tx_rate;
data[i++] = adapter->be_stat.bes_eth_rx_rate;
data[i++] = adapter->be_stat.bes_rx_coal;
data[i++] = adapter->be_stat.bes_rx_flush;
}
static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
ecmd->speed = SPEED_10000;
ecmd->duplex = DUPLEX_FULL;
ecmd->autoneg = AUTONEG_DISABLE;
return 0;
}
/* Get the Ring parameters from the pnob */
static void
be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
struct be_net_object *pnob = netdev_priv(netdev);
/* Preset maximums; mini and jumbo rings are not used by this driver */
ring->rx_max_pending = pnob->rx_q_len;
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
ring->tx_max_pending = pnob->tx_q_len;
/* Current hardware settings */
ring->rx_pending = atomic_read(&pnob->rx_q_posted);
ring->rx_mini_pending = 0;
ring->rx_jumbo_pending = 0;
ring->tx_pending = atomic_read(&pnob->tx_q_used);
}
static void
be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
{
struct be_net_object *pnob = netdev_priv(netdev);
bool rxfc, txfc;
int status;
status = be_eth_get_flow_control(&pnob->fn_obj, &txfc, &rxfc);
if (status != BE_SUCCESS) {
dev_info(&netdev->dev, "Unable to get pause frame settings\n");
/* return defaults */
ecmd->rx_pause = 1;
ecmd->tx_pause = 0;
ecmd->autoneg = AUTONEG_ENABLE;
return;
}
ecmd->tx_pause = txfc ? 1 : 0;
ecmd->rx_pause = rxfc ? 1 : 0;
ecmd->autoneg = AUTONEG_ENABLE;
}
static int
be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
{
struct be_net_object *pnob = netdev_priv(netdev);
bool txfc, rxfc;
int status;
if (ecmd->autoneg != AUTONEG_ENABLE)
return -EINVAL;
txfc = ecmd->tx_pause != 0;
rxfc = ecmd->rx_pause != 0;
status = be_eth_set_flow_control(&pnob->fn_obj, txfc, rxfc);
if (status != BE_SUCCESS) {
dev_info(&netdev->dev, "Unable to set pause frame settings\n");
return -1;
}
return 0;
}
struct ethtool_ops be_ethtool_ops = {
.get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_coalesce = be_get_coalesce,
.set_coalesce = be_set_coalesce,
.get_ringparam = be_get_ringparam,
.get_pauseparam = be_get_pauseparam,
.set_pauseparam = be_set_pauseparam,
.get_rx_csum = be_get_rx_csum,
.set_rx_csum = be_set_rx_csum,
.get_tx_csum = ethtool_op_get_tx_csum,
.set_tx_csum = ethtool_op_set_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_tso = ethtool_op_get_tso,
.set_tso = ethtool_op_set_tso,
.get_strings = be_get_strings,
.get_stats_count = be_get_stats_count,
.get_ethtool_stats = be_get_ethtool_stats,
};

File diff suppressed because it is too large


@@ -1,863 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#include <linux/if_vlan.h>
#include <linux/inet_lro.h>
#include "benet.h"
/* number of bytes of RX frame that are copied to skb->data */
#define BE_HDR_LEN 64
#define NETIF_RX(skb) netif_receive_skb(skb)
#define VLAN_ACCEL_RX(skb, pnob, vt) \
vlan_hwaccel_rx(skb, pnob->vlan_grp, vt)
/*
This function notifies BladeEngine of the number of completion
entries processed from the specified completion queue by writing
the number of popped entries to the door bell.
pnob - Pointer to the NetObject structure
n - Number of completion entries processed
cq_id - Queue ID of the completion queue for which notification
is being done.
re_arm - 1 - rearm the completion ring to generate an event.
- 0 - dont rearm the completion ring to generate an event
*/
void be_notify_cmpl(struct be_net_object *pnob, int n, int cq_id, int re_arm)
{
struct CQ_DB_AMAP cqdb;
cqdb.dw[0] = 0;
AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, cq_id);
AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, re_arm);
AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, n);
PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]);
}
/*
* adds additional receive frags indicated by BE starting from given
* frag index (fi) to specified skb's frag list
*/
static void
add_skb_frags(struct be_net_object *pnob, struct sk_buff *skb,
u32 nresid, u32 fi)
{
struct be_adapter *adapter = pnob->adapter;
u32 sk_frag_idx, n;
struct be_rx_page_info *rx_page_info;
u32 frag_sz = pnob->rx_buf_size;
sk_frag_idx = skb_shinfo(skb)->nr_frags;
while (nresid) {
index_inc(&fi, pnob->rx_q_len);
rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
pnob->rx_ctxt[fi] = NULL;
if ((rx_page_info->page_offset) ||
(pnob->rx_pg_shared == false)) {
pci_unmap_page(adapter->pdev,
pci_unmap_addr(rx_page_info, bus),
frag_sz, PCI_DMA_FROMDEVICE);
}
n = min(nresid, frag_sz);
skb_shinfo(skb)->frags[sk_frag_idx].page = rx_page_info->page;
skb_shinfo(skb)->frags[sk_frag_idx].page_offset
= rx_page_info->page_offset;
skb_shinfo(skb)->frags[sk_frag_idx].size = n;
sk_frag_idx++;
skb->len += n;
skb->data_len += n;
skb_shinfo(skb)->nr_frags++;
nresid -= n;
memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
atomic_dec(&pnob->rx_q_posted);
}
}
/*
* This function processes incoming nic packets over various Rx queues.
* This function takes the adapter, the current Rx status descriptor
* entry and the Rx completion queue ID as argument.
*/
static inline int process_nic_rx_completion(struct be_net_object *pnob,
struct ETH_RX_COMPL_AMAP *rxcp)
{
struct be_adapter *adapter = pnob->adapter;
struct sk_buff *skb;
int udpcksm, tcpcksm;
int n;
u32 nresid, fi;
u32 frag_sz = pnob->rx_buf_size;
u8 *va;
struct be_rx_page_info *rx_page_info;
u32 numfrags, vtp, vtm, vlan_tag, pktsize;
fi = AMAP_GET_BITS_PTR(ETH_RX_COMPL, fragndx, rxcp);
BUG_ON(fi >= pnob->rx_q_len);
rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
BUG_ON(!rx_page_info->page);
pnob->rx_ctxt[fi] = NULL;
/*
* If one page is used per fragment or if this is the second half
* of the page, unmap the page here
*/
if ((rx_page_info->page_offset) || (pnob->rx_pg_shared == false)) {
pci_unmap_page(adapter->pdev,
pci_unmap_addr(rx_page_info, bus), frag_sz,
PCI_DMA_FROMDEVICE);
}
atomic_dec(&pnob->rx_q_posted);
udpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, udpcksm, rxcp);
tcpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, tcpcksm, rxcp);
pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
/*
* get rid of RX flush completions first.
*/
if ((tcpcksm) && (udpcksm) && (pktsize == 32)) {
put_page(rx_page_info->page);
memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
return 0;
}
skb = netdev_alloc_skb(pnob->netdev, BE_HDR_LEN + NET_IP_ALIGN);
if (skb == NULL) {
dev_info(&pnob->netdev->dev, "alloc_skb() failed\n");
put_page(rx_page_info->page);
memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
goto free_frags;
}
skb_reserve(skb, NET_IP_ALIGN);
skb->dev = pnob->netdev;
n = min(pktsize, frag_sz);
va = page_address(rx_page_info->page) + rx_page_info->page_offset;
prefetch(va);
skb->len = n;
skb->data_len = n;
if (n <= BE_HDR_LEN) {
memcpy(skb->data, va, n);
put_page(rx_page_info->page);
skb->data_len -= n;
skb->tail += n;
} else {
/* Setup the SKB with page buffer information */
skb_shinfo(skb)->frags[0].page = rx_page_info->page;
skb_shinfo(skb)->nr_frags++;
/* Copy the header into the skb_data */
memcpy(skb->data, va, BE_HDR_LEN);
skb_shinfo(skb)->frags[0].page_offset =
rx_page_info->page_offset + BE_HDR_LEN;
skb_shinfo(skb)->frags[0].size = n - BE_HDR_LEN;
skb->data_len -= BE_HDR_LEN;
skb->tail += BE_HDR_LEN;
}
memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
nresid = pktsize - n;
skb->protocol = eth_type_trans(skb, pnob->netdev);
if ((tcpcksm || udpcksm) && adapter->rx_csum)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb->ip_summed = CHECKSUM_NONE;
/*
* if we have more bytes left, the frame has been
* given to us in multiple fragments. This happens
* with Jumbo frames. Add the remaining fragments to
* skb->frags[] array.
*/
if (nresid)
add_skb_frags(pnob, skb, nresid, fi);
/* update the true size of the skb. */
skb->truesize = skb->len + sizeof(struct sk_buff);
/*
* If this is an 802.3 or 802.2 LLC frame (i.e. the MAC header
* contains a length field) and the frame is longer than 64 bytes
*/
if (((skb->protocol == ntohs(ETH_P_802_2)) ||
(skb->protocol == ntohs(ETH_P_802_3)))
&& (pktsize > BE_HDR_LEN)) {
/*
* If the length given in the MAC header is less than the frame
* size, the frame is erroneous; drop it
*/
if ((ntohs(*(u16 *) (va + 12)) + ETH_HLEN) < pktsize) {
/* Increment Non Ether type II frames dropped */
adapter->be_stat.bes_802_3_dropped_frames++;
kfree_skb(skb);
return 0;
}
/*
* else if the length given in the MAC header is greater than the
* frame size, we should not be seeing frames of this sort;
* count it as malformed and pass the packet to the stack
*/
else if ((ntohs(*(u16 *) (va + 12)) + ETH_HLEN) > pktsize) {
/* Increment Non Ether type II frames malformed */
adapter->be_stat.bes_802_3_malformed_frames++;
}
}
vtp = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtp, rxcp);
vtm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtm, rxcp);
if (vtp && vtm) {
/* Vlan tag present in pkt and BE found
* that the tag matched an entry in VLAN table
*/
if (!pnob->vlan_grp || pnob->num_vlans == 0) {
/* But we have no VLANs configured.
* This should never happen. Drop the packet.
*/
dev_info(&pnob->netdev->dev,
"BladeEngine: Unexpected vlan tagged packet\n");
kfree_skb(skb);
return 0;
}
/* pass the VLAN packet to stack */
vlan_tag = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vlan_tag, rxcp);
VLAN_ACCEL_RX(skb, pnob, be16_to_cpu(vlan_tag));
} else {
NETIF_RX(skb);
}
return 0;
free_frags:
/* free all frags associated with the current rxcp */
numfrags = AMAP_GET_BITS_PTR(ETH_RX_COMPL, numfrags, rxcp);
while (numfrags-- > 1) {
index_inc(&fi, pnob->rx_q_len);
rx_page_info = (struct be_rx_page_info *)
pnob->rx_ctxt[fi];
pnob->rx_ctxt[fi] = (void *)NULL;
if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
pci_unmap_page(adapter->pdev,
pci_unmap_addr(rx_page_info, bus),
frag_sz, PCI_DMA_FROMDEVICE);
}
put_page(rx_page_info->page);
memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
atomic_dec(&pnob->rx_q_posted);
}
return -ENOMEM;
}
static void process_nic_rx_completion_lro(struct be_net_object *pnob,
struct ETH_RX_COMPL_AMAP *rxcp)
{
struct be_adapter *adapter = pnob->adapter;
struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
unsigned int udpcksm, tcpcksm;
u32 numfrags, vlanf, vtm, vlan_tag, nresid;
u16 vlant;
unsigned int fi, idx, n;
struct be_rx_page_info *rx_page_info;
u32 frag_sz = pnob->rx_buf_size, pktsize;
bool rx_coal = adapter->max_rx_coal > 1;
u8 err, *va;
__wsum csum = 0;
if (AMAP_GET_BITS_PTR(ETH_RX_COMPL, ipsec, rxcp)) {
/* Drop the pkt and move to the next completion. */
adapter->be_stat.bes_rx_misc_pkts++;
return;
}
err = AMAP_GET_BITS_PTR(ETH_RX_COMPL, err, rxcp);
if (err || !rx_coal) {
/* We won't coalesce Rx pkts if the err bit is set;
* take the path of normal completion processing */
process_nic_rx_completion(pnob, rxcp);
return;
}
fi = AMAP_GET_BITS_PTR(ETH_RX_COMPL, fragndx, rxcp);
BUG_ON(fi >= pnob->rx_q_len);
rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
BUG_ON(!rx_page_info->page);
pnob->rx_ctxt[fi] = (void *)NULL;
/* If one page is used per fragment or if this is the
* second half of the page, unmap the page here
*/
if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
pci_unmap_page(adapter->pdev,
pci_unmap_addr(rx_page_info, bus),
frag_sz, PCI_DMA_FROMDEVICE);
}
numfrags = AMAP_GET_BITS_PTR(ETH_RX_COMPL, numfrags, rxcp);
udpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, udpcksm, rxcp);
tcpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, tcpcksm, rxcp);
vlan_tag = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vlan_tag, rxcp);
vlant = be16_to_cpu(vlan_tag);
vlanf = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtp, rxcp);
vtm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtm, rxcp);
pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
atomic_dec(&pnob->rx_q_posted);
if (tcpcksm && udpcksm && pktsize == 32) {
/* flush completion entries */
put_page(rx_page_info->page);
memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
return;
}
/* Only one of udpcksum and tcpcksum can be set */
BUG_ON(udpcksm && tcpcksm);
/* jumbo frames could come in multiple fragments */
BUG_ON(numfrags != ((pktsize + (frag_sz - 1)) / frag_sz));
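/*
* The BUG_ON above checks the ceiling division: e.g. with a hypothetical
* 4096-byte fragment size, a 9000-byte jumbo frame must arrive as
* (9000 + 4095) / 4096 = 3 fragments.
*/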
n = min(pktsize, frag_sz);
nresid = pktsize - n; /* will be useful for jumbo pkts */
idx = 0;
va = page_address(rx_page_info->page) + rx_page_info->page_offset;
prefetch(va);
rx_frags[idx].page = rx_page_info->page;
rx_frags[idx].page_offset = (rx_page_info->page_offset);
rx_frags[idx].size = n;
memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
/* If we got multiple fragments, we have more data. */
while (nresid) {
idx++;
index_inc(&fi, pnob->rx_q_len);
rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
pnob->rx_ctxt[fi] = (void *)NULL;
if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
pci_unmap_page(adapter->pdev,
pci_unmap_addr(rx_page_info, bus),
frag_sz, PCI_DMA_FROMDEVICE);
}
n = min(nresid, frag_sz);
rx_frags[idx].page = rx_page_info->page;
rx_frags[idx].page_offset = (rx_page_info->page_offset);
rx_frags[idx].size = n;
nresid -= n;
memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
atomic_dec(&pnob->rx_q_posted);
}
if (likely(!(vlanf && vtm))) {
lro_receive_frags(&pnob->lro_mgr, rx_frags,
pktsize, pktsize,
(void *)(unsigned long)csum, csum);
} else {
/* Vlan tag present in pkt and BE found
* that the tag matched an entry in VLAN table
*/
if (unlikely(!pnob->vlan_grp || pnob->num_vlans == 0)) {
/* But we have no VLANs configured.
* This should never happen. Drop the packet.
*/
dev_info(&pnob->netdev->dev,
"BladeEngine: Unexpected vlan tagged packet\n");
return;
}
/* pass the VLAN packet to stack */
lro_vlan_hwaccel_receive_frags(&pnob->lro_mgr,
rx_frags, pktsize, pktsize,
pnob->vlan_grp, vlant,
(void *)(unsigned long)csum,
csum);
}
adapter->be_stat.bes_rx_coal++;
}
struct ETH_RX_COMPL_AMAP *be_get_rx_cmpl(struct be_net_object *pnob)
{
struct ETH_RX_COMPL_AMAP *rxcp = &pnob->rx_cq[pnob->rx_cq_tl];
u32 valid, ct;
valid = AMAP_GET_BITS_PTR(ETH_RX_COMPL, valid, rxcp);
if (valid == 0)
return NULL;
ct = AMAP_GET_BITS_PTR(ETH_RX_COMPL, ct, rxcp);
if (ct != 0) {
/* Invalid chute number; treat as an error */
AMAP_SET_BITS_PTR(ETH_RX_COMPL, err, rxcp, 1);
}
be_adv_rxcq_tl(pnob);
AMAP_SET_BITS_PTR(ETH_RX_COMPL, valid, rxcp, 0);
return rxcp;
}
static void update_rx_rate(struct be_adapter *adapter)
{
/* update the rate once in two seconds */
if ((jiffies - adapter->eth_rx_jiffies) > 2 * (HZ)) {
u32 r;
r = adapter->eth_rx_bytes /
((jiffies - adapter->eth_rx_jiffies) / (HZ));
r = (r / 1000000); /* MB/Sec */
/* Mega Bits/Sec */
adapter->be_stat.bes_eth_rx_rate = (r * 8);
adapter->eth_rx_jiffies = jiffies;
adapter->eth_rx_bytes = 0;
}
}
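/*
* Example of the rate computation above: 500000000 bytes received over a
* 2 second window gives r = 250000000 / 1000000 = 250 MB/s, stored in
* bes_eth_rx_rate as 250 * 8 = 2000 megabits/s.
*/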
static int process_rx_completions(struct be_net_object *pnob, int max_work)
{
struct be_adapter *adapter = pnob->adapter;
struct ETH_RX_COMPL_AMAP *rxcp;
u32 nc = 0;
unsigned int pktsize;
while (max_work && (rxcp = be_get_rx_cmpl(pnob))) {
prefetch(rxcp);
pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
process_nic_rx_completion_lro(pnob, rxcp);
adapter->eth_rx_bytes += pktsize;
update_rx_rate(adapter);
nc++;
max_work--;
adapter->be_stat.bes_rx_compl++;
}
if (likely(adapter->max_rx_coal > 1)) {
adapter->be_stat.bes_rx_flush++;
lro_flush_all(&pnob->lro_mgr);
}
/* Refill the queue */
if (atomic_read(&pnob->rx_q_posted) < 900)
be_post_eth_rx_buffs(pnob);
return nc;
}
static struct ETH_TX_COMPL_AMAP *be_get_tx_cmpl(struct be_net_object *pnob)
{
struct ETH_TX_COMPL_AMAP *txcp = &pnob->tx_cq[pnob->tx_cq_tl];
u32 valid;
valid = AMAP_GET_BITS_PTR(ETH_TX_COMPL, valid, txcp);
if (valid == 0)
return NULL;
AMAP_SET_BITS_PTR(ETH_TX_COMPL, valid, txcp, 0);
be_adv_txcq_tl(pnob);
return txcp;
}
void process_one_tx_compl(struct be_net_object *pnob, u32 end_idx)
{
struct be_adapter *adapter = pnob->adapter;
int cur_index, tx_wrbs_completed = 0;
struct sk_buff *skb;
u64 busaddr, pa, pa_lo, pa_hi;
struct ETH_WRB_AMAP *wrb;
u32 frag_len, last_index, j;
last_index = tx_compl_lastwrb_idx_get(pnob);
BUG_ON(last_index != end_idx);
pnob->tx_ctxt[pnob->tx_q_tl] = NULL;
do {
cur_index = pnob->tx_q_tl;
wrb = &pnob->tx_q[cur_index];
pa_hi = AMAP_GET_BITS_PTR(ETH_WRB, frag_pa_hi, wrb);
pa_lo = AMAP_GET_BITS_PTR(ETH_WRB, frag_pa_lo, wrb);
frag_len = AMAP_GET_BITS_PTR(ETH_WRB, frag_len, wrb);
busaddr = (pa_hi << 32) | pa_lo;
if (busaddr != 0) {
pa = le64_to_cpu(busaddr);
pci_unmap_single(adapter->pdev, pa,
frag_len, PCI_DMA_TODEVICE);
}
if (cur_index == last_index) {
skb = (struct sk_buff *)pnob->tx_ctxt[cur_index];
BUG_ON(!skb);
for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[j];
pci_unmap_page(adapter->pdev,
(ulong) frag->page, frag->size,
PCI_DMA_TODEVICE);
}
kfree_skb(skb);
pnob->tx_ctxt[cur_index] = NULL;
} else {
BUG_ON(pnob->tx_ctxt[cur_index]);
}
tx_wrbs_completed++;
be_adv_txq_tl(pnob);
} while (cur_index != last_index);
atomic_sub(tx_wrbs_completed, &pnob->tx_q_used);
}
/* there is no need to take an SMP lock here since currently
* we have only one instance of the tasklet that does completion
* processing.
*/
static void process_nic_tx_completions(struct be_net_object *pnob)
{
struct be_adapter *adapter = pnob->adapter;
struct ETH_TX_COMPL_AMAP *txcp;
struct net_device *netdev = pnob->netdev;
u32 end_idx, num_processed = 0;
adapter->be_stat.bes_tx_events++;
while ((txcp = be_get_tx_cmpl(pnob))) {
end_idx = AMAP_GET_BITS_PTR(ETH_TX_COMPL, wrb_index, txcp);
process_one_tx_compl(pnob, end_idx);
num_processed++;
adapter->be_stat.bes_tx_compl++;
}
be_notify_cmpl(pnob, num_processed, pnob->tx_cq_id, 1);
/*
* We got Tx completions and have usable WRBs.
* If the netdev's queue has been stopped
* because we had run out of WRBs, wake it now.
*/
spin_lock(&adapter->txq_lock);
if (netif_queue_stopped(netdev)
&& atomic_read(&pnob->tx_q_used) < pnob->tx_q_len / 2) {
netif_wake_queue(netdev);
}
spin_unlock(&adapter->txq_lock);
}
static u32 post_rx_buffs(struct be_net_object *pnob, struct list_head *rxbl)
{
u32 nposted = 0;
struct ETH_RX_D_AMAP *rxd = NULL;
struct be_recv_buffer *rxbp;
void **rx_ctxp;
struct RQ_DB_AMAP rqdb;
rx_ctxp = pnob->rx_ctxt;
while (!list_empty(rxbl) &&
(rx_ctxp[pnob->rx_q_hd] == NULL) && nposted < 255) {
rxbp = list_first_entry(rxbl, struct be_recv_buffer, rxb_list);
list_del(&rxbp->rxb_list);
rxd = pnob->rx_q + pnob->rx_q_hd;
AMAP_SET_BITS_PTR(ETH_RX_D, fragpa_lo, rxd, rxbp->rxb_pa_lo);
AMAP_SET_BITS_PTR(ETH_RX_D, fragpa_hi, rxd, rxbp->rxb_pa_hi);
rx_ctxp[pnob->rx_q_hd] = rxbp->rxb_ctxt;
be_adv_rxq_hd(pnob);
nposted++;
}
if (nposted) {
/* Now press the door bell to notify BladeEngine. */
rqdb.dw[0] = 0;
AMAP_SET_BITS_PTR(RQ_DB, numPosted, &rqdb, nposted);
AMAP_SET_BITS_PTR(RQ_DB, rq, &rqdb, pnob->rx_q_id);
PD_WRITE(&pnob->fn_obj, erx_rq_db, rqdb.dw[0]);
}
atomic_add(nposted, &pnob->rx_q_posted);
return nposted;
}
void be_post_eth_rx_buffs(struct be_net_object *pnob)
{
struct be_adapter *adapter = pnob->adapter;
u32 num_bufs, r;
u64 busaddr = 0, tmp_pa;
u32 max_bufs, pg_hd;
u32 frag_size;
struct be_recv_buffer *rxbp;
struct list_head rxbl;
struct be_rx_page_info *rx_page_info;
struct page *page = NULL;
u32 page_order = 0;
gfp_t alloc_flags = GFP_ATOMIC;
BUG_ON(!adapter);
max_bufs = 64; /* should be even # <= 255. */
frag_size = pnob->rx_buf_size;
page_order = get_order(frag_size);
if (frag_size == 8192)
alloc_flags |= (gfp_t) __GFP_COMP;
/*
* Form a linked list of RECV_BUFFER structures to be posted.
* We post an even number of buffers so that pages can be
* shared.
*/
INIT_LIST_HEAD(&rxbl);
for (num_bufs = 0; num_bufs < max_bufs &&
!pnob->rx_page_info[pnob->rx_pg_info_hd].page; ++num_bufs) {
rxbp = &pnob->eth_rx_bufs[num_bufs];
pg_hd = pnob->rx_pg_info_hd;
rx_page_info = &pnob->rx_page_info[pg_hd];
if (!page) {
page = alloc_pages(alloc_flags, page_order);
if (unlikely(page == NULL)) {
adapter->be_stat.bes_ethrx_post_fail++;
pnob->rxbuf_post_fail++;
break;
}
pnob->rxbuf_post_fail = 0;
busaddr = pci_map_page(adapter->pdev, page, 0,
frag_size, PCI_DMA_FROMDEVICE);
rx_page_info->page_offset = 0;
rx_page_info->page = page;
/*
* If we are sharing a page between two skbs,
* alloc a new one on the next iteration
*/
if (pnob->rx_pg_shared == false)
page = NULL;
} else {
get_page(page);
rx_page_info->page_offset += frag_size;
rx_page_info->page = page;
/*
* We are finished with the allocated page;
* allocate a new one on the next iteration
*/
page = NULL;
}
rxbp->rxb_ctxt = (void *)rx_page_info;
index_inc(&pnob->rx_pg_info_hd, pnob->rx_q_len);
pci_unmap_addr_set(rx_page_info, bus, busaddr);
tmp_pa = busaddr + rx_page_info->page_offset;
rxbp->rxb_pa_lo = (tmp_pa & 0xFFFFFFFF);
rxbp->rxb_pa_hi = (tmp_pa >> 32);
rxbp->rxb_len = frag_size;
list_add_tail(&rxbp->rxb_list, &rxbl);
} /* End of for */
r = post_rx_buffs(pnob, &rxbl);
BUG_ON(r != num_bufs);
return;
}
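/*
* Page-sharing scheme used above: when rx_pg_shared is true, each
* allocated page backs two receive buffers (page_offset 0 and frag_size),
* with get_page() taking an extra reference for the second half;
* otherwise every buffer gets a page of its own.
*/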
/*
* Interrupt service for network function. We just schedule the
* tasklet which does all completion processing.
*/
irqreturn_t be_int(int irq, void *dev)
{
struct net_device *netdev = dev;
struct be_net_object *pnob = netdev_priv(netdev);
struct be_adapter *adapter = pnob->adapter;
u32 isr;
isr = CSR_READ(&pnob->fn_obj, cev.isr1);
if (unlikely(!isr))
return IRQ_NONE;
spin_lock(&adapter->int_lock);
adapter->isr |= isr;
spin_unlock(&adapter->int_lock);
adapter->be_stat.bes_ints++;
tasklet_schedule(&adapter->sts_handler);
return IRQ_HANDLED;
}
/*
* Poll function called by NAPI with a work budget.
* We process as many UC, BC and MC receive completions
* as the budget allows and return the actual number of
* RX statuses processed.
*/
int be_poll(struct napi_struct *napi, int budget)
{
struct be_net_object *pnob =
container_of(napi, struct be_net_object, napi);
u32 work_done;
pnob->adapter->be_stat.bes_polls++;
work_done = process_rx_completions(pnob, budget);
BUG_ON(work_done > budget);
/* All consumed */
if (work_done < budget) {
netif_rx_complete(napi);
/* enable intr */
be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 1);
} else {
/* More to be consumed; continue with interrupts disabled */
be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 0);
}
return work_done;
}
static struct EQ_ENTRY_AMAP *get_event(struct be_net_object *pnob)
{
struct EQ_ENTRY_AMAP *eqp = &(pnob->event_q[pnob->event_q_tl]);
if (!AMAP_GET_BITS_PTR(EQ_ENTRY, Valid, eqp))
return NULL;
be_adv_eq_tl(pnob);
return eqp;
}
/*
* Processes all valid events in the event ring associated with given
* NetObject. Also, notifies BE the number of events processed.
*/
static inline u32 process_events(struct be_net_object *pnob)
{
struct be_adapter *adapter = pnob->adapter;
struct EQ_ENTRY_AMAP *eqp;
u32 rid, num_events = 0;
struct net_device *netdev = pnob->netdev;
while ((eqp = get_event(pnob)) != NULL) {
adapter->be_stat.bes_events++;
rid = AMAP_GET_BITS_PTR(EQ_ENTRY, ResourceID, eqp);
if (rid == pnob->rx_cq_id) {
adapter->be_stat.bes_rx_events++;
netif_rx_schedule(&pnob->napi);
} else if (rid == pnob->tx_cq_id) {
process_nic_tx_completions(pnob);
} else if (rid == pnob->mcc_cq_id) {
be_mcc_process_cq(&pnob->mcc_q_obj, 1);
} else {
dev_info(&netdev->dev,
"Invalid EQ ResourceID %d\n", rid);
}
AMAP_SET_BITS_PTR(EQ_ENTRY, Valid, eqp, 0);
AMAP_SET_BITS_PTR(EQ_ENTRY, ResourceID, eqp, 0);
num_events++;
}
return num_events;
}
static void update_eqd(struct be_adapter *adapter, struct be_net_object *pnob)
{
int status;
struct be_eq_object *eq_objectp;
/* update once a second */
if ((jiffies - adapter->ips_jiffies) > 1 * (HZ)) {
/* One second elapsed since last update */
u32 r, new_eqd = -1;
r = adapter->be_stat.bes_ints - adapter->be_stat.bes_prev_ints;
r = r / ((jiffies - adapter->ips_jiffies) / (HZ));
adapter->be_stat.bes_ips = r;
adapter->ips_jiffies = jiffies;
adapter->be_stat.bes_prev_ints = adapter->be_stat.bes_ints;
if (r > IPS_HI_WM && adapter->cur_eqd < adapter->max_eqd)
new_eqd = (adapter->cur_eqd + 8);
if (r < IPS_LO_WM && adapter->cur_eqd > adapter->min_eqd)
new_eqd = (adapter->cur_eqd - 8);
if (adapter->enable_aic && new_eqd != -1) {
eq_objectp = &pnob->event_q_obj;
status = be_eq_modify_delay(&pnob->fn_obj, 1,
&eq_objectp, &new_eqd, NULL,
NULL, NULL);
if (status == BE_SUCCESS)
adapter->cur_eqd = new_eqd;
}
}
}
/*
This function notifies BladeEngine of how many events were processed
from the event queue by ringing the corresponding door bell and
optionally re-arms the event queue.
n - number of events processed
re_arm - 1 - re-arm the EQ, 0 - do not re-arm the EQ
*/
static void be_notify_event(struct be_net_object *pnob, int n, int re_arm)
{
struct CQ_DB_AMAP eqdb;
eqdb.dw[0] = 0;
AMAP_SET_BITS_PTR(CQ_DB, qid, &eqdb, pnob->event_q_id);
AMAP_SET_BITS_PTR(CQ_DB, rearm, &eqdb, re_arm);
AMAP_SET_BITS_PTR(CQ_DB, event, &eqdb, 1);
AMAP_SET_BITS_PTR(CQ_DB, num_popped, &eqdb, n);
/*
* Under some situations we see an interrupt and no valid
* EQ entry. To keep going, we need to ring the DB even if
* numPosted is 0.
*/
PD_WRITE(&pnob->fn_obj, cq_db, eqdb.dw[0]);
return;
}
/*
* Called from the tasklet scheduled by ISR. All real interrupt processing
* is done here.
*/
void be_process_intr(unsigned long context)
{
struct be_adapter *adapter = (struct be_adapter *)context;
struct be_net_object *pnob = adapter->net_obj;
u32 isr, n;
ulong flags = 0;
isr = adapter->isr;
/*
* we create only one NIC event queue in Linux. Event is
* expected only in the first event queue
*/
BUG_ON(isr & 0xfffffffe);
if ((isr & 1) == 0)
return; /* not our interrupt */
n = process_events(pnob);
/*
* Clear the event bit. adapter->isr is set by
* hard interrupt. Prevent race with lock.
*/
spin_lock_irqsave(&adapter->int_lock, flags);
adapter->isr &= ~1;
spin_unlock_irqrestore(&adapter->int_lock, flags);
be_notify_event(pnob, n, 1);
/*
* If previous allocation attempts had failed and
* BE has used up all posted buffers, post RX buffers here
*/
if (pnob->rxbuf_post_fail && atomic_read(&pnob->rx_q_posted) == 0)
be_post_eth_rx_buffs(pnob);
update_eqd(adapter, pnob);
return;
}


@@ -1,705 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* be_netif.c
*
* This file contains the various driver entry points seen by the TCP/IP stack.
*/
#include <linux/if_vlan.h>
#include <linux/in.h>
#include "benet.h"
#include <linux/ip.h>
#include <linux/inet_lro.h>
/* Strings to print Link properties */
static const char *link_speed[] = {
"Invalid link Speed Value",
"10 Mbps",
"100 Mbps",
"1 Gbps",
"10 Gbps"
};
static const char *link_duplex[] = {
"Invalid Duplex Value",
"Half Duplex",
"Full Duplex"
};
static const char *link_state[] = {
"",
"(active)"
};
void be_print_link_info(struct BE_LINK_STATUS *lnk_status)
{
u16 si, di, ai;
/* Port 0 */
if (lnk_status->mac0_speed && lnk_status->mac0_duplex) {
/* Port is up and running */
si = (lnk_status->mac0_speed < 5) ? lnk_status->mac0_speed : 0;
di = (lnk_status->mac0_duplex < 3) ?
lnk_status->mac0_duplex : 0;
ai = (lnk_status->active_port == 0) ? 1 : 0;
printk(KERN_INFO "PortNo. 0: Speed - %s %s %s\n",
link_speed[si], link_duplex[di], link_state[ai]);
} else
printk(KERN_INFO "PortNo. 0: Down\n");
/* Port 1 */
if (lnk_status->mac1_speed && lnk_status->mac1_duplex) {
/* Port is up and running */
si = (lnk_status->mac1_speed < 5) ? lnk_status->mac1_speed : 0;
di = (lnk_status->mac1_duplex < 3) ?
lnk_status->mac1_duplex : 0;
ai = (lnk_status->active_port == 1) ? 1 : 0;
printk(KERN_INFO "PortNo. 1: Speed - %s %s %s\n",
link_speed[si], link_duplex[di], link_state[ai]);
} else
printk(KERN_INFO "PortNo. 1: Down\n");
return;
}
static int
be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
void **ip_hdr, void **tcpudp_hdr,
u64 *hdr_flags, void *priv)
{
struct ethhdr *eh;
struct vlan_ethhdr *veh;
struct iphdr *iph;
u8 *va = page_address(frag->page) + frag->page_offset;
unsigned long ll_hlen;
/* find the mac header, abort if not IPv4 */
prefetch(va);
eh = (struct ethhdr *)va;
*mac_hdr = eh;
ll_hlen = ETH_HLEN;
if (eh->h_proto != htons(ETH_P_IP)) {
if (eh->h_proto == htons(ETH_P_8021Q)) {
veh = (struct vlan_ethhdr *)va;
if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
return -1;
ll_hlen += VLAN_HLEN;
} else {
return -1;
}
}
*hdr_flags = LRO_IPV4;
iph = (struct iphdr *)(va + ll_hlen);
*ip_hdr = iph;
if (iph->protocol != IPPROTO_TCP)
return -1;
*hdr_flags |= LRO_TCP;
*tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
return 0;
}
static int benet_open(struct net_device *netdev)
{
struct be_net_object *pnob = netdev_priv(netdev);
struct be_adapter *adapter = pnob->adapter;
struct net_lro_mgr *lro_mgr;
if (adapter->dev_state < BE_DEV_STATE_INIT)
return -EAGAIN;
lro_mgr = &pnob->lro_mgr;
lro_mgr->dev = netdev;
lro_mgr->features = LRO_F_NAPI;
lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS;
lro_mgr->lro_arr = pnob->lro_desc;
lro_mgr->get_frag_header = be_get_frag_header;
lro_mgr->max_aggr = adapter->max_rx_coal;
lro_mgr->frag_align_pad = 2;
if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
lro_mgr->max_aggr = MAX_SKB_FRAGS;
adapter->max_rx_coal = BE_LRO_MAX_PKTS;
be_update_link_status(adapter);
/*
* Set carrier on only if Physical Link up
* Either of the port link status up signifies this
*/
if ((adapter->port0_link_sts == BE_PORT_LINK_UP) ||
(adapter->port1_link_sts == BE_PORT_LINK_UP)) {
netif_start_queue(netdev);
netif_carrier_on(netdev);
}
adapter->dev_state = BE_DEV_STATE_OPEN;
napi_enable(&pnob->napi);
be_enable_intr(pnob);
be_enable_eq_intr(pnob);
/*
* RX completion queue may be in dis-armed state. Arm it.
*/
be_notify_cmpl(pnob, 0, pnob->rx_cq_id, 1);
return 0;
}
static int benet_close(struct net_device *netdev)
{
struct be_net_object *pnob = netdev_priv(netdev);
struct be_adapter *adapter = pnob->adapter;
netif_stop_queue(netdev);
synchronize_irq(netdev->irq);
be_wait_nic_tx_cmplx_cmpl(pnob);
adapter->dev_state = BE_DEV_STATE_INIT;
netif_carrier_off(netdev);
adapter->port0_link_sts = BE_PORT_LINK_DOWN;
adapter->port1_link_sts = BE_PORT_LINK_DOWN;
be_disable_intr(pnob);
be_disable_eq_intr(pnob);
napi_disable(&pnob->napi);
return 0;
}
/*
 * Setting a MAC Address for BE
 * Takes netdev and a void pointer as arguments.
 * The pointer holds the new address to be used.
*/
static int benet_set_mac_addr(struct net_device *netdev, void *p)
{
struct sockaddr *addr = p;
struct be_net_object *pnob = netdev_priv(netdev);
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
be_rxf_mac_address_read_write(&pnob->fn_obj, 0, 0, false, true, false,
netdev->dev_addr, NULL, NULL);
/*
* Since we are doing Active-Passive failover, both
 * ports should have matching MAC addresses at all times.
*/
be_rxf_mac_address_read_write(&pnob->fn_obj, 1, 0, false, true, false,
netdev->dev_addr, NULL, NULL);
return 0;
}
void be_get_stats_timer_handler(unsigned long context)
{
struct be_timer_ctxt *ctxt = (struct be_timer_ctxt *)context;
if (atomic_read(&ctxt->get_stat_flag)) {
atomic_dec(&ctxt->get_stat_flag);
up((void *)ctxt->get_stat_sem_addr);
}
del_timer(&ctxt->get_stats_timer);
return;
}
void be_get_stat_cb(void *context, int status,
struct MCC_WRB_AMAP *optional_wrb)
{
struct be_timer_ctxt *ctxt = (struct be_timer_ctxt *)context;
/*
 * just up the semaphore if the get_stat_flag
 * reads 1, so that the waiter can continue.
* If it is 0, then it was handled by the timer handler.
*/
del_timer(&ctxt->get_stats_timer);
if (atomic_read(&ctxt->get_stat_flag)) {
atomic_dec(&ctxt->get_stat_flag);
up((void *)ctxt->get_stat_sem_addr);
}
}
struct net_device_stats *benet_get_stats(struct net_device *dev)
{
struct be_net_object *pnob = netdev_priv(dev);
struct be_adapter *adapter = pnob->adapter;
u64 pa;
struct be_timer_ctxt *ctxt = &adapter->timer_ctxt;
if (adapter->dev_state != BE_DEV_STATE_OPEN) {
/* Return previously read stats */
return &(adapter->benet_stats);
}
/* Get Physical Addr */
pa = pci_map_single(adapter->pdev, adapter->eth_statsp,
sizeof(struct FWCMD_ETH_GET_STATISTICS),
PCI_DMA_FROMDEVICE);
ctxt->get_stat_sem_addr = (unsigned long)&adapter->get_eth_stat_sem;
atomic_inc(&ctxt->get_stat_flag);
be_rxf_query_eth_statistics(&pnob->fn_obj, adapter->eth_statsp,
cpu_to_le64(pa), be_get_stat_cb, ctxt,
NULL);
ctxt->get_stats_timer.data = (unsigned long)ctxt;
mod_timer(&ctxt->get_stats_timer, (jiffies + (HZ * 2)));
down((void *)ctxt->get_stat_sem_addr); /* callback will unblock us */
/* Adding port0 and port1 stats. */
adapter->benet_stats.rx_packets =
adapter->eth_statsp->params.response.p0recvdtotalframes +
adapter->eth_statsp->params.response.p1recvdtotalframes;
adapter->benet_stats.tx_packets =
adapter->eth_statsp->params.response.p0xmitunicastframes +
adapter->eth_statsp->params.response.p1xmitunicastframes;
adapter->benet_stats.tx_bytes =
adapter->eth_statsp->params.response.p0xmitbyteslsd +
adapter->eth_statsp->params.response.p1xmitbyteslsd;
adapter->benet_stats.rx_errors =
adapter->eth_statsp->params.response.p0crcerrors +
adapter->eth_statsp->params.response.p1crcerrors;
adapter->benet_stats.rx_errors +=
adapter->eth_statsp->params.response.p0alignmentsymerrs +
adapter->eth_statsp->params.response.p1alignmentsymerrs;
adapter->benet_stats.rx_errors +=
adapter->eth_statsp->params.response.p0inrangelenerrors +
adapter->eth_statsp->params.response.p1inrangelenerrors;
adapter->benet_stats.rx_bytes =
adapter->eth_statsp->params.response.p0recvdtotalbytesLSD +
adapter->eth_statsp->params.response.p1recvdtotalbytesLSD;
adapter->benet_stats.rx_crc_errors =
adapter->eth_statsp->params.response.p0crcerrors +
adapter->eth_statsp->params.response.p1crcerrors;
adapter->benet_stats.tx_packets +=
adapter->eth_statsp->params.response.p0xmitmulticastframes +
adapter->eth_statsp->params.response.p1xmitmulticastframes;
adapter->benet_stats.tx_packets +=
adapter->eth_statsp->params.response.p0xmitbroadcastframes +
adapter->eth_statsp->params.response.p1xmitbroadcastframes;
adapter->benet_stats.tx_errors = 0;
adapter->benet_stats.multicast =
adapter->eth_statsp->params.response.p0xmitmulticastframes +
adapter->eth_statsp->params.response.p1xmitmulticastframes;
adapter->benet_stats.rx_fifo_errors =
adapter->eth_statsp->params.response.p0rxfifooverflowdropped +
adapter->eth_statsp->params.response.p1rxfifooverflowdropped;
adapter->benet_stats.rx_frame_errors =
adapter->eth_statsp->params.response.p0alignmentsymerrs +
adapter->eth_statsp->params.response.p1alignmentsymerrs;
adapter->benet_stats.rx_length_errors =
adapter->eth_statsp->params.response.p0inrangelenerrors +
adapter->eth_statsp->params.response.p1inrangelenerrors;
adapter->benet_stats.rx_length_errors +=
adapter->eth_statsp->params.response.p0outrangeerrors +
adapter->eth_statsp->params.response.p1outrangeerrors;
adapter->benet_stats.rx_length_errors +=
adapter->eth_statsp->params.response.p0frametoolongerrors +
adapter->eth_statsp->params.response.p1frametoolongerrors;
pci_unmap_single(adapter->pdev, pa,
sizeof(struct FWCMD_ETH_GET_STATISTICS),
PCI_DMA_FROMDEVICE);
return &(adapter->benet_stats);
}
static void be_start_tx(struct be_net_object *pnob, u32 nposted)
{
#define CSR_ETH_MAX_SQPOSTS 255
struct SQ_DB_AMAP sqdb;
sqdb.dw[0] = 0;
AMAP_SET_BITS_PTR(SQ_DB, cid, &sqdb, pnob->tx_q_id);
while (nposted) {
if (nposted > CSR_ETH_MAX_SQPOSTS) {
AMAP_SET_BITS_PTR(SQ_DB, numPosted, &sqdb,
CSR_ETH_MAX_SQPOSTS);
nposted -= CSR_ETH_MAX_SQPOSTS;
} else {
AMAP_SET_BITS_PTR(SQ_DB, numPosted, &sqdb, nposted);
nposted = 0;
}
PD_WRITE(&pnob->fn_obj, etx_sq_db, sqdb.dw[0]);
}
return;
}
static void update_tx_rate(struct be_adapter *adapter)
{
/* update the rate once in two seconds */
if ((jiffies - adapter->eth_tx_jiffies) > 2 * (HZ)) {
u32 r;
r = adapter->eth_tx_bytes /
((jiffies - adapter->eth_tx_jiffies) / (HZ));
r = (r / 1000000); /* M bytes/s */
adapter->be_stat.bes_eth_tx_rate = (r * 8); /* M bits/s */
adapter->eth_tx_jiffies = jiffies;
adapter->eth_tx_bytes = 0;
}
}
static int wrb_cnt_in_skb(struct sk_buff *skb)
{
int cnt = 0;
while (skb) {
if (skb->len > skb->data_len)
cnt++;
cnt += skb_shinfo(skb)->nr_frags;
skb = skb_shinfo(skb)->frag_list;
}
BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
return cnt;
}
static void wrb_fill(struct ETH_WRB_AMAP *wrb, u64 addr, int len)
{
AMAP_SET_BITS_PTR(ETH_WRB, frag_pa_hi, wrb, addr >> 32);
AMAP_SET_BITS_PTR(ETH_WRB, frag_pa_lo, wrb, addr & 0xFFFFFFFF);
AMAP_SET_BITS_PTR(ETH_WRB, frag_len, wrb, len);
}
static void wrb_fill_extra(struct ETH_WRB_AMAP *wrb, struct sk_buff *skb,
struct be_net_object *pnob)
{
wrb->dw[2] = 0;
wrb->dw[3] = 0;
AMAP_SET_BITS_PTR(ETH_WRB, crc, wrb, 1);
if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
AMAP_SET_BITS_PTR(ETH_WRB, lso, wrb, 1);
AMAP_SET_BITS_PTR(ETH_WRB, lso_mss, wrb,
skb_shinfo(skb)->gso_size);
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 proto = ((struct iphdr *)ip_hdr(skb))->protocol;
if (proto == IPPROTO_TCP)
AMAP_SET_BITS_PTR(ETH_WRB, tcpcs, wrb, 1);
else if (proto == IPPROTO_UDP)
AMAP_SET_BITS_PTR(ETH_WRB, udpcs, wrb, 1);
}
if (pnob->vlan_grp && vlan_tx_tag_present(skb)) {
AMAP_SET_BITS_PTR(ETH_WRB, vlan, wrb, 1);
AMAP_SET_BITS_PTR(ETH_WRB, vlan_tag, wrb, vlan_tx_tag_get(skb));
}
}
static inline void wrb_copy_extra(struct ETH_WRB_AMAP *to,
struct ETH_WRB_AMAP *from)
{
to->dw[2] = from->dw[2];
to->dw[3] = from->dw[3];
}
/* Returns the actual count of wrbs used including a possible dummy */
static int copy_skb_to_txq(struct be_net_object *pnob, struct sk_buff *skb,
u32 wrb_cnt, u32 *copied)
{
u64 busaddr;
struct ETH_WRB_AMAP *wrb = NULL, *first = NULL;
u32 i;
bool dummy = true;
struct pci_dev *pdev = pnob->adapter->pdev;
if (wrb_cnt & 1)
wrb_cnt++;
else
dummy = false;
atomic_add(wrb_cnt, &pnob->tx_q_used);
while (skb) {
if (skb->len > skb->data_len) {
int len = skb->len - skb->data_len;
busaddr = pci_map_single(pdev, skb->data, len,
PCI_DMA_TODEVICE);
busaddr = cpu_to_le64(busaddr);
wrb = &pnob->tx_q[pnob->tx_q_hd];
if (first == NULL) {
wrb_fill_extra(wrb, skb, pnob);
first = wrb;
} else {
wrb_copy_extra(wrb, first);
}
wrb_fill(wrb, busaddr, len);
be_adv_txq_hd(pnob);
*copied += len;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct skb_frag_struct *frag =
&skb_shinfo(skb)->frags[i];
busaddr = pci_map_page(pdev, frag->page,
frag->page_offset, frag->size,
PCI_DMA_TODEVICE);
busaddr = cpu_to_le64(busaddr);
wrb = &pnob->tx_q[pnob->tx_q_hd];
if (first == NULL) {
wrb_fill_extra(wrb, skb, pnob);
first = wrb;
} else {
wrb_copy_extra(wrb, first);
}
wrb_fill(wrb, busaddr, frag->size);
be_adv_txq_hd(pnob);
*copied += frag->size;
}
skb = skb_shinfo(skb)->frag_list;
}
if (dummy) {
wrb = &pnob->tx_q[pnob->tx_q_hd];
BUG_ON(first == NULL);
wrb_copy_extra(wrb, first);
wrb_fill(wrb, 0, 0);
be_adv_txq_hd(pnob);
}
AMAP_SET_BITS_PTR(ETH_WRB, complete, wrb, 1);
AMAP_SET_BITS_PTR(ETH_WRB, last, wrb, 1);
return wrb_cnt;
}
/* For each skb transmitted, tx_ctxt stores the num of wrbs in the
* start index and skb pointer in the end index
*/
static inline void be_tx_wrb_info_remember(struct be_net_object *pnob,
struct sk_buff *skb, int wrb_cnt,
u32 start)
{
*(u32 *) (&pnob->tx_ctxt[start]) = wrb_cnt;
index_adv(&start, wrb_cnt - 1, pnob->tx_q_len);
pnob->tx_ctxt[start] = skb;
}
static int benet_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct be_net_object *pnob = netdev_priv(netdev);
struct be_adapter *adapter = pnob->adapter;
u32 wrb_cnt, copied = 0;
u32 start = pnob->tx_q_hd;
adapter->be_stat.bes_tx_reqs++;
wrb_cnt = wrb_cnt_in_skb(skb);
spin_lock_bh(&adapter->txq_lock);
if ((pnob->tx_q_len - 2 - atomic_read(&pnob->tx_q_used)) <= wrb_cnt) {
netif_stop_queue(pnob->netdev);
spin_unlock_bh(&adapter->txq_lock);
adapter->be_stat.bes_tx_fails++;
return NETDEV_TX_BUSY;
}
spin_unlock_bh(&adapter->txq_lock);
wrb_cnt = copy_skb_to_txq(pnob, skb, wrb_cnt, &copied);
be_tx_wrb_info_remember(pnob, skb, wrb_cnt, start);
be_start_tx(pnob, wrb_cnt);
adapter->eth_tx_bytes += copied;
adapter->be_stat.bes_tx_wrbs += wrb_cnt;
update_tx_rate(adapter);
netdev->trans_start = jiffies;
return NETDEV_TX_OK;
}
/*
* This is the driver entry point to change the mtu of the device
* Returns 0 for success and errno for failure.
*/
static int benet_change_mtu(struct net_device *netdev, int new_mtu)
{
/*
 * BE supports jumbo frame sizes up to 9000 bytes including the link layer
* header. Considering the different variants of frame formats possible
* like VLAN, SNAP/LLC, the maximum possible value for MTU is 8974 bytes
*/
if (new_mtu < (ETH_ZLEN + ETH_FCS_LEN) || (new_mtu > BE_MAX_MTU)) {
dev_info(&netdev->dev, "Invalid MTU requested. "
"Must be between %d and %d bytes\n",
(ETH_ZLEN + ETH_FCS_LEN), BE_MAX_MTU);
return -EINVAL;
}
dev_info(&netdev->dev, "MTU changed from %d to %d\n",
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
return 0;
}
/*
* This is the driver entry point to register a vlan with the device
*/
static void benet_vlan_register(struct net_device *netdev,
struct vlan_group *grp)
{
struct be_net_object *pnob = netdev_priv(netdev);
be_disable_eq_intr(pnob);
pnob->vlan_grp = grp;
pnob->num_vlans = 0;
be_enable_eq_intr(pnob);
}
/*
* This is the driver entry point to add a vlan vlan_id
* with the device netdev
*/
static void benet_vlan_add_vid(struct net_device *netdev, u16 vlan_id)
{
struct be_net_object *pnob = netdev_priv(netdev);
if (pnob->num_vlans == (BE_NUM_VLAN_SUPPORTED - 1)) {
/* no way to return an error */
dev_info(&netdev->dev,
"BladeEngine: Cannot configure more than %d Vlans\n",
BE_NUM_VLAN_SUPPORTED);
return;
}
/* The new vlan tag will be in the slot indicated by num_vlans. */
pnob->vlan_tag[pnob->num_vlans++] = vlan_id;
be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans,
pnob->vlan_tag, NULL, NULL, NULL);
}
/*
* This is the driver entry point to remove a vlan vlan_id
* with the device netdev
*/
static void benet_vlan_rem_vid(struct net_device *netdev, u16 vlan_id)
{
struct be_net_object *pnob = netdev_priv(netdev);
u32 i, value;
/*
* In Blade Engine, we support 32 vlan tag filters across both ports.
* To program a vlan tag, the RXF_RTPR_CSR register is used.
* Each 32-bit value of RXF_RTDR_CSR can address 2 vlan tag entries.
 * The VLAN table is of depth 16; thus we support 32 tags.
*/
value = vlan_id | VLAN_VALID_BIT;
for (i = 0; i < BE_NUM_VLAN_SUPPORTED; i++) {
if (pnob->vlan_tag[i] == vlan_id)
break;
}
if (i == BE_NUM_VLAN_SUPPORTED)
return;
/* Now compact the vlan tag array by removing the hole created. */
while ((i + 1) < BE_NUM_VLAN_SUPPORTED) {
pnob->vlan_tag[i] = pnob->vlan_tag[i + 1];
i++;
}
if ((i + 1) == BE_NUM_VLAN_SUPPORTED)
pnob->vlan_tag[i] = (u16) 0x0;
pnob->num_vlans--;
be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans,
pnob->vlan_tag, NULL, NULL, NULL);
}
/*
* This function is called to program multicast
* address in the multicast filter of the ASIC.
*/
static void be_set_multicast_filter(struct net_device *netdev)
{
struct be_net_object *pnob = netdev_priv(netdev);
struct dev_mc_list *mc_ptr;
u8 mac_addr[32][ETH_ALEN];
int i;
if (netdev->flags & IFF_ALLMULTI) {
/* set BE in Multicast promiscuous */
be_rxf_multicast_config(&pnob->fn_obj, true, 0, NULL, NULL,
NULL, NULL);
return;
}
for (mc_ptr = netdev->mc_list, i = 0; mc_ptr;
mc_ptr = mc_ptr->next, i++) {
memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN);
}
/* reset the promiscuous mode also. */
be_rxf_multicast_config(&pnob->fn_obj, false, i,
&mac_addr[0][0], NULL, NULL, NULL);
}
/*
* This is the driver entry point to set multicast list
* with the device netdev. This function will be used to
* set promiscuous mode or multicast promiscuous mode
* or multicast mode....
*/
static void benet_set_multicast_list(struct net_device *netdev)
{
struct be_net_object *pnob = netdev_priv(netdev);
if (netdev->flags & IFF_PROMISC) {
be_rxf_promiscuous(&pnob->fn_obj, 1, 1, NULL, NULL, NULL);
} else {
be_rxf_promiscuous(&pnob->fn_obj, 0, 0, NULL, NULL, NULL);
be_set_multicast_filter(netdev);
}
}
int benet_init(struct net_device *netdev)
{
struct be_net_object *pnob = netdev_priv(netdev);
struct be_adapter *adapter = pnob->adapter;
ether_setup(netdev);
netdev->open = &benet_open;
netdev->stop = &benet_close;
netdev->hard_start_xmit = &benet_xmit;
netdev->get_stats = &benet_get_stats;
netdev->set_multicast_list = &benet_set_multicast_list;
netdev->change_mtu = &benet_change_mtu;
netdev->set_mac_address = &benet_set_mac_addr;
netdev->vlan_rx_register = benet_vlan_register;
netdev->vlan_rx_add_vid = benet_vlan_add_vid;
netdev->vlan_rx_kill_vid = benet_vlan_rem_vid;
netdev->features =
NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM;
netdev->flags |= IFF_MULTICAST;
/* If device is DAC Capable, set the HIGHDMA flag for netdevice. */
if (adapter->dma_64bit_cap)
netdev->features |= NETIF_F_HIGHDMA;
SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
return 0;
}

View File

@ -1,429 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#ifndef _BENET_H_
#define _BENET_H_
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/inet_lro.h>
#include "hwlib.h"
#define _SA_MODULE_NAME "net-driver"
#define VLAN_VALID_BIT 0x8000
#define BE_NUM_VLAN_SUPPORTED 32
#define BE_PORT_LINK_DOWN 0000
#define BE_PORT_LINK_UP 0001
#define BE_MAX_TX_FRAG_COUNT (30)
/* Flag bits for send operation */
#define IPCS (1 << 0) /* Enable IP checksum offload */
#define UDPCS (1 << 1) /* Enable UDP checksum offload */
#define TCPCS (1 << 2) /* Enable TCP checksum offload */
#define LSO (1 << 3) /* Enable Large Segment offload */
#define ETHVLAN (1 << 4) /* Enable VLAN insert */
#define ETHEVENT (1 << 5) /* Generate event on completion */
#define ETHCOMPLETE (1 << 6) /* Generate completion when done */
#define IPSEC (1 << 7) /* Enable IPSEC */
#define FORWARD (1 << 8) /* Send the packet in forwarding path */
#define FIN (1 << 9) /* Issue FIN segment */
#define BE_MAX_MTU 8974
#define BE_MAX_LRO_DESCRIPTORS 8
#define BE_LRO_MAX_PKTS 64
#define BE_MAX_FRAGS_PER_FRAME 6
extern const char be_drvr_ver[];
extern char be_fw_ver[];
extern char be_driver_name[];
extern struct ethtool_ops be_ethtool_ops;
#define BE_DEV_STATE_NONE 0
#define BE_DEV_STATE_INIT 1
#define BE_DEV_STATE_OPEN 2
#define BE_DEV_STATE_SUSPEND 3
/* This structure is used to describe physical fragments to use
* for DMAing data from NIC.
*/
struct be_recv_buffer {
struct list_head rxb_list; /* for maintaining a linked list */
void *rxb_va; /* buffer virtual address */
u32 rxb_pa_lo; /* low part of physical address */
u32 rxb_pa_hi; /* high part of physical address */
u32 rxb_len; /* length of recv buffer */
void *rxb_ctxt; /* context for OSM driver to use */
};
/*
* fragment list to describe scattered data.
*/
struct be_tx_frag_list {
u32 txb_len; /* Size of this fragment */
u32 txb_pa_lo; /* Lower 32 bits of 64 bit physical addr */
u32 txb_pa_hi; /* Higher 32 bits of 64 bit physical addr */
};
struct be_rx_page_info {
struct page *page;
dma_addr_t bus;
u16 page_offset;
};
/*
* This structure is the main tracking structure for a NIC interface.
*/
struct be_net_object {
/* MCC Ring - used to send fwcmds to embedded ARM processor */
struct MCC_WRB_AMAP *mcc_q; /* VA of the start of the ring */
u32 mcc_q_len; /* # of WRB entries in this ring */
u32 mcc_q_size;
u32 mcc_q_hd; /* MCC ring head */
u8 mcc_q_created; /* flag to help cleanup */
struct be_mcc_object mcc_q_obj; /* BECLIB's MCC ring Object */
dma_addr_t mcc_q_bus; /* DMA'ble bus address */
/* MCC Completion Ring - FW responses to fwcmds sent from MCC ring */
struct MCC_CQ_ENTRY_AMAP *mcc_cq; /* VA of the start of the ring */
u32 mcc_cq_len; /* # of compl. entries in this ring */
u32 mcc_cq_size;
u32 mcc_cq_tl; /* compl. ring tail */
u8 mcc_cq_created; /* flag to help cleanup */
struct be_cq_object mcc_cq_obj; /* BECLIB's MCC compl. ring object */
u32 mcc_cq_id; /* MCC ring ID */
dma_addr_t mcc_cq_bus; /* DMA'ble bus address */
struct ring_desc mb_rd; /* RD for MCC_MAIL_BOX */
void *mb_ptr; /* mailbox ptr to be freed */
dma_addr_t mb_bus; /* DMA'ble bus address */
u32 mb_size;
/* BEClib uses an array of context objects to track outstanding
 * requests to the MCC. We need to allocate the same number of
 * context entries as the number of entries in the MCC WRB ring
*/
u32 mcc_wrb_ctxt_size;
void *mcc_wrb_ctxt; /* pointer to the context area */
u32 mcc_wrb_ctxtLen; /* Number of entries in the context */
/*
* NIC send request ring - used for xmitting raw ether frames.
*/
struct ETH_WRB_AMAP *tx_q; /* VA of the start of the ring */
u32 tx_q_len; /* # of entries in the send ring */
u32 tx_q_size;
u32 tx_q_hd; /* Head index. Next req. goes here */
u32 tx_q_tl; /* Tail index; oldest outstanding req. */
u8 tx_q_created; /* flag to help cleanup */
struct be_ethsq_object tx_q_obj;/* BECLIB's send Q handle */
dma_addr_t tx_q_bus; /* DMA'ble bus address */
u32 tx_q_id; /* send queue ring ID */
u32 tx_q_port; /* 0 no binding, 1 port A, 2 port B */
atomic_t tx_q_used; /* # of WRBs used */
/* ptr to an array in which we store context info for each send req. */
void **tx_ctxt;
/*
* NIC Send compl. ring - completion status for all NIC frames xmitted.
*/
struct ETH_TX_COMPL_AMAP *tx_cq;/* VA of start of the ring */
u32 txcq_len; /* # of entries in the ring */
u32 tx_cq_size;
/*
* index into compl ring where the host expects next completion entry
*/
u32 tx_cq_tl;
u32 tx_cq_id; /* completion queue id */
u8 tx_cq_created; /* flag to help cleanup */
struct be_cq_object tx_cq_obj;
dma_addr_t tx_cq_bus; /* DMA'ble bus address */
/*
* Event Queue - all completion entries post events here.
*/
struct EQ_ENTRY_AMAP *event_q; /* VA of start of event queue */
u32 event_q_len; /* # of entries */
u32 event_q_size;
u32 event_q_tl; /* Tail of the event queue */
u32 event_q_id; /* Event queue ID */
u8 event_q_created; /* flag to help cleanup */
struct be_eq_object event_q_obj; /* Queue handle */
dma_addr_t event_q_bus; /* DMA'ble bus address */
/*
* NIC receive queue - Data buffers to be used for receiving unicast,
* broadcast and multi-cast frames are posted here.
*/
struct ETH_RX_D_AMAP *rx_q; /* VA of start of the queue */
u32 rx_q_len; /* # of entries */
u32 rx_q_size;
u32 rx_q_hd; /* Head of the queue */
atomic_t rx_q_posted; /* number of posted buffers */
u32 rx_q_id; /* queue ID */
u8 rx_q_created; /* flag to help cleanup */
struct be_ethrq_object rx_q_obj; /* NIC RX queue handle */
dma_addr_t rx_q_bus; /* DMA'ble bus address */
/*
* Pointer to an array of opaque context object for use by OSM driver
*/
void **rx_ctxt;
/*
* NIC unicast RX completion queue - all unicast ether frame completion
* statuses from BE come here.
*/
struct ETH_RX_COMPL_AMAP *rx_cq; /* VA of start of the queue */
u32 rx_cq_len; /* # of entries */
u32 rx_cq_size;
u32 rx_cq_tl; /* Tail of the queue */
u32 rx_cq_id; /* queue ID */
u8 rx_cq_created; /* flag to help cleanup */
struct be_cq_object rx_cq_obj; /* queue handle */
dma_addr_t rx_cq_bus; /* DMA'ble bus address */
struct be_function_object fn_obj; /* function object */
bool fn_obj_created;
u32 rx_buf_size; /* Size of the RX buffers */
struct net_device *netdev;
struct be_recv_buffer eth_rx_bufs[256]; /* to pass Rx buffer
addresses */
struct be_adapter *adapter; /* Pointer to OSM adapter */
u32 devno; /* OSM, network dev no. */
u32 use_port; /* Current active port */
struct be_rx_page_info *rx_page_info; /* Array of Rx buf pages */
u32 rx_pg_info_hd; /* Head of queue */
int rxbuf_post_fail; /* RxBuff posting fail count */
bool rx_pg_shared; /* Is an allocated page shared as two frags? */
struct vlan_group *vlan_grp;
u32 num_vlans; /* Number of vlans in BE's filter */
u16 vlan_tag[BE_NUM_VLAN_SUPPORTED]; /* vlans currently configured */
struct napi_struct napi;
struct net_lro_mgr lro_mgr;
struct net_lro_desc lro_desc[BE_MAX_LRO_DESCRIPTORS];
};
#define NET_FH(np) (&(np)->fn_obj)
/*
* BE driver statistics.
*/
struct be_drvr_stat {
u32 bes_tx_reqs; /* number of TX requests initiated */
u32 bes_tx_fails; /* number of TX requests that failed */
u32 bes_fwd_reqs; /* number of send reqs through forwarding i/f */
u32 bes_tx_wrbs; /* number of tx WRBs used */
u32 bes_ints; /* number of interrupts */
u32 bes_polls; /* number of times NAPI called poll function */
u32 bes_events; /* total event entries processed */
u32 bes_tx_events; /* number of tx completion events */
u32 bes_rx_events; /* number of ucast rx completion events */
u32 bes_tx_compl; /* number of tx completion entries processed */
u32 bes_rx_compl; /* number of rx completion entries
processed */
u32 bes_ethrx_post_fail; /* number of ethrx buffer alloc
failures */
/*
* number of non ether type II frames dropped where
* frame len > length field of Mac Hdr
*/
u32 bes_802_3_dropped_frames;
/*
* number of non ether type II frames malformed where
* in frame len < length field of Mac Hdr
*/
u32 bes_802_3_malformed_frames;
u32 bes_ips; /* interrupts / sec */
u32 bes_prev_ints; /* bes_ints at last IPS calculation */
u16 bes_eth_tx_rate; /* ETH TX rate - Mb/sec */
u16 bes_eth_rx_rate; /* ETH RX rate - Mb/sec */
u32 bes_rx_coal; /* Num pkts coalesced */
u32 bes_rx_flush; /* Num times coalesced */
u32 bes_link_change_physical; /*Num of times physical link changed */
u32 bes_link_change_virtual; /*Num of times virtual link changed */
u32 bes_rx_misc_pkts; /* Misc pkts received */
};
/* Maximum interrupt delay (in microseconds) allowed */
#define MAX_EQD 120
/*
 * timer to prevent system shutdown from hanging forever if h/w stops responding
*/
struct be_timer_ctxt {
atomic_t get_stat_flag;
struct timer_list get_stats_timer;
unsigned long get_stat_sem_addr;
};
/* This structure is the main BladeEngine driver context. */
struct be_adapter {
struct net_device *netdevp;
struct be_drvr_stat be_stat;
struct net_device_stats benet_stats;
/* PCI BAR mapped addresses */
u8 __iomem *csr_va; /* CSR */
u8 __iomem *db_va; /* Door Bell */
u8 __iomem *pci_va; /* PCI Config */
struct tasklet_struct sts_handler;
struct timer_list cq_timer;
spinlock_t int_lock; /* to protect the isr field in adapter */
struct FWCMD_ETH_GET_STATISTICS *eth_statsp;
/*
 * This allows ethtool to control whether the Rx checksum computed by
 * BE is honored. If true, the checksum reported by BE for a received
 * packet is handed to the stack as-is; otherwise the stack
 * recalculates it.
*/
bool rx_csum;
/*
 * This allows ethtool to control Rx packet coalescing. If this is
 * greater than 0 and less than 16, coalescing is enabled; otherwise
 * it is disabled.
*/
u32 max_rx_coal;
struct pci_dev *pdev; /* Pointer to OS's PCI device */
spinlock_t txq_lock; /* to stop/wake queue based on tx_q_used */
u32 isr; /* copy of Intr status reg. */
u32 port0_link_sts; /* Port 0 link status */
u32 port1_link_sts; /* port 1 link status */
struct BE_LINK_STATUS *be_link_sts;
/* pointer to the first netobject of this adapter */
struct be_net_object *net_obj;
/* Flags to indicate what to clean up */
bool tasklet_started;
bool isr_registered;
/*
* adaptive interrupt coalescing (AIC) related
*/
bool enable_aic; /* 1 if AIC is enabled */
u16 min_eqd; /* minimum EQ delay in usec */
u16 max_eqd; /* maximum EQ delay in usec */
u16 cur_eqd; /* current EQ delay in usec */
/*
* book keeping for interrupt / sec and TX/RX rate calculation
*/
ulong ips_jiffies; /* jiffies at last IPS calc */
u32 eth_tx_bytes;
ulong eth_tx_jiffies;
u32 eth_rx_bytes;
ulong eth_rx_jiffies;
struct semaphore get_eth_stat_sem;
/* timer ctxt to prevent shutdown hanging due to un-responsive BE */
struct be_timer_ctxt timer_ctxt;
#define BE_MAX_MSIX_VECTORS 32
#define BE_MAX_REQ_MSIX_VECTORS 1 /* only one EQ in Linux driver */
struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
bool msix_enabled;
bool dma_64bit_cap; /* whether the device is DAC (64-bit DMA) capable */
u8 dev_state; /* The current state of the device */
u8 dev_pm_state; /* The State of device before going to suspend */
};
/*
* Every second we look at the ints/sec and adjust eq_delay
* between adapter->min_eqd and adapter->max_eqd to keep the ints/sec between
* IPS_HI_WM and IPS_LO_WM.
*/
#define IPS_HI_WM 18000
#define IPS_LO_WM 8000
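/*
 * Illustrative sketch (an assumption, not the driver's actual update_eqd()
 * logic): once a second the measured interrupt rate is compared against
 * the watermarks above and cur_eqd is nudged within [min_eqd, max_eqd].
 */
static inline u16 be_aic_next_eqd(struct be_adapter *adapter, u32 ips)
{
	u16 eqd = adapter->cur_eqd;

	if (ips > IPS_HI_WM && eqd < adapter->max_eqd)
		eqd++;		/* too many ints/sec: let events coalesce longer */
	else if (ips < IPS_LO_WM && eqd > adapter->min_eqd)
		eqd--;		/* interrupt rate is low: shorten the delay */
	return eqd;
}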
static inline void index_adv(u32 *index, u32 val, u32 limit)
{
BUG_ON(limit & (limit-1));
*index = (*index + val) & (limit - 1);
}
static inline void index_inc(u32 *index, u32 limit)
{
BUG_ON(limit & (limit-1));
*index = (*index + 1) & (limit - 1);
}
static inline void be_adv_eq_tl(struct be_net_object *pnob)
{
index_inc(&pnob->event_q_tl, pnob->event_q_len);
}
static inline void be_adv_txq_hd(struct be_net_object *pnob)
{
index_inc(&pnob->tx_q_hd, pnob->tx_q_len);
}
static inline void be_adv_txq_tl(struct be_net_object *pnob)
{
index_inc(&pnob->tx_q_tl, pnob->tx_q_len);
}
static inline void be_adv_txcq_tl(struct be_net_object *pnob)
{
index_inc(&pnob->tx_cq_tl, pnob->txcq_len);
}
static inline void be_adv_rxq_hd(struct be_net_object *pnob)
{
index_inc(&pnob->rx_q_hd, pnob->rx_q_len);
}
static inline void be_adv_rxcq_tl(struct be_net_object *pnob)
{
index_inc(&pnob->rx_cq_tl, pnob->rx_cq_len);
}
static inline u32 tx_compl_lastwrb_idx_get(struct be_net_object *pnob)
{
return (pnob->tx_q_tl + *(u32 *)&pnob->tx_ctxt[pnob->tx_q_tl] - 1)
& (pnob->tx_q_len - 1);
}
int benet_init(struct net_device *);
int be_ethtool_ioctl(struct net_device *, struct ifreq *);
struct net_device_stats *benet_get_stats(struct net_device *);
void be_process_intr(unsigned long context);
irqreturn_t be_int(int irq, void *dev);
void be_post_eth_rx_buffs(struct be_net_object *);
void be_get_stat_cb(void *, int, struct MCC_WRB_AMAP *);
void be_get_stats_timer_handler(unsigned long);
void be_wait_nic_tx_cmplx_cmpl(struct be_net_object *);
void be_print_link_info(struct BE_LINK_STATUS *);
void be_update_link_status(struct be_adapter *);
void be_init_procfs(struct be_adapter *);
void be_cleanup_procfs(struct be_adapter *);
int be_poll(struct napi_struct *, int);
struct ETH_RX_COMPL_AMAP *be_get_rx_cmpl(struct be_net_object *);
void be_notify_cmpl(struct be_net_object *, int, int, int);
void be_enable_intr(struct be_net_object *);
void be_enable_eq_intr(struct be_net_object *);
void be_disable_intr(struct be_net_object *);
void be_disable_eq_intr(struct be_net_object *);
int be_set_uc_mac_adr(struct be_net_object *, u8, u8, u8,
u8 *, mcc_wrb_cqe_callback, void *);
int be_get_flow_ctl(struct be_function_object *pFnObj, bool *, bool *);
void process_one_tx_compl(struct be_net_object *pnob, u32 end_idx);
#endif /* _BENET_H_ */

View File

@ -1,103 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#ifndef _BESTATUS_H_
#define _BESTATUS_H_
#define BE_SUCCESS (0x00000000L)
/*
* MessageId: BE_PENDING
 * The BladeEngine Driver call succeeded, and the operation is pending.
*/
#define BE_PENDING (0x20070001L)
#define BE_STATUS_PENDING (BE_PENDING)
/*
* MessageId: BE_NOT_OK
* An error occurred.
*/
#define BE_NOT_OK (0xE0070002L)
/*
* MessageId: BE_STATUS_SYSTEM_RESOURCES
* Insufficient host system resources exist to complete the API.
*/
#define BE_STATUS_SYSTEM_RESOURCES (0xE0070003L)
/*
* MessageId: BE_STATUS_CHIP_RESOURCES
* Insufficient chip resources exist to complete the API.
*/
#define BE_STATUS_CHIP_RESOURCES (0xE0070004L)
/*
* MessageId: BE_STATUS_NO_RESOURCE
* Insufficient resources to complete request.
*/
#define BE_STATUS_NO_RESOURCE (0xE0070005L)
/*
* MessageId: BE_STATUS_BUSY
* Resource is currently busy.
*/
#define BE_STATUS_BUSY (0xE0070006L)
/*
* MessageId: BE_STATUS_INVALID_PARAMETER
* Invalid Parameter in request.
*/
#define BE_STATUS_INVALID_PARAMETER (0xE0000007L)
/*
* MessageId: BE_STATUS_NOT_SUPPORTED
* Requested operation is not supported.
*/
#define BE_STATUS_NOT_SUPPORTED (0xE000000DL)
/*
* ***************************************************************************
* E T H E R N E T S T A T U S
* ***************************************************************************
*/
/*
* MessageId: BE_ETH_TX_ERROR
* The Ethernet device driver failed to transmit a packet.
*/
#define BE_ETH_TX_ERROR (0xE0070101L)
/*
* ***************************************************************************
* S H A R E D S T A T U S
* ***************************************************************************
*/
/*
* MessageId: BE_STATUS_VBD_INVALID_VERSION
* The device driver is not compatible with this version of the VBD.
*/
#define BE_STATUS_INVALID_VERSION (0xE0070402L)
/*
* MessageId: BE_STATUS_DOMAIN_DENIED
* The operation failed to complete due to insufficient access
* rights for the requesting domain.
*/
#define BE_STATUS_DOMAIN_DENIED (0xE0070403L)
/*
* MessageId: BE_STATUS_TCP_NOT_STARTED
* The embedded TCP/IP stack has not been started.
*/
#define BE_STATUS_TCP_NOT_STARTED (0xE0070409L)
/*
* MessageId: BE_STATUS_NO_MCC_WRB
 * No free MCC WRBs are available for posting the request.
*/
#define BE_STATUS_NO_MCC_WRB (0xE0070414L)
#endif /* _BESTATUS_H_ */

View File

@ -1,243 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __cev_amap_h__
#define __cev_amap_h__
#include "ep.h"
/*
* Host Interrupt Status Register 0. The first of four application
* interrupt status registers. This register contains the interrupts
* for Event Queues EQ0 through EQ31.
*/
struct BE_CEV_ISR0_CSR_AMAP {
u8 interrupt0; /* DWORD 0 */
u8 interrupt1; /* DWORD 0 */
u8 interrupt2; /* DWORD 0 */
u8 interrupt3; /* DWORD 0 */
u8 interrupt4; /* DWORD 0 */
u8 interrupt5; /* DWORD 0 */
u8 interrupt6; /* DWORD 0 */
u8 interrupt7; /* DWORD 0 */
u8 interrupt8; /* DWORD 0 */
u8 interrupt9; /* DWORD 0 */
u8 interrupt10; /* DWORD 0 */
u8 interrupt11; /* DWORD 0 */
u8 interrupt12; /* DWORD 0 */
u8 interrupt13; /* DWORD 0 */
u8 interrupt14; /* DWORD 0 */
u8 interrupt15; /* DWORD 0 */
u8 interrupt16; /* DWORD 0 */
u8 interrupt17; /* DWORD 0 */
u8 interrupt18; /* DWORD 0 */
u8 interrupt19; /* DWORD 0 */
u8 interrupt20; /* DWORD 0 */
u8 interrupt21; /* DWORD 0 */
u8 interrupt22; /* DWORD 0 */
u8 interrupt23; /* DWORD 0 */
u8 interrupt24; /* DWORD 0 */
u8 interrupt25; /* DWORD 0 */
u8 interrupt26; /* DWORD 0 */
u8 interrupt27; /* DWORD 0 */
u8 interrupt28; /* DWORD 0 */
u8 interrupt29; /* DWORD 0 */
u8 interrupt30; /* DWORD 0 */
u8 interrupt31; /* DWORD 0 */
} __packed;
struct CEV_ISR0_CSR_AMAP {
u32 dw[1];
};
/*
* Host Interrupt Status Register 1. The second of four application
* interrupt status registers. This register contains the interrupts
* for Event Queues EQ32 through EQ63.
*/
struct BE_CEV_ISR1_CSR_AMAP {
u8 interrupt32; /* DWORD 0 */
u8 interrupt33; /* DWORD 0 */
u8 interrupt34; /* DWORD 0 */
u8 interrupt35; /* DWORD 0 */
u8 interrupt36; /* DWORD 0 */
u8 interrupt37; /* DWORD 0 */
u8 interrupt38; /* DWORD 0 */
u8 interrupt39; /* DWORD 0 */
u8 interrupt40; /* DWORD 0 */
u8 interrupt41; /* DWORD 0 */
u8 interrupt42; /* DWORD 0 */
u8 interrupt43; /* DWORD 0 */
u8 interrupt44; /* DWORD 0 */
u8 interrupt45; /* DWORD 0 */
u8 interrupt46; /* DWORD 0 */
u8 interrupt47; /* DWORD 0 */
u8 interrupt48; /* DWORD 0 */
u8 interrupt49; /* DWORD 0 */
u8 interrupt50; /* DWORD 0 */
u8 interrupt51; /* DWORD 0 */
u8 interrupt52; /* DWORD 0 */
u8 interrupt53; /* DWORD 0 */
u8 interrupt54; /* DWORD 0 */
u8 interrupt55; /* DWORD 0 */
u8 interrupt56; /* DWORD 0 */
u8 interrupt57; /* DWORD 0 */
u8 interrupt58; /* DWORD 0 */
u8 interrupt59; /* DWORD 0 */
u8 interrupt60; /* DWORD 0 */
u8 interrupt61; /* DWORD 0 */
u8 interrupt62; /* DWORD 0 */
u8 interrupt63; /* DWORD 0 */
} __packed;
struct CEV_ISR1_CSR_AMAP {
u32 dw[1];
};
/*
* Host Interrupt Status Register 2. The third of four application
* interrupt status registers. This register contains the interrupts
* for Event Queues EQ64 through EQ95.
*/
struct BE_CEV_ISR2_CSR_AMAP {
u8 interrupt64; /* DWORD 0 */
u8 interrupt65; /* DWORD 0 */
u8 interrupt66; /* DWORD 0 */
u8 interrupt67; /* DWORD 0 */
u8 interrupt68; /* DWORD 0 */
u8 interrupt69; /* DWORD 0 */
u8 interrupt70; /* DWORD 0 */
u8 interrupt71; /* DWORD 0 */
u8 interrupt72; /* DWORD 0 */
u8 interrupt73; /* DWORD 0 */
u8 interrupt74; /* DWORD 0 */
u8 interrupt75; /* DWORD 0 */
u8 interrupt76; /* DWORD 0 */
u8 interrupt77; /* DWORD 0 */
u8 interrupt78; /* DWORD 0 */
u8 interrupt79; /* DWORD 0 */
u8 interrupt80; /* DWORD 0 */
u8 interrupt81; /* DWORD 0 */
u8 interrupt82; /* DWORD 0 */
u8 interrupt83; /* DWORD 0 */
u8 interrupt84; /* DWORD 0 */
u8 interrupt85; /* DWORD 0 */
u8 interrupt86; /* DWORD 0 */
u8 interrupt87; /* DWORD 0 */
u8 interrupt88; /* DWORD 0 */
u8 interrupt89; /* DWORD 0 */
u8 interrupt90; /* DWORD 0 */
u8 interrupt91; /* DWORD 0 */
u8 interrupt92; /* DWORD 0 */
u8 interrupt93; /* DWORD 0 */
u8 interrupt94; /* DWORD 0 */
u8 interrupt95; /* DWORD 0 */
} __packed;
struct CEV_ISR2_CSR_AMAP {
u32 dw[1];
};
/*
* Host Interrupt Status Register 3. The fourth of four application
* interrupt status registers. This register contains the interrupts
* for Event Queues EQ96 through EQ127.
*/
struct BE_CEV_ISR3_CSR_AMAP {
u8 interrupt96; /* DWORD 0 */
u8 interrupt97; /* DWORD 0 */
u8 interrupt98; /* DWORD 0 */
u8 interrupt99; /* DWORD 0 */
u8 interrupt100; /* DWORD 0 */
u8 interrupt101; /* DWORD 0 */
u8 interrupt102; /* DWORD 0 */
u8 interrupt103; /* DWORD 0 */
u8 interrupt104; /* DWORD 0 */
u8 interrupt105; /* DWORD 0 */
u8 interrupt106; /* DWORD 0 */
u8 interrupt107; /* DWORD 0 */
u8 interrupt108; /* DWORD 0 */
u8 interrupt109; /* DWORD 0 */
u8 interrupt110; /* DWORD 0 */
u8 interrupt111; /* DWORD 0 */
u8 interrupt112; /* DWORD 0 */
u8 interrupt113; /* DWORD 0 */
u8 interrupt114; /* DWORD 0 */
u8 interrupt115; /* DWORD 0 */
u8 interrupt116; /* DWORD 0 */
u8 interrupt117; /* DWORD 0 */
u8 interrupt118; /* DWORD 0 */
u8 interrupt119; /* DWORD 0 */
u8 interrupt120; /* DWORD 0 */
u8 interrupt121; /* DWORD 0 */
u8 interrupt122; /* DWORD 0 */
u8 interrupt123; /* DWORD 0 */
u8 interrupt124; /* DWORD 0 */
u8 interrupt125; /* DWORD 0 */
u8 interrupt126; /* DWORD 0 */
u8 interrupt127; /* DWORD 0 */
} __packed;
struct CEV_ISR3_CSR_AMAP {
u32 dw[1];
};
/* Completions and Events block Registers. */
struct BE_CEV_CSRMAP_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
u8 rsvd1[32]; /* DWORD 1 */
u8 rsvd2[32]; /* DWORD 2 */
u8 rsvd3[32]; /* DWORD 3 */
struct BE_CEV_ISR0_CSR_AMAP isr0;
struct BE_CEV_ISR1_CSR_AMAP isr1;
struct BE_CEV_ISR2_CSR_AMAP isr2;
struct BE_CEV_ISR3_CSR_AMAP isr3;
u8 rsvd4[32]; /* DWORD 8 */
u8 rsvd5[32]; /* DWORD 9 */
u8 rsvd6[32]; /* DWORD 10 */
u8 rsvd7[32]; /* DWORD 11 */
u8 rsvd8[32]; /* DWORD 12 */
u8 rsvd9[32]; /* DWORD 13 */
u8 rsvd10[32]; /* DWORD 14 */
u8 rsvd11[32]; /* DWORD 15 */
u8 rsvd12[32]; /* DWORD 16 */
u8 rsvd13[32]; /* DWORD 17 */
u8 rsvd14[32]; /* DWORD 18 */
u8 rsvd15[32]; /* DWORD 19 */
u8 rsvd16[32]; /* DWORD 20 */
u8 rsvd17[32]; /* DWORD 21 */
u8 rsvd18[32]; /* DWORD 22 */
u8 rsvd19[32]; /* DWORD 23 */
u8 rsvd20[32]; /* DWORD 24 */
u8 rsvd21[32]; /* DWORD 25 */
u8 rsvd22[32]; /* DWORD 26 */
u8 rsvd23[32]; /* DWORD 27 */
u8 rsvd24[32]; /* DWORD 28 */
u8 rsvd25[32]; /* DWORD 29 */
u8 rsvd26[32]; /* DWORD 30 */
u8 rsvd27[32]; /* DWORD 31 */
u8 rsvd28[32]; /* DWORD 32 */
u8 rsvd29[32]; /* DWORD 33 */
u8 rsvd30[192]; /* DWORD 34 */
u8 rsvd31[192]; /* DWORD 40 */
u8 rsvd32[160]; /* DWORD 46 */
u8 rsvd33[160]; /* DWORD 51 */
u8 rsvd34[160]; /* DWORD 56 */
u8 rsvd35[96]; /* DWORD 61 */
u8 rsvd36[192][32]; /* DWORD 64 */
} __packed;
struct CEV_CSRMAP_AMAP {
u32 dw[256];
};
#endif /* __cev_amap_h__ */

View File

@ -1,211 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#include "hwlib.h"
#include "bestatus.h"
/*
* Completion Queue Objects
*/
/*
*============================================================================
* P U B L I C R O U T I N E S
*============================================================================
*/
/*
This routine creates a completion queue based on the client completion
queue configuration information.
FunctionObject - Handle to a function object
CqBaseVa - Base VA for the CQ ring
NumEntries - CEV_CQ_CNT_* values
solEventEnable - 0 = All CQEs can generate Events if CQ is eventable
1 = only CQEs with solicited bit set are eventable
eventable - Eventable CQ, generates interrupts.
nodelay - 1 = Force interrupt, relevant if CQ eventable.
Interrupt is asserted immediately after EQE
write is confirmed, regardless of EQ Timer
or watermark settings.
wme - Enable watermark based coalescing
wmThresh - High watermark(CQ fullness at which event
or interrupt should be asserted). These are the
CEV_WATERMARK encoded values.
EqObject - EQ Handle to assign to this CQ
ppCqObject - Internal CQ Handle returned.
Returns BE_SUCCESS if successful, otherwise a useful error code is
returned.
IRQL < DISPATCH_LEVEL
*/
int be_cq_create(struct be_function_object *pfob,
struct ring_desc *rd, u32 length, bool solicited_eventable,
bool no_delay, u32 wm_thresh,
struct be_eq_object *eq_object, struct be_cq_object *cq_object)
{
int status = BE_SUCCESS;
u32 num_entries_encoding;
u32 num_entries = length / sizeof(struct MCC_CQ_ENTRY_AMAP);
struct FWCMD_COMMON_CQ_CREATE *fwcmd = NULL;
struct MCC_WRB_AMAP *wrb = NULL;
u32 n;
unsigned long irql;
ASSERT(rd);
ASSERT(cq_object);
ASSERT(length % sizeof(struct MCC_CQ_ENTRY_AMAP) == 0);
switch (num_entries) {
case 256:
num_entries_encoding = CEV_CQ_CNT_256;
break;
case 512:
num_entries_encoding = CEV_CQ_CNT_512;
break;
case 1024:
num_entries_encoding = CEV_CQ_CNT_1024;
break;
default:
ASSERT(0);
return BE_STATUS_INVALID_PARAMETER;
}
/*
 * All CQ entries are the same size. Use the iSCSI version
* as a test for the proper rd length.
*/
memset(cq_object, 0, sizeof(*cq_object));
atomic_set(&cq_object->ref_count, 0);
cq_object->parent_function = pfob;
cq_object->eq_object = eq_object;
cq_object->num_entries = num_entries;
/* save for MCC cq processing */
cq_object->va = rd->va;
/* map into UT. */
length = num_entries * sizeof(struct MCC_CQ_ENTRY_AMAP);
spin_lock_irqsave(&pfob->post_lock, irql);
wrb = be_function_peek_mcc_wrb(pfob);
if (!wrb) {
ASSERT(wrb);
TRACE(DL_ERR, "No free MCC WRBs in create EQ.");
status = BE_STATUS_NO_MCC_WRB;
goto Error;
}
/* Prepares an embedded fwcmd, including request/response sizes. */
fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_CQ_CREATE);
fwcmd->params.request.num_pages = PAGES_SPANNED(OFFSET_IN_PAGE(rd->va),
length);
AMAP_SET_BITS_PTR(CQ_CONTEXT, valid, &fwcmd->params.request.context, 1);
n = pfob->pci_function_number;
AMAP_SET_BITS_PTR(CQ_CONTEXT, Func, &fwcmd->params.request.context, n);
n = (eq_object != NULL);
AMAP_SET_BITS_PTR(CQ_CONTEXT, Eventable,
&fwcmd->params.request.context, n);
AMAP_SET_BITS_PTR(CQ_CONTEXT, Armed, &fwcmd->params.request.context, 1);
n = eq_object ? eq_object->eq_id : 0;
AMAP_SET_BITS_PTR(CQ_CONTEXT, EQID, &fwcmd->params.request.context, n);
AMAP_SET_BITS_PTR(CQ_CONTEXT, Count,
&fwcmd->params.request.context, num_entries_encoding);
n = 0; /* Protection Domain is always 0 in Linux driver */
AMAP_SET_BITS_PTR(CQ_CONTEXT, PD, &fwcmd->params.request.context, n);
AMAP_SET_BITS_PTR(CQ_CONTEXT, NoDelay,
&fwcmd->params.request.context, no_delay);
AMAP_SET_BITS_PTR(CQ_CONTEXT, SolEvent,
&fwcmd->params.request.context, solicited_eventable);
n = (wm_thresh != 0xFFFFFFFF);
AMAP_SET_BITS_PTR(CQ_CONTEXT, WME, &fwcmd->params.request.context, n);
n = (n ? wm_thresh : 0);
AMAP_SET_BITS_PTR(CQ_CONTEXT, Watermark,
&fwcmd->params.request.context, n);
/* Create a page list for the FWCMD. */
be_rd_to_pa_list(rd, fwcmd->params.request.pages,
ARRAY_SIZE(fwcmd->params.request.pages));
/* Post the f/w command */
status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
NULL, NULL, fwcmd, NULL);
if (status != BE_SUCCESS) {
TRACE(DL_ERR, "MCC to create CQ failed.");
goto Error;
}
/* Remember the CQ id. */
cq_object->cq_id = fwcmd->params.response.cq_id;
/* insert this cq into eq_object reference */
if (eq_object) {
atomic_inc(&eq_object->ref_count);
list_add_tail(&cq_object->cqlist_for_eq,
&eq_object->cq_list_head);
}
Error:
spin_unlock_irqrestore(&pfob->post_lock, irql);
if (pfob->pend_queue_driving && pfob->mcc) {
pfob->pend_queue_driving = 0;
be_drive_mcc_wrb_queue(pfob->mcc);
}
return status;
}
/*
Dereferences the given object. Once the object's reference count drops to
zero, the object is destroyed and all resources that are held by this object
are released. The on-chip context is also destroyed along with the queue
ID, and any mappings made into the UT.
cq_object - CQ handle returned from cq_object_create.
returns the current reference count on the object
IRQL: IRQL < DISPATCH_LEVEL
*/
int be_cq_destroy(struct be_cq_object *cq_object)
{
int status = 0;
/* Nothing should reference this CQ at this point. */
ASSERT(atomic_read(&cq_object->ref_count) == 0);
/* Send fwcmd to destroy the CQ. */
status = be_function_ring_destroy(cq_object->parent_function,
cq_object->cq_id, FWCMD_RING_TYPE_CQ,
NULL, NULL, NULL, NULL);
ASSERT(status == 0);
/* Remove reference if this is an eventable CQ. */
if (cq_object->eq_object) {
atomic_dec(&cq_object->eq_object->ref_count);
list_del(&cq_object->cqlist_for_eq);
}
return BE_SUCCESS;
}

View File

@ -1,71 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __descriptors_amap_h__
#define __descriptors_amap_h__
/*
* --- IPC_NODE_ID_ENUM ---
* IPC processor id values
*/
#define TPOST_NODE_ID (0) /* TPOST ID */
#define TPRE_NODE_ID (1) /* TPRE ID */
#define TXULP0_NODE_ID (2) /* TXULP0 ID */
#define TXULP1_NODE_ID (3) /* TXULP1 ID */
#define TXULP2_NODE_ID (4) /* TXULP2 ID */
#define RXULP0_NODE_ID (5) /* RXULP0 ID */
#define RXULP1_NODE_ID (6) /* RXULP1 ID */
#define RXULP2_NODE_ID (7) /* RXULP2 ID */
#define MPU_NODE_ID (15) /* MPU ID */
/*
* --- MAC_ID_ENUM ---
* Meaning of the mac_id field in rxpp_eth_d
*/
#define PORT0_HOST_MAC0 (0) /* PD 0, Port 0, host networking, MAC 0. */
#define PORT0_HOST_MAC1 (1) /* PD 0, Port 0, host networking, MAC 1. */
#define PORT0_STORAGE_MAC0 (2) /* PD 0, Port 0, host storage, MAC 0. */
#define PORT0_STORAGE_MAC1 (3) /* PD 0, Port 0, host storage, MAC 1. */
#define PORT1_HOST_MAC0 (4) /* PD 0, Port 1 host networking, MAC 0. */
#define PORT1_HOST_MAC1 (5) /* PD 0, Port 1 host networking, MAC 1. */
#define PORT1_STORAGE_MAC0 (6) /* PD 0, Port 1 host storage, MAC 0. */
#define PORT1_STORAGE_MAC1 (7) /* PD 0, Port 1 host storage, MAC 1. */
#define FIRST_VM_MAC (8) /* PD 1 MAC. Protection domains have IDs */
/* from 0x8-0x26, one per PD. */
#define LAST_VM_MAC (38) /* PD 31 MAC. */
#define MGMT_MAC (39) /* Management port MAC. */
#define MARBLE_MAC0 (59) /* Used for flushing function 0 receive */
/*
* queues before re-using a torn-down
* receive ring. the DA =
* 00-00-00-00-00-00, and the MSB of the
* SA = 00
*/
#define MARBLE_MAC1 (60) /* Used for flushing function 1 receive */
/*
* queues before re-using a torn-down
* receive ring. the DA =
* 00-00-00-00-00-00, and the MSB of the
* SA != 00
*/
#define NULL_MAC (61) /* Promiscuous mode, indicates no match */
#define MCAST_MAC (62) /* Multicast match. */
#define BCAST_MATCH (63) /* Broadcast match. */
#endif /* __descriptors_amap_h__ */

View File

@ -1,179 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __doorbells_amap_h__
#define __doorbells_amap_h__
/* The TX/RDMA send queue doorbell. */
struct BE_SQ_DB_AMAP {
u8 cid[11]; /* DWORD 0 */
u8 rsvd0[5]; /* DWORD 0 */
u8 numPosted[14]; /* DWORD 0 */
u8 rsvd1[2]; /* DWORD 0 */
} __packed;
struct SQ_DB_AMAP {
u32 dw[1];
};
/* The receive queue doorbell. */
struct BE_RQ_DB_AMAP {
u8 rq[10]; /* DWORD 0 */
u8 rsvd0[13]; /* DWORD 0 */
u8 Invalidate; /* DWORD 0 */
u8 numPosted[8]; /* DWORD 0 */
} __packed;
struct RQ_DB_AMAP {
u32 dw[1];
};
/*
* The CQ/EQ doorbell. Software MUST set reserved fields in this
* descriptor to zero, otherwise (CEV) hardware will not execute the
* doorbell (flagging a bad_db_qid error instead).
*/
struct BE_CQ_DB_AMAP {
u8 qid[10]; /* DWORD 0 */
u8 rsvd0[4]; /* DWORD 0 */
u8 rearm; /* DWORD 0 */
u8 event; /* DWORD 0 */
u8 num_popped[13]; /* DWORD 0 */
u8 rsvd1[3]; /* DWORD 0 */
} __packed;
struct CQ_DB_AMAP {
u32 dw[1];
};
struct BE_TPM_RQ_DB_AMAP {
u8 qid[10]; /* DWORD 0 */
u8 rsvd0[6]; /* DWORD 0 */
u8 numPosted[11]; /* DWORD 0 */
u8 mss_cnt[5]; /* DWORD 0 */
} __packed;
struct TPM_RQ_DB_AMAP {
u32 dw[1];
};
/*
* Post WRB Queue Doorbell Register used by the host Storage stack
* to notify the controller of a posted Work Request Block
*/
struct BE_WRB_POST_DB_AMAP {
u8 wrb_cid[10]; /* DWORD 0 */
u8 rsvd0[6]; /* DWORD 0 */
u8 wrb_index[8]; /* DWORD 0 */
u8 numberPosted[8]; /* DWORD 0 */
} __packed;
struct WRB_POST_DB_AMAP {
u32 dw[1];
};
/*
* Update Default PDU Queue Doorbell Register used to communicate
* to the controller that the driver has stopped processing the queue
 * and where in the queue it stopped; this is
 * a CQ Entry Type. Used by the storage driver.
*/
struct BE_DEFAULT_PDU_DB_AMAP {
u8 qid[10]; /* DWORD 0 */
u8 rsvd0[4]; /* DWORD 0 */
u8 rearm; /* DWORD 0 */
u8 event; /* DWORD 0 */
u8 cqproc[14]; /* DWORD 0 */
u8 rsvd1[2]; /* DWORD 0 */
} __packed;
struct DEFAULT_PDU_DB_AMAP {
u32 dw[1];
};
/* Management Command and Controller default fragment ring */
struct BE_MCC_DB_AMAP {
u8 rid[11]; /* DWORD 0 */
u8 rsvd0[5]; /* DWORD 0 */
u8 numPosted[14]; /* DWORD 0 */
u8 rsvd1[2]; /* DWORD 0 */
} __packed;
struct MCC_DB_AMAP {
u32 dw[1];
};
/*
* Used for bootstrapping the Host interface. This register is
* used for driver communication with the MPU when no MCC Rings exist.
* The software must write this register twice to post any MCC
* command. First, it writes the register with hi=1 and the upper bits of
* the physical address for the MCC_MAILBOX structure. Software must poll
 * the ready bit until this is acknowledged. Then, software writes the
* register with hi=0 with the lower bits in the address. It must
* poll the ready bit until the MCC command is complete. Upon completion,
* the MCC_MAILBOX will contain a valid completion queue entry.
*/
struct BE_MPU_MAILBOX_DB_AMAP {
u8 ready; /* DWORD 0 */
u8 hi; /* DWORD 0 */
u8 address[30]; /* DWORD 0 */
} __packed;
struct MPU_MAILBOX_DB_AMAP {
u32 dw[1];
};
/*
* This is the protection domain doorbell register map. Note that
* while this map shows doorbells for all Blade Engine supported
* protocols, not all of these may be valid in a given function or
* protection domain. It is the responsibility of the application
* accessing the doorbells to know which are valid. Each doorbell
* occupies 32 bytes of space, but unless otherwise specified,
* only the first 4 bytes should be written. There are 32 instances
* of these doorbells for the host and 31 virtual machines respectively.
 * The host and each VM map only the doorbell pages belonging to their
 * own protection domain and cannot touch the doorbells of another VM.
 * The doorbells are the only registers directly accessible
* by a virtual machine. Similarly, there are 511 additional
* doorbells for RDMA protection domains. PD 0 for RDMA shares
* the same physical protection domain doorbell page as ETH/iSCSI.
*
*/
struct BE_PROTECTION_DOMAIN_DBMAP_AMAP {
u8 rsvd0[512]; /* DWORD 0 */
struct BE_SQ_DB_AMAP rdma_sq_db;
u8 rsvd1[7][32]; /* DWORD 17 */
struct BE_WRB_POST_DB_AMAP iscsi_wrb_post_db;
u8 rsvd2[7][32]; /* DWORD 25 */
struct BE_SQ_DB_AMAP etx_sq_db;
u8 rsvd3[7][32]; /* DWORD 33 */
struct BE_RQ_DB_AMAP rdma_rq_db;
u8 rsvd4[7][32]; /* DWORD 41 */
struct BE_DEFAULT_PDU_DB_AMAP iscsi_default_pdu_db;
u8 rsvd5[7][32]; /* DWORD 49 */
struct BE_TPM_RQ_DB_AMAP tpm_rq_db;
u8 rsvd6[7][32]; /* DWORD 57 */
struct BE_RQ_DB_AMAP erx_rq_db;
u8 rsvd7[7][32]; /* DWORD 65 */
struct BE_CQ_DB_AMAP cq_db;
u8 rsvd8[7][32]; /* DWORD 73 */
struct BE_MCC_DB_AMAP mpu_mcc_db;
u8 rsvd9[7][32]; /* DWORD 81 */
struct BE_MPU_MAILBOX_DB_AMAP mcc_bootstrap_db;
u8 rsvd10[935][32]; /* DWORD 89 */
} __packed;
struct PROTECTION_DOMAIN_DBMAP_AMAP {
u32 dw[1024];
};
#endif /* __doorbells_amap_h__ */

View File

@ -1,66 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __ep_amap_h__
#define __ep_amap_h__
/* General Control and Status Register. */
struct BE_EP_CONTROL_CSR_AMAP {
u8 m0_RxPbuf; /* DWORD 0 */
u8 m1_RxPbuf; /* DWORD 0 */
u8 m2_RxPbuf; /* DWORD 0 */
u8 ff_en; /* DWORD 0 */
u8 rsvd0[27]; /* DWORD 0 */
u8 CPU_reset; /* DWORD 0 */
} __packed;
struct EP_CONTROL_CSR_AMAP {
u32 dw[1];
};
/* Semaphore Register. */
struct BE_EP_SEMAPHORE_CSR_AMAP {
u8 value[32]; /* DWORD 0 */
} __packed;
struct EP_SEMAPHORE_CSR_AMAP {
u32 dw[1];
};
/* Embedded Processor Specific Registers. */
struct BE_EP_CSRMAP_AMAP {
struct BE_EP_CONTROL_CSR_AMAP ep_control;
u8 rsvd0[32]; /* DWORD 1 */
u8 rsvd1[32]; /* DWORD 2 */
u8 rsvd2[32]; /* DWORD 3 */
u8 rsvd3[32]; /* DWORD 4 */
u8 rsvd4[32]; /* DWORD 5 */
u8 rsvd5[8][128]; /* DWORD 6 */
u8 rsvd6[32]; /* DWORD 38 */
u8 rsvd7[32]; /* DWORD 39 */
u8 rsvd8[32]; /* DWORD 40 */
u8 rsvd9[32]; /* DWORD 41 */
u8 rsvd10[32]; /* DWORD 42 */
struct BE_EP_SEMAPHORE_CSR_AMAP ep_semaphore;
u8 rsvd11[32]; /* DWORD 44 */
u8 rsvd12[19][32]; /* DWORD 45 */
} __packed;
struct EP_CSRMAP_AMAP {
u32 dw[64];
};
#endif /* __ep_amap_h__ */

View File

@ -1,299 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#include "hwlib.h"
#include "bestatus.h"
/*
This routine creates an event queue based on the client completion
queue configuration information.
FunctionObject - Handle to a function object
EqBaseVa - Base VA for the EQ ring
SizeEncoding - The encoded size for the EQ entries. This value is
either CEV_EQ_SIZE_4 or CEV_EQ_SIZE_16
NumEntries - CEV_CQ_CNT_* values.
Watermark - Enables watermark based coalescing. This parameter
must be of the type CEV_WMARK_* if watermarks
are enabled. If watermarks are to be disabled
this value should be -1.
TimerDelay - If a timer delay is enabled this value should be the
time of the delay in 8 microsecond units. If
delays are not used this parameter should be
set to -1.
ppEqObject - Internal EQ Handle returned.
Returns BE_SUCCESS if successfull,, otherwise a useful error code
is returned.
IRQL < DISPATCH_LEVEL
*/
int
be_eq_create(struct be_function_object *pfob,
struct ring_desc *rd, u32 eqe_size, u32 num_entries,
u32 watermark, /* CEV_WMARK_* or -1 */
u32 timer_delay, /* in 8us units, or -1 */
struct be_eq_object *eq_object)
{
int status = BE_SUCCESS;
u32 num_entries_encoding, eqe_size_encoding, length;
struct FWCMD_COMMON_EQ_CREATE *fwcmd = NULL;
struct MCC_WRB_AMAP *wrb = NULL;
u32 n;
unsigned long irql;
ASSERT(rd);
ASSERT(eq_object);
switch (num_entries) {
case 256:
num_entries_encoding = CEV_EQ_CNT_256;
break;
case 512:
num_entries_encoding = CEV_EQ_CNT_512;
break;
case 1024:
num_entries_encoding = CEV_EQ_CNT_1024;
break;
case 2048:
num_entries_encoding = CEV_EQ_CNT_2048;
break;
case 4096:
num_entries_encoding = CEV_EQ_CNT_4096;
break;
default:
ASSERT(0);
return BE_STATUS_INVALID_PARAMETER;
}
switch (eqe_size) {
case 4:
eqe_size_encoding = CEV_EQ_SIZE_4;
break;
case 16:
eqe_size_encoding = CEV_EQ_SIZE_16;
break;
default:
ASSERT(0);
return BE_STATUS_INVALID_PARAMETER;
}
if ((eqe_size == 4 && num_entries < 1024) ||
(eqe_size == 16 && num_entries == 4096)) {
TRACE(DL_ERR, "Bad EQ size. eqe_size:%d num_entries:%d",
eqe_size, num_entries);
ASSERT(0);
return BE_STATUS_INVALID_PARAMETER;
}
memset(eq_object, 0, sizeof(*eq_object));
atomic_set(&eq_object->ref_count, 0);
eq_object->parent_function = pfob;
eq_object->eq_id = 0xFFFFFFFF;
INIT_LIST_HEAD(&eq_object->cq_list_head);
length = num_entries * eqe_size;
spin_lock_irqsave(&pfob->post_lock, irql);
wrb = be_function_peek_mcc_wrb(pfob);
if (!wrb) {
ASSERT(wrb);
TRACE(DL_ERR, "No free MCC WRBs in create EQ.");
status = BE_STATUS_NO_MCC_WRB;
goto Error;
}
/* Prepares an embedded fwcmd, including request/response sizes. */
fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_EQ_CREATE);
fwcmd->params.request.num_pages = PAGES_SPANNED(OFFSET_IN_PAGE(rd->va),
length);
n = pfob->pci_function_number;
AMAP_SET_BITS_PTR(EQ_CONTEXT, Func, &fwcmd->params.request.context, n);
AMAP_SET_BITS_PTR(EQ_CONTEXT, valid, &fwcmd->params.request.context, 1);
AMAP_SET_BITS_PTR(EQ_CONTEXT, Size,
&fwcmd->params.request.context, eqe_size_encoding);
n = 0; /* Protection Domain is always 0 in Linux driver */
AMAP_SET_BITS_PTR(EQ_CONTEXT, PD, &fwcmd->params.request.context, n);
/* Let the caller ARM the EQ with the doorbell. */
AMAP_SET_BITS_PTR(EQ_CONTEXT, Armed, &fwcmd->params.request.context, 0);
AMAP_SET_BITS_PTR(EQ_CONTEXT, Count, &fwcmd->params.request.context,
num_entries_encoding);
n = pfob->pci_function_number * 32;
AMAP_SET_BITS_PTR(EQ_CONTEXT, EventVect,
&fwcmd->params.request.context, n);
if (watermark != -1) {
AMAP_SET_BITS_PTR(EQ_CONTEXT, WME,
&fwcmd->params.request.context, 1);
AMAP_SET_BITS_PTR(EQ_CONTEXT, Watermark,
&fwcmd->params.request.context, watermark);
ASSERT(watermark <= CEV_WMARK_240);
} else
AMAP_SET_BITS_PTR(EQ_CONTEXT, WME,
&fwcmd->params.request.context, 0);
if (timer_delay != -1) {
AMAP_SET_BITS_PTR(EQ_CONTEXT, TMR,
&fwcmd->params.request.context, 1);
ASSERT(timer_delay <= 250); /* max value according to EAS */
timer_delay = min(timer_delay, (u32)250);
AMAP_SET_BITS_PTR(EQ_CONTEXT, Delay,
&fwcmd->params.request.context, timer_delay);
} else {
AMAP_SET_BITS_PTR(EQ_CONTEXT, TMR,
&fwcmd->params.request.context, 0);
}
/* Create a page list for the FWCMD. */
be_rd_to_pa_list(rd, fwcmd->params.request.pages,
ARRAY_SIZE(fwcmd->params.request.pages));
status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
NULL, NULL, fwcmd, NULL);
if (status != BE_SUCCESS) {
TRACE(DL_ERR, "MCC to create EQ failed.");
goto Error;
}
/* Get the EQ id. The MPU allocates the IDs. */
eq_object->eq_id = fwcmd->params.response.eq_id;
Error:
spin_unlock_irqrestore(&pfob->post_lock, irql);
if (pfob->pend_queue_driving && pfob->mcc) {
pfob->pend_queue_driving = 0;
be_drive_mcc_wrb_queue(pfob->mcc);
}
return status;
}
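/*
 * Editorial example, not part of the original file: a minimal sketch of
 * how a client might call be_eq_create() above.  The function object and
 * the ring descriptor (sized here for 1024 4-byte EQEs) are assumed to
 * exist already; the helper name example_create_eq is hypothetical.
 */
static int example_create_eq(struct be_function_object *pfob,
			     struct ring_desc *eq_rd,
			     struct be_eq_object *eq)
{
	/*
	 * 4-byte EQEs require at least 1024 entries (see the size
	 * validation above); -1 disables watermarks and the delay timer.
	 */
	return be_eq_create(pfob, eq_rd, 4, 1024, -1, -1, eq);
}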
/*
Dereferences the given object. Once the object's reference count drops to
zero, the object is destroyed and all resources that are held by this
object are released. The on-chip context is also destroyed along with
the queue ID, and any mappings made into the UT.
eq_object - EQ handle returned from eq_object_create.
Returns BE_SUCCESS if successful, otherwise a useful error code
is returned.
IRQL: IRQL < DISPATCH_LEVEL
*/
int be_eq_destroy(struct be_eq_object *eq_object)
{
int status = 0;
ASSERT(atomic_read(&eq_object->ref_count) == 0);
/* no CQs should reference this EQ now */
ASSERT(list_empty(&eq_object->cq_list_head));
/* Send fwcmd to destroy the EQ. */
status = be_function_ring_destroy(eq_object->parent_function,
eq_object->eq_id, FWCMD_RING_TYPE_EQ,
NULL, NULL, NULL, NULL);
ASSERT(status == 0);
return BE_SUCCESS;
}
/*
*---------------------------------------------------------------------------
* Function: be_eq_modify_delay
* Changes the EQ delay for a group of EQs.
* num_eq - The number of EQs in the eq_array to adjust.
* This also is the number of delay values in
* the eq_delay_array.
* eq_array - Array of struct be_eq_object pointers to adjust.
* eq_delay_array - Array of "num_eq" timer delays in units
* of microseconds. The be_eq_query_delay_range
* fwcmd returns the resolution and range of
* legal EQ delays.
 * cb - Optional callback invoked when the FWCMD completes.
 * cb_context - Passed to the callback function.
* q_ctxt - Optional. Pointer to a previously allocated
* struct. If the MCC WRB ring is full, this
* structure is used to queue the operation. It
* will be posted to the MCC ring when space
* becomes available. All queued commands will
* be posted to the ring in the order they are
* received. It is always valid to pass a pointer to
* a generic be_generic_q_cntxt. However,
* the specific context structs
* are generally smaller than the generic struct.
* return pend_status - BE_SUCCESS (0) on success.
 * BE_PENDING (positive value) if the FWCMD
* completion is pending. Negative error code on failure.
*-------------------------------------------------------------------------
*/
int
be_eq_modify_delay(struct be_function_object *pfob,
u32 num_eq, struct be_eq_object **eq_array,
u32 *eq_delay_array, mcc_wrb_cqe_callback cb,
void *cb_context, struct be_eq_modify_delay_q_ctxt *q_ctxt)
{
struct FWCMD_COMMON_MODIFY_EQ_DELAY *fwcmd = NULL;
struct MCC_WRB_AMAP *wrb = NULL;
int status = 0;
struct be_generic_q_ctxt *gen_ctxt = NULL;
u32 i;
unsigned long irql;
spin_lock_irqsave(&pfob->post_lock, irql);
wrb = be_function_peek_mcc_wrb(pfob);
if (!wrb) {
if (q_ctxt && cb) {
wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
gen_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
gen_ctxt->context.bytes = sizeof(*q_ctxt);
} else {
status = BE_STATUS_NO_MCC_WRB;
goto Error;
}
}
/* Prepares an embedded fwcmd, including request/response sizes. */
fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_MODIFY_EQ_DELAY);
ASSERT(num_eq > 0);
ASSERT(num_eq <= ARRAY_SIZE(fwcmd->params.request.delay));
fwcmd->params.request.num_eq = num_eq;
for (i = 0; i < num_eq; i++) {
fwcmd->params.request.delay[i].eq_id = eq_array[i]->eq_id;
fwcmd->params.request.delay[i].delay_in_microseconds =
eq_delay_array[i];
}
/* Post the f/w command */
status = be_function_post_mcc_wrb(pfob, wrb, gen_ctxt,
cb, cb_context, NULL, NULL, fwcmd, NULL);
Error:
spin_unlock_irqrestore(&pfob->post_lock, irql);
if (pfob->pend_queue_driving && pfob->mcc) {
pfob->pend_queue_driving = 0;
be_drive_mcc_wrb_queue(pfob->mcc);
}
return status;
}
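/*
 * Editorial example, not part of the original file: synchronously
 * setting per-EQ interrupt delays.  With cb, cb_context and q_ctxt all
 * NULL, the call fails with BE_STATUS_NO_MCC_WRB when the WRB ring is
 * full instead of queuing.  example_set_eq_delays is a hypothetical
 * helper; the delay values are in microseconds.
 */
static int example_set_eq_delays(struct be_function_object *pfob,
				 struct be_eq_object *eq0,
				 struct be_eq_object *eq1)
{
	struct be_eq_object *eqs[2] = { eq0, eq1 };
	u32 delays_us[2] = { 8, 96 };

	return be_eq_modify_delay(pfob, 2, eqs, delays_us,
				  NULL, NULL, NULL);
}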

File diff suppressed because it is too large

View File

@ -1,55 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __etx_context_amap_h__
#define __etx_context_amap_h__
/* ETX ring context structure. */
struct BE_ETX_CONTEXT_AMAP {
u8 tx_cidx[11]; /* DWORD 0 */
u8 rsvd0[5]; /* DWORD 0 */
u8 rsvd1[16]; /* DWORD 0 */
u8 tx_pidx[11]; /* DWORD 1 */
u8 rsvd2; /* DWORD 1 */
u8 tx_ring_size[4]; /* DWORD 1 */
u8 pd_id[5]; /* DWORD 1 */
u8 pd_id_not_valid; /* DWORD 1 */
u8 cq_id_send[10]; /* DWORD 1 */
u8 rsvd3[32]; /* DWORD 2 */
u8 rsvd4[32]; /* DWORD 3 */
u8 cur_bytes[32]; /* DWORD 4 */
u8 max_bytes[32]; /* DWORD 5 */
u8 time_stamp[32]; /* DWORD 6 */
u8 rsvd5[11]; /* DWORD 7 */
u8 func; /* DWORD 7 */
u8 rsvd6[20]; /* DWORD 7 */
u8 cur_txd_count[32]; /* DWORD 8 */
u8 max_txd_count[32]; /* DWORD 9 */
u8 rsvd7[32]; /* DWORD 10 */
u8 rsvd8[32]; /* DWORD 11 */
u8 rsvd9[32]; /* DWORD 12 */
u8 rsvd10[32]; /* DWORD 13 */
u8 rsvd11[32]; /* DWORD 14 */
u8 rsvd12[32]; /* DWORD 15 */
} __packed;
struct ETX_CONTEXT_AMAP {
u32 dw[16];
};
#endif /* __etx_context_amap_h__ */

View File

@ -1,565 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#include "hwlib.h"
#include "bestatus.h"
int
be_function_internal_query_firmware_config(struct be_function_object *pfob,
struct BE_FIRMWARE_CONFIG *config)
{
struct FWCMD_COMMON_FIRMWARE_CONFIG *fwcmd = NULL;
struct MCC_WRB_AMAP *wrb = NULL;
int status = 0;
unsigned long irql;
struct be_mcc_wrb_response_copy rc;
spin_lock_irqsave(&pfob->post_lock, irql);
wrb = be_function_peek_mcc_wrb(pfob);
if (!wrb) {
TRACE(DL_ERR, "MCC wrb peek failed.");
status = BE_STATUS_NO_MCC_WRB;
goto error;
}
/* Prepares an embedded fwcmd, including request/response sizes. */
fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_FIRMWARE_CONFIG);
rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_FIRMWARE_CONFIG,
params.response);
rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_FIRMWARE_CONFIG,
params.response);
rc.va = config;
/* Post the f/w command */
status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL,
NULL, NULL, NULL, fwcmd, &rc);
error:
spin_unlock_irqrestore(&pfob->post_lock, irql);
if (pfob->pend_queue_driving && pfob->mcc) {
pfob->pend_queue_driving = 0;
be_drive_mcc_wrb_queue(pfob->mcc);
}
return status;
}
/*
This allocates and initializes a function object based on the information
provided by upper layer drivers.
Returns BE_SUCCESS on success and an appropriate int on failure.
A function object represents a single BladeEngine (logical) PCI function.
That is, a function object represents either
the networking side or the iSCSI side of BladeEngine.
This routine will also detect and create an appropriate PD object for the
PCI function as needed.
*/
int
be_function_object_create(u8 __iomem *csr_va, u8 __iomem *db_va,
u8 __iomem *pci_va, u32 function_type,
struct ring_desc *mailbox, struct be_function_object *pfob)
{
int status;
ASSERT(pfob); /* not a magic assert */
ASSERT(function_type <= 2);
TRACE(DL_INFO, "Create function object. type:%s object:0x%p",
(function_type == BE_FUNCTION_TYPE_ISCSI ? "iSCSI" :
(function_type == BE_FUNCTION_TYPE_NETWORK ? "Network" :
"Arm")), pfob);
memset(pfob, 0, sizeof(*pfob));
pfob->type = function_type;
pfob->csr_va = csr_va;
pfob->db_va = db_va;
pfob->pci_va = pci_va;
spin_lock_init(&pfob->cq_lock);
spin_lock_init(&pfob->post_lock);
spin_lock_init(&pfob->mcc_context_lock);
pfob->pci_function_number = 1;
pfob->emulate = false;
TRACE(DL_NOTE, "Non-emulation mode");
status = be_drive_POST(pfob);
if (status != BE_SUCCESS) {
TRACE(DL_ERR, "BladeEngine POST failed.");
goto error;
}
/* Initialize the mailbox */
status = be_mpu_init_mailbox(pfob, mailbox);
if (status != BE_SUCCESS) {
TRACE(DL_ERR, "Failed to initialize mailbox.");
goto error;
}
/*
 * Cache the firmware config for ASSERTs in hwlib and later
* driver queries.
*/
status = be_function_internal_query_firmware_config(pfob,
&pfob->fw_config);
if (status != BE_SUCCESS) {
TRACE(DL_ERR, "Failed to query firmware config.");
goto error;
}
error:
if (status != BE_SUCCESS) {
/* No cleanup necessary */
TRACE(DL_ERR, "Failed to create function.");
memset(pfob, 0, sizeof(*pfob));
}
return status;
}
/*
This routine drops the reference count on a given function object. Once
the reference count falls to zero, the function object is destroyed and all
resources held are freed.
FunctionObject - The function object to drop the reference to.
*/
int be_function_object_destroy(struct be_function_object *pfob)
{
TRACE(DL_INFO, "Destroy pfob. Object:0x%p",
pfob);
ASSERT(pfob->mcc == NULL);
return BE_SUCCESS;
}
int be_function_cleanup(struct be_function_object *pfob)
{
int status = 0;
u32 isr;
u32 host_intr;
struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl;
if (pfob->type == BE_FUNCTION_TYPE_NETWORK) {
status = be_rxf_multicast_config(pfob, false, 0,
NULL, NULL, NULL, NULL);
ASSERT(status == BE_SUCCESS);
}
/* VLAN */
status = be_rxf_vlan_config(pfob, false, 0, NULL, NULL, NULL, NULL);
ASSERT(status == BE_SUCCESS);
/*
* MCC Queue -- Switches to mailbox mode. May want to destroy
 * all but the MCC CQ before this call if polling the CQ gives much
 * better performance than polling the mailbox register.
*/
if (pfob->mcc)
status = be_mcc_ring_destroy(pfob->mcc);
/*
* If interrupts are disabled, clear any CEV interrupt assertions that
* fired after we stopped processing EQs.
*/
ctrl.dw[0] = PCICFG1_READ(pfob, host_timer_int_ctrl);
host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
hostintr, ctrl.dw);
if (!host_intr)
if (pfob->type == BE_FUNCTION_TYPE_NETWORK)
isr = CSR_READ(pfob, cev.isr1);
else
isr = CSR_READ(pfob, cev.isr0);
else
/* This should never happen... */
TRACE(DL_ERR, "function_cleanup called with interrupt enabled");
/* Function object destroy */
status = be_function_object_destroy(pfob);
ASSERT(status == BE_SUCCESS);
return status;
}
void *
be_function_prepare_embedded_fwcmd(struct be_function_object *pfob,
struct MCC_WRB_AMAP *wrb, u32 payld_len, u32 request_length,
u32 response_length, u32 opcode, u32 subsystem)
{
struct FWCMD_REQUEST_HEADER *header = NULL;
u32 n;
ASSERT(wrb);
n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
AMAP_SET_BITS_PTR(MCC_WRB, embedded, wrb, 1);
AMAP_SET_BITS_PTR(MCC_WRB, payload_length, wrb, min(payld_len, n));
header = (struct FWCMD_REQUEST_HEADER *)((u8 *)wrb + n);
header->timeout = 0;
header->domain = 0;
header->request_length = max(request_length, response_length);
header->opcode = opcode;
header->subsystem = subsystem;
return header;
}
void *
be_function_prepare_nonembedded_fwcmd(struct be_function_object *pfob,
struct MCC_WRB_AMAP *wrb,
void *fwcmd_va, u64 fwcmd_pa,
u32 payld_len,
u32 request_length,
u32 response_length,
u32 opcode, u32 subsystem)
{
struct FWCMD_REQUEST_HEADER *header = NULL;
u32 n;
struct MCC_WRB_PAYLOAD_AMAP *plp;
ASSERT(wrb);
ASSERT(fwcmd_va);
header = (struct FWCMD_REQUEST_HEADER *) fwcmd_va;
AMAP_SET_BITS_PTR(MCC_WRB, embedded, wrb, 0);
AMAP_SET_BITS_PTR(MCC_WRB, payload_length, wrb, payld_len);
/*
* Assume one fragment. The caller may override the SGL by
* rewriting the 0th length and adding more entries. They
* will also need to update the sge_count.
*/
AMAP_SET_BITS_PTR(MCC_WRB, sge_count, wrb, 1);
n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
plp = (struct MCC_WRB_PAYLOAD_AMAP *)((u8 *)wrb + n);
AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].length, plp, payld_len);
AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].pa_lo, plp, (u32)fwcmd_pa);
AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].pa_hi, plp,
upper_32_bits(fwcmd_pa));
header->timeout = 0;
header->domain = 0;
header->request_length = max(request_length, response_length);
header->opcode = opcode;
header->subsystem = subsystem;
return header;
}
struct MCC_WRB_AMAP *
be_function_peek_mcc_wrb(struct be_function_object *pfob)
{
struct MCC_WRB_AMAP *wrb = NULL;
u32 offset;
if (pfob->mcc)
wrb = _be_mpu_peek_ring_wrb(pfob->mcc, false);
else {
offset = offsetof(struct BE_MCC_MAILBOX_AMAP, wrb)/8;
wrb = (struct MCC_WRB_AMAP *) ((u8 *) pfob->mailbox.va +
offset);
}
if (wrb)
memset(wrb, 0, sizeof(struct MCC_WRB_AMAP));
return wrb;
}
#if defined(BE_DEBUG)
void be_function_debug_print_wrb(struct be_function_object *pfob,
struct MCC_WRB_AMAP *wrb, void *optional_fwcmd_va,
struct be_mcc_wrb_context *wrb_context)
{
struct FWCMD_REQUEST_HEADER *header = NULL;
u8 embedded;
u32 n;
embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, wrb);
if (embedded) {
n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
header = (struct FWCMD_REQUEST_HEADER *)((u8 *)wrb + n);
} else {
header = (struct FWCMD_REQUEST_HEADER *) optional_fwcmd_va;
}
/* Save the completed count before posting for a debug assert. */
if (header) {
wrb_context->opcode = header->opcode;
wrb_context->subsystem = header->subsystem;
} else {
wrb_context->opcode = 0;
wrb_context->subsystem = 0;
}
}
#else
#define be_function_debug_print_wrb(a_, b_, c_, d_)
#endif
int
be_function_post_mcc_wrb(struct be_function_object *pfob,
struct MCC_WRB_AMAP *wrb,
struct be_generic_q_ctxt *q_ctxt,
mcc_wrb_cqe_callback cb, void *cb_context,
mcc_wrb_cqe_callback internal_cb,
void *internal_cb_context, void *optional_fwcmd_va,
struct be_mcc_wrb_response_copy *rc)
{
int status;
struct be_mcc_wrb_context *wrb_context = NULL;
u64 *p;
if (q_ctxt) {
/* Initialize context. */
q_ctxt->context.internal_cb = internal_cb;
q_ctxt->context.internal_cb_context = internal_cb_context;
q_ctxt->context.cb = cb;
q_ctxt->context.cb_context = cb_context;
if (rc) {
q_ctxt->context.copy.length = rc->length;
q_ctxt->context.copy.fwcmd_offset = rc->fwcmd_offset;
q_ctxt->context.copy.va = rc->va;
} else
q_ctxt->context.copy.length = 0;
q_ctxt->context.optional_fwcmd_va = optional_fwcmd_va;
/* Queue this request */
status = be_function_queue_mcc_wrb(pfob, q_ctxt);
goto Error;
}
/*
* Allocate a WRB context struct to hold the callback pointers,
* status, etc. This is required if commands complete out of order.
*/
wrb_context = _be_mcc_allocate_wrb_context(pfob);
if (!wrb_context) {
TRACE(DL_WARN, "Failed to allocate MCC WRB context.");
status = BE_STATUS_SYSTEM_RESOURCES;
goto Error;
}
/* Initialize context. */
memset(wrb_context, 0, sizeof(*wrb_context));
wrb_context->internal_cb = internal_cb;
wrb_context->internal_cb_context = internal_cb_context;
wrb_context->cb = cb;
wrb_context->cb_context = cb_context;
if (rc) {
wrb_context->copy.length = rc->length;
wrb_context->copy.fwcmd_offset = rc->fwcmd_offset;
wrb_context->copy.va = rc->va;
} else
wrb_context->copy.length = 0;
wrb_context->wrb = wrb;
/*
* Copy the context pointer into the WRB opaque tag field.
* Verify assumption of 64-bit tag with a compile time assert.
*/
p = (u64 *) ((u8 *)wrb + offsetof(struct BE_MCC_WRB_AMAP, tag)/8);
*p = (u64)(size_t)wrb_context;
/* Print info about this FWCMD for debug builds. */
be_function_debug_print_wrb(pfob, wrb, optional_fwcmd_va, wrb_context);
/*
* issue the WRB to the MPU as appropriate
*/
if (pfob->mcc) {
/*
* we're in WRB mode, pass to the mcc layer
*/
status = _be_mpu_post_wrb_ring(pfob->mcc, wrb, wrb_context);
} else {
/*
* we're in mailbox mode
*/
status = _be_mpu_post_wrb_mailbox(pfob, wrb, wrb_context);
/* mailbox mode always completes synchronously */
ASSERT(status != BE_STATUS_PENDING);
}
Error:
return status;
}
int
be_function_ring_destroy(struct be_function_object *pfob,
u32 id, u32 ring_type, mcc_wrb_cqe_callback cb,
void *cb_context, mcc_wrb_cqe_callback internal_cb,
void *internal_cb_context)
{
struct FWCMD_COMMON_RING_DESTROY *fwcmd = NULL;
struct MCC_WRB_AMAP *wrb = NULL;
int status = 0;
unsigned long irql;
spin_lock_irqsave(&pfob->post_lock, irql);
TRACE(DL_INFO, "Destroy ring id:%d type:%d", id, ring_type);
wrb = be_function_peek_mcc_wrb(pfob);
if (!wrb) {
ASSERT(wrb);
TRACE(DL_ERR, "No free MCC WRBs in destroy ring.");
status = BE_STATUS_NO_MCC_WRB;
goto Error;
}
/* Prepares an embedded fwcmd, including request/response sizes. */
fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_RING_DESTROY);
fwcmd->params.request.id = id;
fwcmd->params.request.ring_type = ring_type;
/* Post the f/w command */
status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb, cb_context,
internal_cb, internal_cb_context, fwcmd, NULL);
if (status != BE_SUCCESS && status != BE_PENDING) {
TRACE(DL_ERR, "Ring destroy fwcmd failed. id:%d ring_type:%d",
id, ring_type);
goto Error;
}
Error:
spin_unlock_irqrestore(&pfob->post_lock, irql);
if (pfob->pend_queue_driving && pfob->mcc) {
pfob->pend_queue_driving = 0;
be_drive_mcc_wrb_queue(pfob->mcc);
}
return status;
}
void
be_rd_to_pa_list(struct ring_desc *rd, struct PHYS_ADDR *pa_list, u32 max_num)
{
u32 num_pages = PAGES_SPANNED(rd->va, rd->length);
u32 i = 0;
u64 pa = rd->pa;
__le64 lepa;
ASSERT(pa_list);
ASSERT(pa);
for (i = 0; i < min(num_pages, max_num); i++) {
lepa = cpu_to_le64(pa);
pa_list[i].lo = (u32)lepa;
pa_list[i].hi = upper_32_bits(lepa);
pa += PAGE_SIZE;
}
}
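/*
 * Editorial note, not part of the original file: a buffer that starts
 * mid-page spans one more page than its length alone suggests, which is
 * why callers size pa_list with PAGES_SPANNED().  A sketch, assuming
 * 4KB pages and a hypothetical ring descriptor rd whose 16KB buffer
 * begins 2KB into a page:
 *
 *	struct PHYS_ADDR pages[8];
 *
 *	be_rd_to_pa_list(rd, pages, ARRAY_SIZE(pages));
 *	// 2KB offset + 16KB length spans 5 pages, so entries 0..4 are
 *	// filled and the remaining slots are left untouched.
 */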
/*-----------------------------------------------------------------------------
* Function: be_function_get_fw_version
 * Retrieves the firmware version on the adapter. If the callback is
* NULL this call executes synchronously. If the callback is not NULL,
* the returned status will be BE_PENDING if the command was issued
* successfully.
* pfob -
* fwv - Pointer to response buffer if callback is NULL.
* cb - Callback function invoked when the FWCMD completes.
* cb_context - Passed to the callback function.
* return pend_status - BE_SUCCESS (0) on success.
 * BE_PENDING (positive value) if the FWCMD
* completion is pending. Negative error code on failure.
*---------------------------------------------------------------------------
*/
int
be_function_get_fw_version(struct be_function_object *pfob,
struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD *fwv,
mcc_wrb_cqe_callback cb, void *cb_context)
{
int status = BE_SUCCESS;
struct MCC_WRB_AMAP *wrb = NULL;
struct FWCMD_COMMON_GET_FW_VERSION *fwcmd = NULL;
unsigned long irql;
struct be_mcc_wrb_response_copy rc;
spin_lock_irqsave(&pfob->post_lock, irql);
wrb = be_function_peek_mcc_wrb(pfob);
if (!wrb) {
TRACE(DL_ERR, "MCC wrb peek failed.");
status = BE_STATUS_NO_MCC_WRB;
goto Error;
}
if (!cb && !fwv) {
TRACE(DL_ERR, "callback and response buffer NULL!");
status = BE_NOT_OK;
goto Error;
}
/* Prepares an embedded fwcmd, including request/response sizes. */
fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_FW_VERSION);
rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_GET_FW_VERSION,
params.response);
rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_GET_FW_VERSION,
params.response);
rc.va = fwv;
/* Post the f/w command */
status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb,
cb_context, NULL, NULL, fwcmd, &rc);
Error:
spin_unlock_irqrestore(&pfob->post_lock, irql);
if (pfob->pend_queue_driving && pfob->mcc) {
pfob->pend_queue_driving = 0;
be_drive_mcc_wrb_queue(pfob->mcc);
}
return status;
}
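/*
 * Editorial example, not part of the original file: the synchronous form
 * of the query.  With cb == NULL the FWCMD completes before the call
 * returns and the response payload lands in the caller's buffer through
 * the be_mcc_wrb_response_copy set up above.  example_log_fw_version is
 * a hypothetical helper.
 */
static void example_log_fw_version(struct be_function_object *pfob)
{
	struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD fwv;

	if (be_function_get_fw_version(pfob, &fwv, NULL, NULL) == BE_SUCCESS)
		TRACE(DL_INFO, "firmware: %s", fwv.firmware_version_string);
}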
int
be_function_queue_mcc_wrb(struct be_function_object *pfob,
struct be_generic_q_ctxt *q_ctxt)
{
int status;
ASSERT(q_ctxt);
/*
* issue the WRB to the MPU as appropriate
*/
if (pfob->mcc) {
/* We're in ring mode. Queue this item. */
pfob->mcc->backlog_length++;
list_add_tail(&q_ctxt->context.list, &pfob->mcc->backlog);
status = BE_PENDING;
} else {
status = BE_NOT_OK;
}
return status;
}
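/*
 * Editorial note, not part of the original file: how the backlog path
 * above is reached.  A caller that supplies both a callback and a queue
 * context survives a full WRB ring -- the FWCMD wrapper falls back to
 * the WRB embedded in the q_ctxt, be_function_post_mcc_wrb() routes it
 * here, and the backlog is drained later by be_drive_mcc_wrb_queue().
 * A sketch with hypothetical my_ctxt/my_cb names:
 *
 *	status = be_eq_modify_delay(pfob, 1, &eq, &delay_us,
 *				    my_cb, my_cb_context, &my_ctxt);
 *	// BE_PENDING means posted or queued; my_cb runs on completion.
 */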

View File

@ -1,222 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __fwcmd_common_amap_h__
#define __fwcmd_common_amap_h__
#include "host_struct.h"
/* --- PHY_LINK_DUPLEX_ENUM --- */
#define PHY_LINK_DUPLEX_NONE (0)
#define PHY_LINK_DUPLEX_HALF (1)
#define PHY_LINK_DUPLEX_FULL (2)
/* --- PHY_LINK_SPEED_ENUM --- */
#define PHY_LINK_SPEED_ZERO (0) /* No link. */
#define PHY_LINK_SPEED_10MBPS (1) /* 10 Mbps */
#define PHY_LINK_SPEED_100MBPS (2) /* 100 Mbps */
#define PHY_LINK_SPEED_1GBPS (3) /* 1 Gbps */
#define PHY_LINK_SPEED_10GBPS (4) /* 10 Gbps */
/* --- PHY_LINK_FAULT_ENUM --- */
#define PHY_LINK_FAULT_NONE (0) /* No fault status
available or detected */
#define PHY_LINK_FAULT_LOCAL (1) /* Local fault detected */
#define PHY_LINK_FAULT_REMOTE (2) /* Remote fault detected */
/* --- BE_ULP_MASK --- */
#define BE_ULP0_MASK (1)
#define BE_ULP1_MASK (2)
#define BE_ULP2_MASK (4)
/* --- NTWK_ACTIVE_PORT --- */
#define NTWK_PORT_A (0) /* Port A is currently active */
#define NTWK_PORT_B (1) /* Port B is currently active */
#define NTWK_NO_ACTIVE_PORT (15) /* Both ports have lost link */
/* --- NTWK_LINK_TYPE --- */
#define NTWK_LINK_TYPE_PHYSICAL (0) /* link up/down event
applies to BladeEngine's
Physical Ports
*/
#define NTWK_LINK_TYPE_VIRTUAL (1) /* Virtual link up/down event
reported by BladeExchange.
This applies only when the
VLD feature is enabled
*/
/*
* --- FWCMD_MAC_TYPE_ENUM ---
* This enum defines the types of MAC addresses in the RXF MAC Address Table.
*/
#define MAC_ADDRESS_TYPE_STORAGE (0) /* Storage MAC Address */
#define MAC_ADDRESS_TYPE_NETWORK (1) /* Network MAC Address */
#define MAC_ADDRESS_TYPE_PD (2) /* Protection Domain MAC Addr */
#define MAC_ADDRESS_TYPE_MANAGEMENT (3)	/* Management MAC Address */
/* --- FWCMD_RING_TYPE_ENUM --- */
#define FWCMD_RING_TYPE_ETH_RX (1) /* Ring created with */
/* FWCMD_COMMON_ETH_RX_CREATE. */
#define FWCMD_RING_TYPE_ETH_TX (2) /* Ring created with */
/* FWCMD_COMMON_ETH_TX_CREATE. */
#define FWCMD_RING_TYPE_ISCSI_WRBQ (3) /* Ring created with */
/* FWCMD_COMMON_ISCSI_WRBQ_CREATE. */
#define FWCMD_RING_TYPE_ISCSI_DEFQ (4) /* Ring created with */
/* FWCMD_COMMON_ISCSI_DEFQ_CREATE. */
#define FWCMD_RING_TYPE_TPM_WRBQ (5) /* Ring created with */
/* FWCMD_COMMON_TPM_WRBQ_CREATE. */
#define FWCMD_RING_TYPE_TPM_DEFQ (6) /* Ring created with */
/* FWCMD_COMMON_TPM_DEFQ_CREATE. */
#define FWCMD_RING_TYPE_TPM_RQ (7) /* Ring created with */
/* FWCMD_COMMON_TPM_RQ_CREATE. */
#define FWCMD_RING_TYPE_MCC (8) /* Ring created with */
/* FWCMD_COMMON_MCC_CREATE. */
#define FWCMD_RING_TYPE_CQ (9) /* Ring created with */
/* FWCMD_COMMON_CQ_CREATE. */
#define FWCMD_RING_TYPE_EQ (10) /* Ring created with */
/* FWCMD_COMMON_EQ_CREATE. */
#define FWCMD_RING_TYPE_QP (11) /* Ring created with */
/* FWCMD_RDMA_QP_CREATE. */
/* --- ETH_TX_RING_TYPE_ENUM --- */
#define ETH_TX_RING_TYPE_FORWARDING (1) /* Ethernet ring for
forwarding packets */
#define ETH_TX_RING_TYPE_STANDARD (2) /* Ethernet ring for sending
network packets. */
#define ETH_TX_RING_TYPE_BOUND (3) /* Ethernet ring bound to the
port specified in the command
header.port_number field.
Rings of this type are
NOT subject to the
failover logic implemented
in the BladeEngine.
*/
/* --- FWCMD_COMMON_QOS_TYPE_ENUM --- */
#define QOS_BITS_NIC (1) /* max_bits_per_second_NIC */
/* field is valid. */
#define QOS_PKTS_NIC (2) /* max_packets_per_second_NIC */
/* field is valid. */
#define QOS_IOPS_ISCSI (4) /* max_ios_per_second_iSCSI */
/*field is valid. */
#define QOS_VLAN_TAG (8) /* domain_VLAN_tag field
is valid. */
#define QOS_FABRIC_ID (16) /* fabric_domain_ID field
is valid. */
#define QOS_OEM_PARAMS (32) /* qos_params_oem field
is valid. */
#define QOS_TPUT_ISCSI (64) /* max_bytes_per_second_iSCSI
field is valid. */
/*
* --- FAILOVER_CONFIG_ENUM ---
* Failover configuration setting used in FWCMD_COMMON_FORCE_FAILOVER
*/
#define FAILOVER_CONFIG_NO_CHANGE (0) /* No change to automatic */
/* port failover setting. */
#define FAILOVER_CONFIG_ON (1) /* Automatic port failover
on link down is enabled. */
#define FAILOVER_CONFIG_OFF (2) /* Automatic port failover
on link down is disabled. */
/*
* --- FAILOVER_PORT_ENUM ---
* Failover port setting used in FWCMD_COMMON_FORCE_FAILOVER
*/
#define FAILOVER_PORT_A (0) /* Selects port A. */
#define FAILOVER_PORT_B (1) /* Selects port B. */
#define FAILOVER_PORT_NONE (15) /* No port change requested. */
/*
* --- MGMT_FLASHROM_OPCODE ---
* Flash ROM operation code
*/
#define MGMT_FLASHROM_OPCODE_FLASH (1) /* Commit downloaded data
to Flash ROM */
#define MGMT_FLASHROM_OPCODE_SAVE (2) /* Save downloaded data to
ARM's DDR - do not flash */
#define MGMT_FLASHROM_OPCODE_CLEAR (3) /* Erase specified component
from FlashROM */
#define MGMT_FLASHROM_OPCODE_REPORT (4) /* Read specified component
from Flash ROM */
#define MGMT_FLASHROM_OPCODE_IMAGE_INFO (5) /* Returns size of a
component */
/*
* --- MGMT_FLASHROM_OPTYPE ---
* Flash ROM operation type
*/
#define MGMT_FLASHROM_OPTYPE_CODE_FIRMWARE (0) /* Includes ARM firmware,
IPSec (optional) and EP
firmware */
#define MGMT_FLASHROM_OPTYPE_CODE_REDBOOT (1)
#define MGMT_FLASHROM_OPTYPE_CODE_BIOS (2)
#define MGMT_FLASHROM_OPTYPE_CODE_PXE_BIOS (3)
#define MGMT_FLASHROM_OPTYPE_CODE_CTRLS (4)
#define MGMT_FLASHROM_OPTYPE_CFG_IPSEC (5)
#define MGMT_FLASHROM_OPTYPE_CFG_INI (6)
#define MGMT_FLASHROM_OPTYPE_ROM_OFFSET_SPECIFIED (7)
/*
* --- FLASHROM_TYPE ---
* Flash ROM manufacturers supported in the f/w
*/
#define INTEL (0)
#define SPANSION (1)
#define MICRON (2)
/* --- DDR_CAS_TYPE --- */
#define CAS_3 (0)
#define CAS_4 (1)
#define CAS_5 (2)
/* --- DDR_SIZE_TYPE --- */
#define SIZE_256MB (0)
#define SIZE_512MB (1)
/* --- DDR_MODE_TYPE --- */
#define DDR_NO_ECC (0)
#define DDR_ECC (1)
/* --- INTERFACE_10GB_TYPE --- */
#define CX4_TYPE (0)
#define XFP_TYPE (1)
/* --- BE_CHIP_MAX_MTU --- */
#define CHIP_MAX_MTU (9000)
/* --- XAUI_STATE_ENUM --- */
#define XAUI_STATE_ENABLE (0) /* This MUST be the default
value for all requests
which set/change
equalization parameter. */
#define XAUI_STATE_DISABLE (255) /* The XAUI for both ports
may be disabled for EMI
tests. There is no
provision for turning off
individual ports.
*/
/* --- BE_ASIC_REVISION --- */
#define BE_ASIC_REV_A0 (1)
#define BE_ASIC_REV_A1 (2)
#endif /* __fwcmd_common_amap_h__ */

View File

@ -1,717 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __fwcmd_common_bmap_h__
#define __fwcmd_common_bmap_h__
#include "fwcmd_types_bmap.h"
#include "fwcmd_hdr_bmap.h"
#if defined(__BIG_ENDIAN)
/* Physical Address. */
struct PHYS_ADDR {
union {
struct {
u32 lo; /* DWORD 0 */
u32 hi; /* DWORD 1 */
} __packed; /* unnamed struct */
u32 dw[2]; /* dword union */
}; /* unnamed union */
} __packed ;
#else
/* Physical Address. */
struct PHYS_ADDR {
union {
struct {
u32 lo; /* DWORD 0 */
u32 hi; /* DWORD 1 */
} __packed; /* unnamed struct */
u32 dw[2]; /* dword union */
}; /* unnamed union */
} __packed ;
struct BE_LINK_STATUS {
u8 mac0_duplex;
u8 mac0_speed;
u8 mac1_duplex;
u8 mac1_speed;
u8 mgmt_mac_duplex;
u8 mgmt_mac_speed;
u8 active_port;
u8 rsvd0;
u8 mac0_fault;
u8 mac1_fault;
u16 rsvd1;
} __packed;
#endif
struct FWCMD_COMMON_ANON_170_REQUEST {
u32 rsvd0;
} __packed;
union LINK_STATUS_QUERY_PARAMS {
struct BE_LINK_STATUS response;
struct FWCMD_COMMON_ANON_170_REQUEST request;
} __packed;
/*
 * Queries the link status for all ports. The valid values below
* DO NOT indicate that a particular duplex or speed is supported by
* BladeEngine. These enumerations simply list all possible duplexes
* and speeds for any port. Consult BladeEngine product documentation
* for the supported parameters.
*/
struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY {
union FWCMD_HEADER header;
union LINK_STATUS_QUERY_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_171_REQUEST {
u8 type;
u8 port;
u8 mac1;
u8 permanent;
} __packed;
struct FWCMD_COMMON_ANON_172_RESPONSE {
struct MAC_ADDRESS_FORMAT mac;
} __packed;
union NTWK_MAC_QUERY_PARAMS {
struct FWCMD_COMMON_ANON_171_REQUEST request;
struct FWCMD_COMMON_ANON_172_RESPONSE response;
} __packed;
/* Queries one MAC address. */
struct FWCMD_COMMON_NTWK_MAC_QUERY {
union FWCMD_HEADER header;
union NTWK_MAC_QUERY_PARAMS params;
} __packed;
struct MAC_SET_PARAMS_IN {
u8 type;
u8 port;
u8 mac1;
u8 invalidate;
struct MAC_ADDRESS_FORMAT mac;
} __packed;
struct MAC_SET_PARAMS_OUT {
u32 rsvd0;
} __packed;
union MAC_SET_PARAMS {
struct MAC_SET_PARAMS_IN request;
struct MAC_SET_PARAMS_OUT response;
} __packed;
/* Sets a MAC address. */
struct FWCMD_COMMON_NTWK_MAC_SET {
union FWCMD_HEADER header;
union MAC_SET_PARAMS params;
} __packed;
/* MAC address list. */
struct NTWK_MULTICAST_MAC_LIST {
u8 byte[6];
} __packed;
struct FWCMD_COMMON_NTWK_MULTICAST_SET_REQUEST_PAYLOAD {
u16 num_mac;
u8 promiscuous;
u8 rsvd0;
struct NTWK_MULTICAST_MAC_LIST mac[32];
} __packed;
struct FWCMD_COMMON_ANON_174_RESPONSE {
u32 rsvd0;
} __packed;
union FWCMD_COMMON_ANON_173_PARAMS {
struct FWCMD_COMMON_NTWK_MULTICAST_SET_REQUEST_PAYLOAD request;
struct FWCMD_COMMON_ANON_174_RESPONSE response;
} __packed;
/*
* Sets multicast address hash. The MPU will merge the MAC address lists
* from all clients, including the networking and storage functions.
* This command may fail if the final merged list of MAC addresses exceeds
* 32 entries.
*/
struct FWCMD_COMMON_NTWK_MULTICAST_SET {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_173_PARAMS params;
} __packed;
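/*
 * Editorial example, not part of the original file: a sketch of the
 * request payload for one multicast address with promiscuous mode off.
 * FWCMD_HEADER preparation and posting are omitted; fwcmd and
 * dev_mc_addr are hypothetical.
 *
 *	struct FWCMD_COMMON_NTWK_MULTICAST_SET_REQUEST_PAYLOAD *req =
 *						&fwcmd->params.request;
 *
 *	req->num_mac = 1;
 *	req->promiscuous = 0;
 *	memcpy(req->mac[0].byte, dev_mc_addr, ETH_ALEN);
 */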
struct FWCMD_COMMON_NTWK_VLAN_CONFIG_REQUEST_PAYLOAD {
u16 num_vlan;
u8 promiscuous;
u8 rsvd0;
u16 vlan_tag[32];
} __packed;
struct FWCMD_COMMON_ANON_176_RESPONSE {
u32 rsvd0;
} __packed;
union FWCMD_COMMON_ANON_175_PARAMS {
struct FWCMD_COMMON_NTWK_VLAN_CONFIG_REQUEST_PAYLOAD request;
struct FWCMD_COMMON_ANON_176_RESPONSE response;
} __packed;
/*
* Sets VLAN tag filter. The MPU will merge the VLAN tag list from all
* clients, including the networking and storage functions. This command
* may fail if the final vlan_tag array (from all functions) is longer
* than 32 entries.
*/
struct FWCMD_COMMON_NTWK_VLAN_CONFIG {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_175_PARAMS params;
} __packed;
struct RING_DESTROY_REQUEST {
u16 ring_type;
u16 id;
u8 bypass_flush;
u8 rsvd0;
u16 rsvd1;
} __packed;
struct FWCMD_COMMON_ANON_190_RESPONSE {
u32 rsvd0;
} __packed;
union FWCMD_COMMON_ANON_189_PARAMS {
struct RING_DESTROY_REQUEST request;
struct FWCMD_COMMON_ANON_190_RESPONSE response;
} __packed;
/*
* Command for destroying any ring. The connection(s) using the ring should
* be quiesced before destroying the ring.
*/
struct FWCMD_COMMON_RING_DESTROY {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_189_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_192_REQUEST {
u16 num_pages;
u16 rsvd0;
struct CQ_CONTEXT_AMAP context;
struct PHYS_ADDR pages[4];
} __packed ;
struct FWCMD_COMMON_ANON_193_RESPONSE {
u16 cq_id;
} __packed ;
union FWCMD_COMMON_ANON_191_PARAMS {
struct FWCMD_COMMON_ANON_192_REQUEST request;
struct FWCMD_COMMON_ANON_193_RESPONSE response;
} __packed ;
/*
* Command for creating a completion queue. A Completion Queue must span
* at least 1 page and at most 4 pages. Each completion queue entry
* is 16 bytes regardless of CQ entry format. Thus the ring must be
* at least 256 entries deep (corresponding to 1 page) and can be at
* most 1024 entries deep (corresponding to 4 pages). The number of
* pages posted must contain the CQ ring size as encoded in the context.
*
*/
struct FWCMD_COMMON_CQ_CREATE {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_191_PARAMS params;
} __packed ;
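/*
 * Editorial note, not part of the original file: the sizing rule from
 * the comment above, made concrete.  CQEs are a fixed 16 bytes, so with
 * 4KB pages the ring depth and page count are related by
 *
 *	num_pages = (num_entries * 16) / 4096
 *
 * which gives the stated bounds: 256 entries -> 1 page (minimum) and
 * 1024 entries -> 4 pages (maximum).
 */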
struct FWCMD_COMMON_ANON_198_REQUEST {
u16 num_pages;
u16 rsvd0;
struct EQ_CONTEXT_AMAP context;
struct PHYS_ADDR pages[8];
} __packed ;
struct FWCMD_COMMON_ANON_199_RESPONSE {
u16 eq_id;
} __packed ;
union FWCMD_COMMON_ANON_197_PARAMS {
struct FWCMD_COMMON_ANON_198_REQUEST request;
struct FWCMD_COMMON_ANON_199_RESPONSE response;
} __packed ;
/*
 * Command for creating an event queue. An Event Queue must span at least
* 1 page and at most 8 pages. The number of pages posted must contain
* the EQ ring. The ring is defined by the size of the EQ entries (encoded
* in the context) and the number of EQ entries (also encoded in the
* context).
*/
struct FWCMD_COMMON_EQ_CREATE {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_197_PARAMS params;
} __packed ;
struct FWCMD_COMMON_ANON_201_REQUEST {
u16 cq_id;
u16 bcmc_cq_id;
u16 num_pages;
u16 rsvd0;
struct PHYS_ADDR pages[2];
} __packed;
struct FWCMD_COMMON_ANON_202_RESPONSE {
u16 id;
} __packed;
union FWCMD_COMMON_ANON_200_PARAMS {
struct FWCMD_COMMON_ANON_201_REQUEST request;
struct FWCMD_COMMON_ANON_202_RESPONSE response;
} __packed;
/*
* Command for creating Ethernet receive ring. An ERX ring contains ETH_RX_D
* entries (8 bytes each). An ERX ring must be 1024 entries deep
* (corresponding to 2 pages).
*/
struct FWCMD_COMMON_ETH_RX_CREATE {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_200_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_204_REQUEST {
u16 num_pages;
u8 ulp_num;
u8 type;
struct ETX_CONTEXT_AMAP context;
struct PHYS_ADDR pages[8];
} __packed ;
struct FWCMD_COMMON_ANON_205_RESPONSE {
u16 cid;
u8 ulp_num;
u8 rsvd0;
} __packed ;
union FWCMD_COMMON_ANON_203_PARAMS {
struct FWCMD_COMMON_ANON_204_REQUEST request;
struct FWCMD_COMMON_ANON_205_RESPONSE response;
} __packed ;
/*
* Command for creating an Ethernet transmit ring. An ETX ring contains
* ETH_WRB entries (16 bytes each). An ETX ring must be at least 256
* entries deep (corresponding to 1 page) and at most 2k entries deep
* (corresponding to 8 pages).
*/
struct FWCMD_COMMON_ETH_TX_CREATE {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_203_PARAMS params;
} __packed ;
struct FWCMD_COMMON_ANON_222_REQUEST {
u16 num_pages;
u16 rsvd0;
struct MCC_RING_CONTEXT_AMAP context;
struct PHYS_ADDR pages[8];
} __packed ;
struct FWCMD_COMMON_ANON_223_RESPONSE {
u16 id;
} __packed ;
union FWCMD_COMMON_ANON_221_PARAMS {
struct FWCMD_COMMON_ANON_222_REQUEST request;
struct FWCMD_COMMON_ANON_223_RESPONSE response;
} __packed ;
/*
* Command for creating the MCC ring. An MCC ring must be at least 16
* entries deep (corresponding to 1 page) and at most 128 entries deep
* (corresponding to 8 pages).
*/
struct FWCMD_COMMON_MCC_CREATE {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_221_PARAMS params;
} __packed ;
struct GET_QOS_IN {
u32 qos_params_rsvd;
} __packed;
struct GET_QOS_OUT {
u32 max_bits_per_second_NIC;
u32 max_packets_per_second_NIC;
u32 max_ios_per_second_iSCSI;
u32 max_bytes_per_second_iSCSI;
u16 domain_VLAN_tag;
u16 fabric_domain_ID;
u32 qos_params_oem[4];
} __packed;
union GET_QOS_PARAMS {
struct GET_QOS_IN request;
struct GET_QOS_OUT response;
} __packed;
/* QOS/Bandwidth settings per domain. Applicable only in VMs. */
struct FWCMD_COMMON_GET_QOS {
union FWCMD_HEADER header;
union GET_QOS_PARAMS params;
} __packed;
struct SET_QOS_IN {
u32 valid_flags;
u32 max_bits_per_second_NIC;
u32 max_packets_per_second_NIC;
u32 max_ios_per_second_iSCSI;
u32 max_bytes_per_second_iSCSI;
u16 domain_VLAN_tag;
u16 fabric_domain_ID;
u32 qos_params_oem[4];
} __packed;
struct SET_QOS_OUT {
u32 qos_params_rsvd;
} __packed;
union SET_QOS_PARAMS {
struct SET_QOS_IN request;
struct SET_QOS_OUT response;
} __packed;
/* QOS/Bandwidth settings per domain. Applicable only in VMs. */
struct FWCMD_COMMON_SET_QOS {
union FWCMD_HEADER header;
union SET_QOS_PARAMS params;
} __packed;
struct SET_FRAME_SIZE_IN {
u32 max_tx_frame_size;
u32 max_rx_frame_size;
} __packed;
struct SET_FRAME_SIZE_OUT {
u32 chip_max_tx_frame_size;
u32 chip_max_rx_frame_size;
} __packed;
union SET_FRAME_SIZE_PARAMS {
struct SET_FRAME_SIZE_IN request;
struct SET_FRAME_SIZE_OUT response;
} __packed;
/* Set frame size command. Only host domain may issue this command. */
struct FWCMD_COMMON_SET_FRAME_SIZE {
union FWCMD_HEADER header;
union SET_FRAME_SIZE_PARAMS params;
} __packed;
struct FORCE_FAILOVER_IN {
u32 move_to_port;
u32 failover_config;
} __packed;
struct FWCMD_COMMON_ANON_231_RESPONSE {
u32 rsvd0;
} __packed;
union FWCMD_COMMON_ANON_230_PARAMS {
struct FORCE_FAILOVER_IN request;
struct FWCMD_COMMON_ANON_231_RESPONSE response;
} __packed;
/*
* Use this command to control failover in BladeEngine. It may be used
 * to fail back to a restored port or to forcibly move traffic from
* one port to another. It may also be used to enable or disable the
* automatic failover feature. This command can only be issued by domain
* 0.
*/
struct FWCMD_COMMON_FORCE_FAILOVER {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_230_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_240_REQUEST {
u64 context;
} __packed;
struct FWCMD_COMMON_ANON_241_RESPONSE {
u64 context;
} __packed;
union FWCMD_COMMON_ANON_239_PARAMS {
struct FWCMD_COMMON_ANON_240_REQUEST request;
struct FWCMD_COMMON_ANON_241_RESPONSE response;
} __packed;
/*
* This command can be used by clients as a no-operation request. Typical
* uses for drivers are as a heartbeat mechanism, or deferred processing
* catalyst. The ARM will always complete this command with a good completion.
* The 64-bit parameter is not touched by the ARM processor.
*/
struct FWCMD_COMMON_NOP {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_239_PARAMS params;
} __packed;
struct NTWK_RX_FILTER_SETTINGS {
u8 promiscuous;
u8 ip_cksum;
u8 tcp_cksum;
u8 udp_cksum;
u8 pass_err;
u8 pass_ckerr;
u8 strip_crc;
u8 mcast_en;
u8 bcast_en;
u8 mcast_promiscuous_en;
u8 unicast_en;
u8 vlan_promiscuous;
} __packed;
union FWCMD_COMMON_ANON_242_PARAMS {
struct NTWK_RX_FILTER_SETTINGS request;
struct NTWK_RX_FILTER_SETTINGS response;
} __packed;
/*
* This command is used to modify the ethernet receive filter configuration.
* Only domain 0 network function drivers may issue this command. The
* applied configuration is returned in the response payload. Note:
* Some receive packet filter settings are global on BladeEngine and
* can affect both the storage and network function clients that the
 * BladeEngine hardware and firmware serve. Additionally, depending
* on the revision of BladeEngine, some ethernet receive filter settings
* are dependent on others. If a dependency exists between settings
* for the BladeEngine revision, and the command request settings do
* not meet the dependency requirement, the invalid settings will not
 * be applied despite the command succeeding. For example: a driver may
* request to enable broadcast packets, but not enable multicast packets.
* On early revisions of BladeEngine, there may be no distinction between
* broadcast and multicast filters, so broadcast could not be enabled
 * without enabling multicast. In this scenario, the command would still
* succeed, but the response payload would indicate the previously
* configured broadcast and multicast setting.
*/
struct FWCMD_COMMON_NTWK_RX_FILTER {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_242_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_244_REQUEST {
u32 rsvd0;
} __packed;
struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD {
u8 firmware_version_string[32];
u8 fw_on_flash_version_string[32];
} __packed;
union FWCMD_COMMON_ANON_243_PARAMS {
struct FWCMD_COMMON_ANON_244_REQUEST request;
struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD response;
} __packed;
/* This command retrieves the firmware version. */
struct FWCMD_COMMON_GET_FW_VERSION {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_243_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_246_REQUEST {
u16 tx_flow_control;
u16 rx_flow_control;
} __packed;
struct FWCMD_COMMON_ANON_247_RESPONSE {
u32 rsvd0;
} __packed;
union FWCMD_COMMON_ANON_245_PARAMS {
struct FWCMD_COMMON_ANON_246_REQUEST request;
struct FWCMD_COMMON_ANON_247_RESPONSE response;
} __packed;
/*
 * This command is used to program BladeEngine flow control behavior.
 * Only the host networking driver is allowed to use this command.
*/
struct FWCMD_COMMON_SET_FLOW_CONTROL {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_245_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_249_REQUEST {
u32 rsvd0;
} __packed;
struct FWCMD_COMMON_ANON_250_RESPONSE {
u16 tx_flow_control;
u16 rx_flow_control;
} __packed;
union FWCMD_COMMON_ANON_248_PARAMS {
struct FWCMD_COMMON_ANON_249_REQUEST request;
struct FWCMD_COMMON_ANON_250_RESPONSE response;
} __packed;
/* This command is used to read BladeEngine flow control settings. */
struct FWCMD_COMMON_GET_FLOW_CONTROL {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_248_PARAMS params;
} __packed;
struct EQ_DELAY_PARAMS {
u32 eq_id;
u32 delay_in_microseconds;
} __packed;
struct FWCMD_COMMON_ANON_257_REQUEST {
u32 num_eq;
u32 rsvd0;
struct EQ_DELAY_PARAMS delay[16];
} __packed;
struct FWCMD_COMMON_ANON_258_RESPONSE {
u32 delay_resolution_in_microseconds;
u32 delay_max_in_microseconds;
} __packed;
union MODIFY_EQ_DELAY_PARAMS {
struct FWCMD_COMMON_ANON_257_REQUEST request;
struct FWCMD_COMMON_ANON_258_RESPONSE response;
} __packed;
/* This command changes the EQ delay for a given set of EQs. */
struct FWCMD_COMMON_MODIFY_EQ_DELAY {
union FWCMD_HEADER header;
union MODIFY_EQ_DELAY_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_260_REQUEST {
u32 rsvd0;
} __packed;
struct BE_FIRMWARE_CONFIG {
u16 be_config_number;
u16 asic_revision;
u32 nic_ulp_mask;
u32 tulp_mask;
u32 iscsi_ulp_mask;
u32 rdma_ulp_mask;
u32 rsvd0[4];
u32 eth_tx_id_start;
u32 eth_tx_id_count;
u32 eth_rx_id_start;
u32 eth_rx_id_count;
u32 tpm_wrbq_id_start;
u32 tpm_wrbq_id_count;
u32 tpm_defq_id_start;
u32 tpm_defq_id_count;
u32 iscsi_wrbq_id_start;
u32 iscsi_wrbq_id_count;
u32 iscsi_defq_id_start;
u32 iscsi_defq_id_count;
u32 rdma_qp_id_start;
u32 rdma_qp_id_count;
u32 rsvd1[8];
} __packed;
union FWCMD_COMMON_ANON_259_PARAMS {
struct FWCMD_COMMON_ANON_260_REQUEST request;
struct BE_FIRMWARE_CONFIG response;
} __packed;
/*
 * This command queries the current firmware configuration parameters.
* The static configuration type is defined by be_config_number. This
* differentiates different BladeEngine builds, such as iSCSI Initiator
* versus iSCSI Target. For a given static configuration, the Upper
* Layer Protocol (ULP) processors may be reconfigured to support different
* protocols. Each ULP processor supports one or more protocols. The
* masks indicate which processors are configured for each protocol.
* For a given static configuration, the number of TCP connections
* supported for each protocol may vary. The *_id_start and *_id_count
* variables define a linear range of IDs that are available for each
* supported protocol. The *_id_count may be used by the driver to allocate
* the appropriate number of connection resources. The *_id_start may
* be used to map the arbitrary range of IDs to a zero-based range
* of indices.
*/
struct FWCMD_COMMON_FIRMWARE_CONFIG {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_259_PARAMS params;
} __packed;
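/*
 * Editorial example, not part of the original file: using the
 * *_id_start/*_id_count pairs as the comment above describes.  cfg is a
 * hypothetical pointer to a cached struct BE_FIRMWARE_CONFIG:
 *
 *	// map a firmware-assigned ETX ring id to a zero-based index
 *	u32 idx = cid - cfg->eth_tx_id_start;
 *
 *	ASSERT(idx < cfg->eth_tx_id_count);
 */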
struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS {
u32 emph_lev_sel_port0;
u32 emph_lev_sel_port1;
u8 xaui_vo_sel;
u8 xaui_state;
u16 rsvd0;
u32 xaui_eq_vector;
} __packed;
struct FWCMD_COMMON_ANON_262_REQUEST {
u32 rsvd0;
} __packed;
union FWCMD_COMMON_ANON_261_PARAMS {
struct FWCMD_COMMON_ANON_262_REQUEST request;
struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS response;
} __packed;
/*
 * This command can be used to read XAUI equalization parameters. The
* ARM firmware applies default equalization parameters during initialization.
* These parameters may be customer-specific when derived from the
* SEEPROM. See SEEPROM_DATA for equalization specific fields.
*/
struct FWCMD_COMMON_GET_PORT_EQUALIZATION {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_261_PARAMS params;
} __packed;
struct FWCMD_COMMON_ANON_264_RESPONSE {
u32 rsvd0;
} __packed;
union FWCMD_COMMON_ANON_263_PARAMS {
struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS request;
struct FWCMD_COMMON_ANON_264_RESPONSE response;
} __packed;
/*
 * This command can be used to set XAUI equalization parameters. The ARM
* firmware applies default equalization parameters during initialization.
* These parameters may be customer-specific when derived from the
* SEEPROM. See SEEPROM_DATA for equalization specific fields.
*/
struct FWCMD_COMMON_SET_PORT_EQUALIZATION {
union FWCMD_HEADER header;
union FWCMD_COMMON_ANON_263_PARAMS params;
} __packed;
#endif /* __fwcmd_common_bmap_h__ */

View File

@ -1,280 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __fwcmd_eth_bmap_h__
#define __fwcmd_eth_bmap_h__
#include "fwcmd_hdr_bmap.h"
#include "fwcmd_types_bmap.h"
struct MIB_ETH_STATISTICS_PARAMS_IN {
u32 rsvd0;
} __packed;
struct BE_RXF_STATS {
u32 p0recvdtotalbytesLSD; /* DWORD 0 */
u32 p0recvdtotalbytesMSD; /* DWORD 1 */
u32 p0recvdtotalframes; /* DWORD 2 */
u32 p0recvdunicastframes; /* DWORD 3 */
u32 p0recvdmulticastframes; /* DWORD 4 */
u32 p0recvdbroadcastframes; /* DWORD 5 */
u32 p0crcerrors; /* DWORD 6 */
u32 p0alignmentsymerrs; /* DWORD 7 */
u32 p0pauseframesrecvd; /* DWORD 8 */
u32 p0controlframesrecvd; /* DWORD 9 */
u32 p0inrangelenerrors; /* DWORD 10 */
u32 p0outrangeerrors; /* DWORD 11 */
u32 p0frametoolongerrors; /* DWORD 12 */
u32 p0droppedaddressmatch; /* DWORD 13 */
u32 p0droppedvlanmismatch; /* DWORD 14 */
u32 p0ipdroppedtoosmall; /* DWORD 15 */
u32 p0ipdroppedtooshort; /* DWORD 16 */
u32 p0ipdroppedhdrtoosmall; /* DWORD 17 */
u32 p0tcpdroppedlen; /* DWORD 18 */
u32 p0droppedrunt; /* DWORD 19 */
u32 p0recvd64; /* DWORD 20 */
u32 p0recvd65_127; /* DWORD 21 */
u32 p0recvd128_256; /* DWORD 22 */
u32 p0recvd256_511; /* DWORD 23 */
u32 p0recvd512_1023; /* DWORD 24 */
u32 p0recvd1518_1522; /* DWORD 25 */
u32 p0recvd1522_2047; /* DWORD 26 */
u32 p0recvd2048_4095; /* DWORD 27 */
u32 p0recvd4096_8191; /* DWORD 28 */
u32 p0recvd8192_9216; /* DWORD 29 */
u32 p0rcvdipcksmerrs; /* DWORD 30 */
u32 p0recvdtcpcksmerrs; /* DWORD 31 */
u32 p0recvdudpcksmerrs; /* DWORD 32 */
u32 p0recvdnonrsspackets; /* DWORD 33 */
u32 p0recvdippackets; /* DWORD 34 */
u32 p0recvdchute1packets; /* DWORD 35 */
u32 p0recvdchute2packets; /* DWORD 36 */
u32 p0recvdchute3packets; /* DWORD 37 */
u32 p0recvdipsecpackets; /* DWORD 38 */
u32 p0recvdmanagementpackets; /* DWORD 39 */
u32 p0xmitbyteslsd; /* DWORD 40 */
u32 p0xmitbytesmsd; /* DWORD 41 */
u32 p0xmitunicastframes; /* DWORD 42 */
u32 p0xmitmulticastframes; /* DWORD 43 */
u32 p0xmitbroadcastframes; /* DWORD 44 */
u32 p0xmitpauseframes; /* DWORD 45 */
u32 p0xmitcontrolframes; /* DWORD 46 */
u32 p0xmit64; /* DWORD 47 */
u32 p0xmit65_127; /* DWORD 48 */
u32 p0xmit128_256; /* DWORD 49 */
u32 p0xmit256_511; /* DWORD 50 */
u32 p0xmit512_1023; /* DWORD 51 */
u32 p0xmit1518_1522; /* DWORD 52 */
u32 p0xmit1522_2047; /* DWORD 53 */
u32 p0xmit2048_4095; /* DWORD 54 */
u32 p0xmit4096_8191; /* DWORD 55 */
u32 p0xmit8192_9216; /* DWORD 56 */
u32 p0rxfifooverflowdropped; /* DWORD 57 */
u32 p0ipseclookupfaileddropped; /* DWORD 58 */
u32 p1recvdtotalbytesLSD; /* DWORD 59 */
u32 p1recvdtotalbytesMSD; /* DWORD 60 */
u32 p1recvdtotalframes; /* DWORD 61 */
u32 p1recvdunicastframes; /* DWORD 62 */
u32 p1recvdmulticastframes; /* DWORD 63 */
u32 p1recvdbroadcastframes; /* DWORD 64 */
u32 p1crcerrors; /* DWORD 65 */
u32 p1alignmentsymerrs; /* DWORD 66 */
u32 p1pauseframesrecvd; /* DWORD 67 */
u32 p1controlframesrecvd; /* DWORD 68 */
u32 p1inrangelenerrors; /* DWORD 69 */
u32 p1outrangeerrors; /* DWORD 70 */
u32 p1frametoolongerrors; /* DWORD 71 */
u32 p1droppedaddressmatch; /* DWORD 72 */
u32 p1droppedvlanmismatch; /* DWORD 73 */
u32 p1ipdroppedtoosmall; /* DWORD 74 */
u32 p1ipdroppedtooshort; /* DWORD 75 */
u32 p1ipdroppedhdrtoosmall; /* DWORD 76 */
u32 p1tcpdroppedlen; /* DWORD 77 */
u32 p1droppedrunt; /* DWORD 78 */
u32 p1recvd64; /* DWORD 79 */
u32 p1recvd65_127; /* DWORD 80 */
u32 p1recvd128_256; /* DWORD 81 */
u32 p1recvd256_511; /* DWORD 82 */
u32 p1recvd512_1023; /* DWORD 83 */
u32 p1recvd1518_1522; /* DWORD 84 */
u32 p1recvd1522_2047; /* DWORD 85 */
u32 p1recvd2048_4095; /* DWORD 86 */
u32 p1recvd4096_8191; /* DWORD 87 */
u32 p1recvd8192_9216; /* DWORD 88 */
u32 p1rcvdipcksmerrs; /* DWORD 89 */
u32 p1recvdtcpcksmerrs; /* DWORD 90 */
u32 p1recvdudpcksmerrs; /* DWORD 91 */
u32 p1recvdnonrsspackets; /* DWORD 92 */
u32 p1recvdippackets; /* DWORD 93 */
u32 p1recvdchute1packets; /* DWORD 94 */
u32 p1recvdchute2packets; /* DWORD 95 */
u32 p1recvdchute3packets; /* DWORD 96 */
u32 p1recvdipsecpackets; /* DWORD 97 */
u32 p1recvdmanagementpackets; /* DWORD 98 */
u32 p1xmitbyteslsd; /* DWORD 99 */
u32 p1xmitbytesmsd; /* DWORD 100 */
u32 p1xmitunicastframes; /* DWORD 101 */
u32 p1xmitmulticastframes; /* DWORD 102 */
u32 p1xmitbroadcastframes; /* DWORD 103 */
u32 p1xmitpauseframes; /* DWORD 104 */
u32 p1xmitcontrolframes; /* DWORD 105 */
u32 p1xmit64; /* DWORD 106 */
u32 p1xmit65_127; /* DWORD 107 */
u32 p1xmit128_256; /* DWORD 108 */
u32 p1xmit256_511; /* DWORD 109 */
u32 p1xmit512_1023; /* DWORD 110 */
u32 p1xmit1518_1522; /* DWORD 111 */
u32 p1xmit1522_2047; /* DWORD 112 */
u32 p1xmit2048_4095; /* DWORD 113 */
u32 p1xmit4096_8191; /* DWORD 114 */
u32 p1xmit8192_9216; /* DWORD 115 */
u32 p1rxfifooverflowdropped; /* DWORD 116 */
u32 p1ipseclookupfaileddropped; /* DWORD 117 */
u32 pxdroppednopbuf; /* DWORD 118 */
u32 pxdroppednotxpb; /* DWORD 119 */
u32 pxdroppednoipsecbuf; /* DWORD 120 */
u32 pxdroppednoerxdescr; /* DWORD 121 */
u32 pxdroppednotpredescr; /* DWORD 122 */
u32 pxrecvdmanagementportpackets; /* DWORD 123 */
u32 pxrecvdmanagementportbytes; /* DWORD 124 */
u32 pxrecvdmanagementportpauseframes; /* DWORD 125 */
u32 pxrecvdmanagementporterrors; /* DWORD 126 */
u32 pxxmitmanagementportpackets; /* DWORD 127 */
u32 pxxmitmanagementportbytes; /* DWORD 128 */
u32 pxxmitmanagementportpause; /* DWORD 129 */
u32 pxxmitmanagementportrxfifooverflow; /* DWORD 130 */
u32 pxrecvdipsecipcksmerrs; /* DWORD 131 */
u32 pxrecvdtcpsecipcksmerrs; /* DWORD 132 */
u32 pxrecvdudpsecipcksmerrs; /* DWORD 133 */
u32 pxipsecrunt; /* DWORD 134 */
u32 pxipsecaddressmismatchdropped; /* DWORD 135 */
u32 pxipsecrxfifooverflowdropped; /* DWORD 136 */
u32 pxipsecframestoolong; /* DWORD 137 */
u32 pxipsectotalipframes; /* DWORD 138 */
u32 pxipseciptoosmall; /* DWORD 139 */
u32 pxipseciptooshort; /* DWORD 140 */
u32 pxipseciphdrtoosmall; /* DWORD 141 */
u32 pxipsectcphdrbad; /* DWORD 142 */
u32 pxrecvdipsecchute1; /* DWORD 143 */
u32 pxrecvdipsecchute2; /* DWORD 144 */
u32 pxrecvdipsecchute3; /* DWORD 145 */
u32 pxdropped7frags; /* DWORD 146 */
u32 pxdroppedfrags; /* DWORD 147 */
u32 pxdroppedinvalidfragring; /* DWORD 148 */
u32 pxnumforwardedpackets; /* DWORD 149 */
} __packed;
union MIB_ETH_STATISTICS_PARAMS {
struct MIB_ETH_STATISTICS_PARAMS_IN request;
struct BE_RXF_STATS response;
} __packed;
/*
* Query ethernet statistics. All domains may issue this command. The
* host domain drivers may optionally reset internal statistic counters
* with a query.
*/
struct FWCMD_ETH_GET_STATISTICS {
union FWCMD_HEADER header;
union MIB_ETH_STATISTICS_PARAMS params;
} __packed;
struct FWCMD_ETH_ANON_175_REQUEST {
u8 port0_promiscuous;
u8 port1_promiscuous;
u16 rsvd0;
} __packed;
struct FWCMD_ETH_ANON_176_RESPONSE {
u32 rsvd0;
} __packed;
union FWCMD_ETH_ANON_174_PARAMS {
struct FWCMD_ETH_ANON_175_REQUEST request;
struct FWCMD_ETH_ANON_176_RESPONSE response;
} __packed;
/* Enables/Disables promiscuous ethernet receive mode. */
struct FWCMD_ETH_PROMISCUOUS {
union FWCMD_HEADER header;
union FWCMD_ETH_ANON_174_PARAMS params;
} __packed;
struct FWCMD_ETH_ANON_178_REQUEST {
u32 new_fragsize_log2;
} __packed;
struct FWCMD_ETH_ANON_179_RESPONSE {
u32 actual_fragsize_log2;
} __packed;
union FWCMD_ETH_ANON_177_PARAMS {
struct FWCMD_ETH_ANON_178_REQUEST request;
struct FWCMD_ETH_ANON_179_RESPONSE response;
} __packed;
/*
* Sets the Ethernet RX fragment size. Only host (domain 0) networking
* drivers may issue this command. This call will fail for non-host
* protection domains. In this situation the MCC CQ status will indicate
 * a failure due to insufficient privileges. The response should be
* ignored, and the driver should use the FWCMD_ETH_GET_FRAG_SIZE to
* query the existing ethernet receive fragment size. It must use this
* fragment size for all fragments in the ethernet receive ring. If
* the command succeeds, the driver must use the frag size indicated
* in the command response since the requested frag size may not be applied
* until the next reboot. When the requested fragsize matches the response
* fragsize, this indicates the request was applied immediately.
*/
struct FWCMD_ETH_SET_RX_FRAG_SIZE {
union FWCMD_HEADER header;
union FWCMD_ETH_ANON_177_PARAMS params;
} __packed;
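/*
 * A minimal driver-side sketch of the fallback described above, assuming
 * the byte-oriented wrappers be_eth_rq_set_frag_size() and
 * be_eth_rq_get_frag_size() declared in hwlib.h are visible at this point.
 * The helper name and the treatment of any failure as a privilege failure
 * are assumptions made for illustration only.
 */
static inline int be_choose_rx_frag_size(struct be_function_object *pfob,
			u32 wanted_bytes, u32 *frag_bytes)
{
	/* Ask for the new size; a non-host domain will be refused. */
	int status = be_eth_rq_set_frag_size(pfob, wanted_bytes, frag_bytes);
	if (status != 0)
		/* Ignore the response; query the configured size instead. */
		status = be_eth_rq_get_frag_size(pfob, frag_bytes);
	/* Every fragment posted to the RX ring must use *frag_bytes. */
	return status;
}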
struct FWCMD_ETH_ANON_181_REQUEST {
u32 rsvd0;
} __packed;
struct FWCMD_ETH_ANON_182_RESPONSE {
u32 actual_fragsize_log2;
} __packed;
union FWCMD_ETH_ANON_180_PARAMS {
struct FWCMD_ETH_ANON_181_REQUEST request;
struct FWCMD_ETH_ANON_182_RESPONSE response;
} __packed;
/*
* Queries the Ethernet RX fragment size. All domains may issue this
* command. The driver should call this command to determine the minimum
* required fragment size for the ethernet RX ring buffers. Drivers
* may choose to use a larger size for each fragment buffer, but BladeEngine
* will use up to the configured minimum required fragsize in each ethernet
* receive fragment buffer. For example, if the ethernet receive fragment
* size is configured to 4kB, and a driver uses 8kB fragments, a 6kB
 * ethernet packet received by BladeEngine will be split across two
 * of the driver's receive fragments (4kB in one fragment buffer, and
* 2kB in the subsequent fragment buffer).
*/
struct FWCMD_ETH_GET_RX_FRAG_SIZE {
union FWCMD_HEADER header;
union FWCMD_ETH_ANON_180_PARAMS params;
} __packed;
#endif /* __fwcmd_eth_bmap_h__ */

View File

@ -1,54 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __fwcmd_hdr_bmap_h__
#define __fwcmd_hdr_bmap_h__
struct FWCMD_REQUEST_HEADER {
u8 opcode;
u8 subsystem;
u8 port_number;
u8 domain;
u32 timeout;
u32 request_length;
u32 rsvd0;
} __packed;
struct FWCMD_RESPONSE_HEADER {
u8 opcode;
u8 subsystem;
u8 rsvd0;
u8 domain;
u8 status;
u8 additional_status;
u16 rsvd1;
u32 response_length;
u32 actual_response_length;
} __packed;
/*
* The firmware/driver overwrites the input FWCMD_REQUEST_HEADER with
* the output FWCMD_RESPONSE_HEADER.
*/
union FWCMD_HEADER {
struct FWCMD_REQUEST_HEADER request;
struct FWCMD_RESPONSE_HEADER response;
} __packed;
#endif /* __fwcmd_hdr_bmap_h__ */

View File

@ -1,94 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __fwcmd_mcc_amap_h__
#define __fwcmd_mcc_amap_h__
#include "fwcmd_opcodes.h"
/*
 * Where applicable, a WRB may contain a list of scatter-gather elements.
 * Each element supports a 64-bit address and a 32-bit length field.
 */
struct BE_MCC_SGE_AMAP {
u8 pa_lo[32]; /* DWORD 0 */
u8 pa_hi[32]; /* DWORD 1 */
u8 length[32]; /* DWORD 2 */
} __packed;
struct MCC_SGE_AMAP {
u32 dw[3];
};
/*
* The design of an MCC_SGE allows up to 19 elements to be embedded
* in a WRB, supporting 64KB data transfers (assuming a 4KB page size).
*/
struct BE_MCC_WRB_PAYLOAD_AMAP {
union {
struct BE_MCC_SGE_AMAP sgl[19];
u8 embedded[59][32]; /* DWORD 0 */
};
} __packed;
struct MCC_WRB_PAYLOAD_AMAP {
u32 dw[59];
};
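/*
 * A quick check of the 64KB claim above (assumed 4KB page size): a 64KB
 * buffer at an arbitrary, unaligned offset spans at most 64KB/4KB + 1 = 17
 * pages, so one SGE per page fits within the 19 available elements with
 * room to spare. PAGES_SPANNED() in hwlib.h performs this page count.
 */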
/*
* This is the structure of the MCC Command WRB for commands
* sent to the Management Processing Unit (MPU). See section
* for usage in embedded and non-embedded modes.
*/
struct BE_MCC_WRB_AMAP {
u8 embedded; /* DWORD 0 */
u8 rsvd0[2]; /* DWORD 0 */
u8 sge_count[5]; /* DWORD 0 */
u8 rsvd1[16]; /* DWORD 0 */
u8 special[8]; /* DWORD 0 */
u8 payload_length[32]; /* DWORD 1 */
u8 tag[2][32]; /* DWORD 2 */
u8 rsvd2[32]; /* DWORD 4 */
struct BE_MCC_WRB_PAYLOAD_AMAP payload;
} __packed;
struct MCC_WRB_AMAP {
u32 dw[64];
};
/* This is the structure of the MCC Completion queue entry */
struct BE_MCC_CQ_ENTRY_AMAP {
u8 completion_status[16]; /* DWORD 0 */
u8 extended_status[16]; /* DWORD 0 */
u8 mcc_tag[2][32]; /* DWORD 1 */
u8 rsvd0[27]; /* DWORD 3 */
u8 consumed; /* DWORD 3 */
u8 completed; /* DWORD 3 */
u8 hpi_buffer_completion; /* DWORD 3 */
u8 async_event; /* DWORD 3 */
u8 valid; /* DWORD 3 */
} __packed;
struct MCC_CQ_ENTRY_AMAP {
u32 dw[4];
};
/* Mailbox structures used by the MPU during bootstrap */
struct BE_MCC_MAILBOX_AMAP {
struct BE_MCC_WRB_AMAP wrb;
struct BE_MCC_CQ_ENTRY_AMAP cq;
} __packed;
struct MCC_MAILBOX_AMAP {
u32 dw[68];
};
#endif /* __fwcmd_mcc_amap_h__ */

View File

@ -1,244 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __fwcmd_opcodes_amap_h__
#define __fwcmd_opcodes_amap_h__
/*
* --- FWCMD_SUBSYSTEMS ---
* The commands are grouped into the following subsystems. The subsystem
* code along with the opcode uniquely identify a particular fwcmd.
*/
#define FWCMD_SUBSYSTEM_RSVD (0) /* This subsystem is reserved. It is */
/* never used. */
#define FWCMD_SUBSYSTEM_COMMON (1) /* CMDs in this group are common to
* all subsystems. See
* COMMON_SUBSYSTEM_OPCODES for opcodes
* and Common Host Configuration CMDs
* for the FWCMD descriptions.
*/
#define FWCMD_SUBSYSTEM_COMMON_ISCSI (2) /* CMDs in this group are */
/*
* common to Initiator and Target. See
* COMMON_ISCSI_SUBSYSTEM_OPCODES and
* Common iSCSI Initiator and Target
* CMDs for the command descriptions.
*/
#define FWCMD_SUBSYSTEM_ETH (3) /* This subsystem is used to
execute Ethernet commands. */
#define FWCMD_SUBSYSTEM_TPM (4) /* This subsystem is used
to execute TPM commands. */
#define FWCMD_SUBSYSTEM_PXE_UNDI (5) /* This subsystem is used
* to execute PXE
* and UNDI specific commands.
*/
#define FWCMD_SUBSYSTEM_ISCSI_INI (6) /* This subsystem is used to
execute ISCSI Initiator
specific commands.
*/
#define FWCMD_SUBSYSTEM_ISCSI_TGT (7) /* This subsystem is used
to execute iSCSI Target
 specific commands between
PTL and ARM firmware.
*/
#define FWCMD_SUBSYSTEM_MILI_PTL (8) /* This subsystem is used to
execute iSCSI Target specific
 commands between MILI
and PTL. */
#define FWCMD_SUBSYSTEM_MILI_TMD (9) /* This subsystem is used to
execute iSCSI Target specific
commands between MILI
and TMD. */
#define FWCMD_SUBSYSTEM_PROXY (11) /* This subsystem is used
to execute proxied commands
within the host at the
explicit request of a
 non-privileged domain.
This 'subsystem' is entirely
virtual from the controller
and firmware perspective as
it is implemented in host
drivers.
*/
/*
* --- COMMON_SUBSYSTEM_OPCODES ---
* These opcodes are common to both networking and storage PCI
* functions. They are used to reserve resources and configure
* BladeEngine. These opcodes all use the FWCMD_SUBSYSTEM_COMMON
* subsystem code.
*/
#define OPCODE_COMMON_NTWK_MAC_QUERY (1)
#define SUBSYSTEM_COMMON_NTWK_MAC_QUERY (1)
#define SUBSYSTEM_COMMON_NTWK_MAC_SET (1)
#define SUBSYSTEM_COMMON_NTWK_MULTICAST_SET (1)
#define SUBSYSTEM_COMMON_NTWK_VLAN_CONFIG (1)
#define SUBSYSTEM_COMMON_NTWK_LINK_STATUS_QUERY (1)
#define SUBSYSTEM_COMMON_READ_FLASHROM (1)
#define SUBSYSTEM_COMMON_WRITE_FLASHROM (1)
#define SUBSYSTEM_COMMON_QUERY_MAX_FWCMD_BUFFER_SIZE (1)
#define SUBSYSTEM_COMMON_ADD_PAGE_TABLES (1)
#define SUBSYSTEM_COMMON_REMOVE_PAGE_TABLES (1)
#define SUBSYSTEM_COMMON_RING_DESTROY (1)
#define SUBSYSTEM_COMMON_CQ_CREATE (1)
#define SUBSYSTEM_COMMON_EQ_CREATE (1)
#define SUBSYSTEM_COMMON_ETH_RX_CREATE (1)
#define SUBSYSTEM_COMMON_ETH_TX_CREATE (1)
#define SUBSYSTEM_COMMON_ISCSI_DEFQ_CREATE (1)
#define SUBSYSTEM_COMMON_ISCSI_WRBQ_CREATE (1)
#define SUBSYSTEM_COMMON_MCC_CREATE (1)
#define SUBSYSTEM_COMMON_JELL_CONFIG (1)
#define SUBSYSTEM_COMMON_FORCE_FAILOVER (1)
#define SUBSYSTEM_COMMON_ADD_TEMPLATE_HEADER_BUFFERS (1)
#define SUBSYSTEM_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS (1)
#define SUBSYSTEM_COMMON_POST_ZERO_BUFFER (1)
#define SUBSYSTEM_COMMON_GET_QOS (1)
#define SUBSYSTEM_COMMON_SET_QOS (1)
#define SUBSYSTEM_COMMON_TCP_GET_STATISTICS (1)
#define SUBSYSTEM_COMMON_SEEPROM_READ (1)
#define SUBSYSTEM_COMMON_TCP_STATE_QUERY (1)
#define SUBSYSTEM_COMMON_GET_CNTL_ATTRIBUTES (1)
#define SUBSYSTEM_COMMON_NOP (1)
#define SUBSYSTEM_COMMON_NTWK_RX_FILTER (1)
#define SUBSYSTEM_COMMON_GET_FW_VERSION (1)
#define SUBSYSTEM_COMMON_SET_FLOW_CONTROL (1)
#define SUBSYSTEM_COMMON_GET_FLOW_CONTROL (1)
#define SUBSYSTEM_COMMON_SET_TCP_PARAMETERS (1)
#define SUBSYSTEM_COMMON_SET_FRAME_SIZE (1)
#define SUBSYSTEM_COMMON_GET_FAT (1)
#define SUBSYSTEM_COMMON_MODIFY_EQ_DELAY (1)
#define SUBSYSTEM_COMMON_FIRMWARE_CONFIG (1)
#define SUBSYSTEM_COMMON_ENABLE_DISABLE_DOMAINS (1)
#define SUBSYSTEM_COMMON_GET_DOMAIN_CONFIG (1)
#define SUBSYSTEM_COMMON_SET_VLD_CONFIG (1)
#define SUBSYSTEM_COMMON_GET_VLD_CONFIG (1)
#define SUBSYSTEM_COMMON_GET_PORT_EQUALIZATION (1)
#define SUBSYSTEM_COMMON_SET_PORT_EQUALIZATION (1)
#define SUBSYSTEM_COMMON_RED_CONFIG (1)
#define OPCODE_COMMON_NTWK_MAC_SET (2)
#define OPCODE_COMMON_NTWK_MULTICAST_SET (3)
#define OPCODE_COMMON_NTWK_VLAN_CONFIG (4)
#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY (5)
#define OPCODE_COMMON_READ_FLASHROM (6)
#define OPCODE_COMMON_WRITE_FLASHROM (7)
#define OPCODE_COMMON_QUERY_MAX_FWCMD_BUFFER_SIZE (8)
#define OPCODE_COMMON_ADD_PAGE_TABLES (9)
#define OPCODE_COMMON_REMOVE_PAGE_TABLES (10)
#define OPCODE_COMMON_RING_DESTROY (11)
#define OPCODE_COMMON_CQ_CREATE (12)
#define OPCODE_COMMON_EQ_CREATE (13)
#define OPCODE_COMMON_ETH_RX_CREATE (14)
#define OPCODE_COMMON_ETH_TX_CREATE (15)
#define OPCODE_COMMON_NET_RESERVED0 (16) /* Reserved */
#define OPCODE_COMMON_NET_RESERVED1 (17) /* Reserved */
#define OPCODE_COMMON_NET_RESERVED2 (18) /* Reserved */
#define OPCODE_COMMON_ISCSI_DEFQ_CREATE (19)
#define OPCODE_COMMON_ISCSI_WRBQ_CREATE (20)
#define OPCODE_COMMON_MCC_CREATE (21)
#define OPCODE_COMMON_JELL_CONFIG (22)
#define OPCODE_COMMON_FORCE_FAILOVER (23)
#define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS (24)
#define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS (25)
#define OPCODE_COMMON_POST_ZERO_BUFFER (26)
#define OPCODE_COMMON_GET_QOS (27)
#define OPCODE_COMMON_SET_QOS (28)
#define OPCODE_COMMON_TCP_GET_STATISTICS (29)
#define OPCODE_COMMON_SEEPROM_READ (30)
#define OPCODE_COMMON_TCP_STATE_QUERY (31)
#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES (32)
#define OPCODE_COMMON_NOP (33)
#define OPCODE_COMMON_NTWK_RX_FILTER (34)
#define OPCODE_COMMON_GET_FW_VERSION (35)
#define OPCODE_COMMON_SET_FLOW_CONTROL (36)
#define OPCODE_COMMON_GET_FLOW_CONTROL (37)
#define OPCODE_COMMON_SET_TCP_PARAMETERS (38)
#define OPCODE_COMMON_SET_FRAME_SIZE (39)
#define OPCODE_COMMON_GET_FAT (40)
#define OPCODE_COMMON_MODIFY_EQ_DELAY (41)
#define OPCODE_COMMON_FIRMWARE_CONFIG (42)
#define OPCODE_COMMON_ENABLE_DISABLE_DOMAINS (43)
#define OPCODE_COMMON_GET_DOMAIN_CONFIG (44)
#define OPCODE_COMMON_SET_VLD_CONFIG (45)
#define OPCODE_COMMON_GET_VLD_CONFIG (46)
#define OPCODE_COMMON_GET_PORT_EQUALIZATION (47)
#define OPCODE_COMMON_SET_PORT_EQUALIZATION (48)
#define OPCODE_COMMON_RED_CONFIG (49)
/*
* --- ETH_SUBSYSTEM_OPCODES ---
* These opcodes are used for configuring the Ethernet interfaces. These
* opcodes all use the FWCMD_SUBSYSTEM_ETH subsystem code.
*/
#define OPCODE_ETH_RSS_CONFIG (1)
#define OPCODE_ETH_ACPI_CONFIG (2)
#define SUBSYSTEM_ETH_RSS_CONFIG (3)
#define SUBSYSTEM_ETH_ACPI_CONFIG (3)
#define OPCODE_ETH_PROMISCUOUS (3)
#define SUBSYSTEM_ETH_PROMISCUOUS (3)
#define SUBSYSTEM_ETH_GET_STATISTICS (3)
#define SUBSYSTEM_ETH_GET_RX_FRAG_SIZE (3)
#define SUBSYSTEM_ETH_SET_RX_FRAG_SIZE (3)
#define OPCODE_ETH_GET_STATISTICS (4)
#define OPCODE_ETH_GET_RX_FRAG_SIZE (5)
#define OPCODE_ETH_SET_RX_FRAG_SIZE (6)
/*
* --- MCC_STATUS_CODE ---
* These are the global status codes used by all subsystems
*/
#define MCC_STATUS_SUCCESS (0) /* Indicates a successful
completion of the command */
#define MCC_STATUS_INSUFFICIENT_PRIVILEGES (1) /* The client does not have
sufficient privileges to
execute the command */
#define MCC_STATUS_INVALID_PARAMETER (2) /* A parameter in the command
was invalid. The extended
status contains the index
of the parameter */
#define MCC_STATUS_INSUFFICIENT_RESOURCES (3) /* There are insufficient
chip resources to execute
the command */
#define MCC_STATUS_QUEUE_FLUSHING (4) /* The command is completing
because the queue was
getting flushed */
#define MCC_STATUS_DMA_FAILED (5) /* The command is completing
with a DMA error */
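/*
 * Hedged illustration of how a driver might map these codes onto errno
 * values; the mapping and the availability of <linux/errno.h> here are
 * assumptions, not part of this interface.
 */
static inline int mcc_status_to_errno(int mcc_status)
{
	switch (mcc_status) {
	case MCC_STATUS_SUCCESS:
		return 0;
	case MCC_STATUS_INSUFFICIENT_PRIVILEGES:
		return -EPERM;
	case MCC_STATUS_INVALID_PARAMETER:
		return -EINVAL;
	case MCC_STATUS_INSUFFICIENT_RESOURCES:
		return -ENOMEM;
	default: /* MCC_STATUS_QUEUE_FLUSHING, MCC_STATUS_DMA_FAILED, ... */
		return -EIO;
	}
}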
/*
* --- MGMT_ERROR_CODES ---
* Error Codes returned in the status field of the FWCMD response header
*/
#define MGMT_STATUS_SUCCESS (0) /* The FWCMD completed
without errors */
#define MGMT_STATUS_FAILED (1) /* Error status in the Status
field of the
struct FWCMD_RESPONSE_HEADER */
#define MGMT_STATUS_ILLEGAL_REQUEST (2) /* Invalid FWCMD opcode */
#define MGMT_STATUS_ILLEGAL_FIELD (3) /* Invalid parameter in
the FWCMD payload */
#endif /* __fwcmd_opcodes_amap_h__ */

View File

@ -1,29 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __fwcmd_types_bmap_h__
#define __fwcmd_types_bmap_h__
/* MAC address format */
struct MAC_ADDRESS_FORMAT {
u16 SizeOfStructure;
u8 MACAddress[6];
} __packed;
#endif /* __fwcmd_types_bmap_h__ */

View File

@ -1,182 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __host_struct_amap_h__
#define __host_struct_amap_h__
#include "be_cm.h"
#include "be_common.h"
#include "descriptors.h"
/* --- EQ_COMPLETION_MAJOR_CODE_ENUM --- */
#define EQ_MAJOR_CODE_COMPLETION (0) /* Completion event on a */
 /* completion queue. */
#define EQ_MAJOR_CODE_ETH (1) /* Affiliated Ethernet Event. */
#define EQ_MAJOR_CODE_RESERVED (2) /* Reserved */
#define EQ_MAJOR_CODE_RDMA (3) /* Affiliated RDMA Event. */
#define EQ_MAJOR_CODE_ISCSI (4) /* Affiliated ISCSI Event */
#define EQ_MAJOR_CODE_UNAFFILIATED (5) /* Unaffiliated Event */
/* --- EQ_COMPLETION_MINOR_CODE_ENUM --- */
#define EQ_MINOR_CODE_COMPLETION (0) /* Completion event on a */
/* completion queue. */
#define EQ_MINOR_CODE_OTHER (1) /* Other Event (TBD). */
/* Queue Entry Definition for all 4 byte event queue types. */
struct BE_EQ_ENTRY_AMAP {
u8 Valid; /* DWORD 0 */
u8 MajorCode[3]; /* DWORD 0 */
u8 MinorCode[12]; /* DWORD 0 */
u8 ResourceID[16]; /* DWORD 0 */
} __packed;
struct EQ_ENTRY_AMAP {
u32 dw[1];
};
/*
* --- ETH_EVENT_CODE ---
* These codes are returned by the MPU when one of these events has occurred,
* and the event is configured to report to an Event Queue when an event
* is detected.
*/
#define ETH_EQ_LINK_STATUS (0) /* Link status change event */
/* detected. */
#define ETH_EQ_WATERMARK (1) /* watermark event detected. */
#define ETH_EQ_MAGIC_PKT (2) /* magic pkt event detected. */
#define ETH_EQ_ACPI_PKT0 (3) /* ACPI interesting packet */
/* detected. */
#define ETH_EQ_ACPI_PKT1 (3) /* ACPI interesting packet */
/* detected. */
#define ETH_EQ_ACPI_PKT2 (3) /* ACPI interesting packet */
/* detected. */
#define ETH_EQ_ACPI_PKT3 (3) /* ACPI interesting packet */
/* detected. */
/*
* --- ETH_TX_COMPL_STATUS_ENUM ---
* Status codes contained in Ethernet TX completion descriptors.
*/
#define ETH_COMP_VALID (0)
#define ETH_COMP_ERROR (1)
#define ETH_COMP_INVALID (15)
/*
* --- ETH_TX_COMPL_PORT_ENUM ---
* Port indicator contained in Ethernet TX completion descriptors.
*/
#define ETH_COMP_PORT0 (0)
#define ETH_COMP_PORT1 (1)
#define ETH_COMP_MGMT (2)
/*
* --- ETH_TX_COMPL_CT_ENUM ---
* Completion type indicator contained in Ethernet TX completion descriptors.
*/
#define ETH_COMP_ETH (0)
/*
* Work request block that the driver issues to the chip for
* Ethernet transmissions. All control fields must be valid in each WRB for
* a message. The controller, as specified by the flags, optionally writes
 * an entry to the Completion Ring and generates an event.
*/
struct BE_ETH_WRB_AMAP {
u8 frag_pa_hi[32]; /* DWORD 0 */
u8 frag_pa_lo[32]; /* DWORD 1 */
u8 complete; /* DWORD 2 */
u8 event; /* DWORD 2 */
u8 crc; /* DWORD 2 */
u8 forward; /* DWORD 2 */
u8 ipsec; /* DWORD 2 */
u8 mgmt; /* DWORD 2 */
u8 ipcs; /* DWORD 2 */
u8 udpcs; /* DWORD 2 */
u8 tcpcs; /* DWORD 2 */
u8 lso; /* DWORD 2 */
u8 last; /* DWORD 2 */
u8 vlan; /* DWORD 2 */
u8 dbg[3]; /* DWORD 2 */
u8 hash_val[3]; /* DWORD 2 */
u8 lso_mss[14]; /* DWORD 2 */
u8 frag_len[16]; /* DWORD 3 */
u8 vlan_tag[16]; /* DWORD 3 */
} __packed;
struct ETH_WRB_AMAP {
u32 dw[4];
};
/* This is an Ethernet transmit completion descriptor */
struct BE_ETH_TX_COMPL_AMAP {
u8 user_bytes[16]; /* DWORD 0 */
u8 nwh_bytes[8]; /* DWORD 0 */
u8 lso; /* DWORD 0 */
u8 rsvd0[7]; /* DWORD 0 */
u8 wrb_index[16]; /* DWORD 1 */
u8 ct[2]; /* DWORD 1 */
u8 port[2]; /* DWORD 1 */
u8 rsvd1[8]; /* DWORD 1 */
u8 status[4]; /* DWORD 1 */
u8 rsvd2[16]; /* DWORD 2 */
u8 ringid[11]; /* DWORD 2 */
u8 hash_val[4]; /* DWORD 2 */
u8 valid; /* DWORD 2 */
u8 rsvd3[32]; /* DWORD 3 */
} __packed;
struct ETH_TX_COMPL_AMAP {
u32 dw[4];
};
/* Ethernet Receive Buffer descriptor */
struct BE_ETH_RX_D_AMAP {
u8 fragpa_hi[32]; /* DWORD 0 */
u8 fragpa_lo[32]; /* DWORD 1 */
} __packed;
struct ETH_RX_D_AMAP {
u32 dw[2];
};
/* This is an Ethernet Receive Completion Descriptor */
struct BE_ETH_RX_COMPL_AMAP {
u8 vlan_tag[16]; /* DWORD 0 */
u8 pktsize[14]; /* DWORD 0 */
u8 port; /* DWORD 0 */
u8 rsvd0; /* DWORD 0 */
u8 err; /* DWORD 1 */
u8 rsshp; /* DWORD 1 */
u8 ipf; /* DWORD 1 */
u8 tcpf; /* DWORD 1 */
u8 udpf; /* DWORD 1 */
u8 ipcksm; /* DWORD 1 */
u8 tcpcksm; /* DWORD 1 */
u8 udpcksm; /* DWORD 1 */
u8 macdst[6]; /* DWORD 1 */
u8 vtp; /* DWORD 1 */
u8 vtm; /* DWORD 1 */
u8 fragndx[10]; /* DWORD 1 */
u8 ct[2]; /* DWORD 1 */
u8 ipsec; /* DWORD 1 */
u8 numfrags[3]; /* DWORD 1 */
u8 rsvd1[31]; /* DWORD 2 */
u8 valid; /* DWORD 2 */
u8 rsshash[32]; /* DWORD 3 */
} __packed;
struct ETH_RX_COMPL_AMAP {
u32 dw[4];
};
#endif /* __host_struct_amap_h__ */

View File

@ -1,830 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
#ifndef __hwlib_h__
#define __hwlib_h__
#include <linux/module.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include "regmap.h" /* srcgen array map output */
#include "asyncmesg.h"
#include "fwcmd_opcodes.h"
#include "post_codes.h"
#include "fwcmd_mcc.h"
#include "fwcmd_types_bmap.h"
#include "fwcmd_common_bmap.h"
#include "fwcmd_eth_bmap.h"
#include "bestatus.h"
/*
 * Macros for reading/writing protection domain and CSR registers
 * in BladeEngine.
 */
#define PD_READ(fo, field) ioread32((fo)->db_va + \
offsetof(struct BE_PROTECTION_DOMAIN_DBMAP_AMAP, field)/8)
#define PD_WRITE(fo, field, val) iowrite32(val, (fo)->db_va + \
offsetof(struct BE_PROTECTION_DOMAIN_DBMAP_AMAP, field)/8)
#define CSR_READ(fo, field) ioread32((fo)->csr_va + \
offsetof(struct BE_BLADE_ENGINE_CSRMAP_AMAP, field)/8)
#define CSR_WRITE(fo, field, val) iowrite32(val, (fo)->csr_va + \
offsetof(struct BE_BLADE_ENGINE_CSRMAP_AMAP, field)/8)
#define PCICFG0_READ(fo, field) ioread32((fo)->pci_va + \
offsetof(struct BE_PCICFG0_CSRMAP_AMAP, field)/8)
#define PCICFG0_WRITE(fo, field, val) iowrite32(val, (fo)->pci_va + \
offsetof(struct BE_PCICFG0_CSRMAP_AMAP, field)/8)
#define PCICFG1_READ(fo, field) ioread32((fo)->pci_va + \
offsetof(struct BE_PCICFG1_CSRMAP_AMAP, field)/8)
#define PCICFG1_WRITE(fo, field, val) iowrite32(val, (fo)->pci_va + \
offsetof(struct BE_PCICFG1_CSRMAP_AMAP, field)/8)
#ifdef BE_DEBUG
#define ASSERT(c) BUG_ON(!(c))
#else
#define ASSERT(c)
#endif
/* debug levels */
enum BE_DEBUG_LEVELS {
DL_ALWAYS = 0, /* cannot be masked */
DL_ERR = 0x1, /* errors that should never happen */
 DL_WARN = 0x2, /* something questionable;
 recoverable errors */
DL_NOTE = 0x4, /* infrequent, important debug info */
DL_INFO = 0x8, /* debug information */
DL_VERBOSE = 0x10, /* detailed info, such as buffer traces */
BE_DL_MIN_VALUE = 0x1, /* this is the min value used */
 BE_DL_MAX_VALUE = 0x80 /* this is the highest value used */
} ;
extern unsigned int trace_level;
#define TRACE(lm, fmt, args...) { \
if (trace_level & lm) { \
printk(KERN_NOTICE "BE: %s:%d \n" fmt, \
__FILE__ , __LINE__ , ## args); \
} \
}
static inline unsigned int be_trace_set_level(unsigned int level)
{
unsigned int old_level = trace_level;
trace_level = level;
return old_level;
}
#define be_trace_get_level() trace_level
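/*
 * Example with assumed values: after be_trace_set_level(DL_ERR | DL_WARN),
 * TRACE(DL_WARN, "cq %d overrun", cq_id) prints, while TRACE(DL_INFO, ...)
 * is masked.
 */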
/*
* Returns number of pages spanned by the size of data
* starting at the given address.
*/
#define PAGES_SPANNED(_address, _size) \
((u32)((((size_t)(_address) & (PAGE_SIZE - 1)) + \
(_size) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
/* Byte offset into the page corresponding to given address */
#define OFFSET_IN_PAGE(_addr_) ((size_t)(_addr_) & (PAGE_SIZE-1))
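/*
 * Worked example with assumed values (4KB pages): a 6000-byte buffer whose
 * start address has OFFSET_IN_PAGE() == 0xf80 (3968) gives
 * PAGES_SPANNED() == (3968 + 6000 + 4095) >> 12 == 3 pages, one more than
 * the two pages that 6000 bytes alone would occupy.
 */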
/*
* circular subtract.
* Returns a - b assuming a circular number system, where a and b are
* in range (0, maxValue-1). If a==b, zero is returned so the
* highest value possible with this subtraction is maxValue-1.
*/
static inline u32 be_subc(u32 a, u32 b, u32 max)
{
ASSERT(a <= max && b <= max);
ASSERT(max > 0);
return a >= b ? (a - b) : (max - b + a);
}
static inline u32 be_addc(u32 a, u32 b, u32 max)
{
ASSERT(a < max);
ASSERT(max > 0);
return (max - a > b) ? (a + b) : (b + a - max);
}
/* descriptor for a physically contiguous memory used for ring */
struct ring_desc {
u32 length; /* length in bytes */
void *va; /* virtual address */
u64 pa; /* bus address */
} ;
/*
* This structure stores information about a ring shared between hardware
* and software. Each ring is allocated by the driver in the uncached
* extension and mapped into BladeEngine's unified table.
*/
struct mp_ring {
u32 pages; /* queue size in pages */
u32 id; /* queue id assigned by beklib */
u32 num; /* number of elements in queue */
u32 cidx; /* consumer index */
u32 pidx; /* producer index -- not used by most rings */
u32 itemSize; /* size in bytes of one object */
void *va; /* The virtual address of the ring.
This should be last to allow 32 & 64
bit debugger extensions to work. */
} ;
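/*
 * A hypothetical use of the circular helpers above with struct mp_ring:
 * the number of slots a producer may still post, keeping one slot empty so
 * that pidx == cidx always means "ring empty". This helper is a sketch,
 * not part of the original interface.
 */
static inline u32 be_ring_num_free(struct mp_ring *ring)
{
	u32 used = be_subc(ring->pidx, ring->cidx, ring->num);
	return ring->num - 1 - used;
}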
/*----------- amap bit field get / set macros and functions -----*/
/*
 * Structures defined in the map header files (under fw/amap/) with names
 * in the format BE_<name>_AMAP are pseudo-structures with members
 * of type u8. These structures are templates that are used in
 * conjunction with the structures with names in the format
 * <name>_AMAP to calculate the bit masks and bit offsets to get or set
 * bit fields in structures. The structures <name>_AMAP are arrays
 * of 32-bit words and have the correct size. The following macros
 * provide convenient ways to get and set the various members
 * in the structures without using structures with bit fields.
 * Always use the AMAP_GET_BITS_PTR and AMAP_SET_BITS_PTR
 * macros to extract and set various members.
 */
/*
 * Returns a bit mask for the field that is NOT shifted into location.
* That means return values always look like: 0x1, 0xFF, 0x7FF, etc...
*/
static inline u32 amap_mask(u32 bit_size)
{
return bit_size == 32 ? 0xFFFFFFFF : (1 << bit_size) - 1;
}
#define AMAP_BIT_MASK(_struct_, field) \
amap_mask(AMAP_BIT_SIZE(_struct_, field))
/*
* non-optimized set bits function. First clears the bits and then assigns them.
* This does not require knowledge of the particular DWORD you are setting.
* e.g. AMAP_SET_BITS_PTR (struct, field1, &contextMemory, 123);
*/
static inline void
amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
{
u32 *dw = (u32 *)ptr;
*(dw + dw_offset) &= ~(mask << offset);
*(dw + dw_offset) |= (mask & value) << offset;
}
#define AMAP_SET_BITS_PTR(_struct_, field, _structPtr_, val) \
amap_set(_structPtr_, AMAP_WORD_OFFSET(_struct_, field),\
AMAP_BIT_MASK(_struct_, field), \
AMAP_BIT_OFFSET(_struct_, field), val)
/*
* Non-optimized routine that gets the bits without knowing the correct DWORD.
* e.g. fieldValue = AMAP_GET_BITS_PTR (struct, field1, &contextMemory);
*/
static inline u32
amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
u32 *dw = (u32 *)ptr;
return mask & (*(dw + dw_offset) >> offset);
}
#define AMAP_GET_BITS_PTR(_struct_, field, _structPtr_) \
amap_get(_structPtr_, AMAP_WORD_OFFSET(_struct_, field), \
AMAP_BIT_MASK(_struct_, field), \
AMAP_BIT_OFFSET(_struct_, field))
/* Returns 0-31 representing bit offset within a DWORD of a bitfield. */
#define AMAP_BIT_OFFSET(_struct_, field) \
(offsetof(struct BE_ ## _struct_ ## _AMAP, field) % 32)
/* Returns 0-n representing DWORD offset of bitfield within the structure. */
#define AMAP_WORD_OFFSET(_struct_, field) \
(offsetof(struct BE_ ## _struct_ ## _AMAP, field)/32)
/* Returns size of bitfield in bits. */
#define AMAP_BIT_SIZE(_struct_, field) \
sizeof(((struct BE_ ## _struct_ ## _AMAP*)0)->field)
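/*
 * Usage sketch, assuming struct BE_ETH_WRB_AMAP (from the host_struct map
 * header) is visible here: set and read back bit fields of an ethernet WRB
 * without hand-computing masks or DWORD offsets. The function itself is
 * illustrative only.
 */
static inline void eth_wrb_amap_example(struct ETH_WRB_AMAP *wrb)
{
	AMAP_SET_BITS_PTR(ETH_WRB, frag_len, wrb, 1514); /* 16-bit field */
	AMAP_SET_BITS_PTR(ETH_WRB, last, wrb, 1); /* single-bit flag */
	ASSERT(AMAP_GET_BITS_PTR(ETH_WRB, frag_len, wrb) == 1514);
}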
struct be_mcc_wrb_response_copy {
u16 length; /* bytes in response */
u16 fwcmd_offset; /* offset within the wrb of the response */
void *va; /* user's va to copy response into */
} ;
typedef void (*mcc_wrb_cqe_callback) (void *context, int status,
struct MCC_WRB_AMAP *optional_wrb);
struct be_mcc_wrb_context {
mcc_wrb_cqe_callback internal_cb; /* Function to call on
completion */
void *internal_cb_context; /* Parameter to pass
to completion function */
mcc_wrb_cqe_callback cb; /* Function to call on completion */
void *cb_context; /* Parameter to pass to completion function */
int *users_final_status; /* pointer to a local
variable for synchronous
commands */
struct MCC_WRB_AMAP *wrb; /* pointer to original wrb for embedded
commands only */
struct list_head next; /* links context structs together in
free list */
struct be_mcc_wrb_response_copy copy; /* Optional parameters to copy
embedded response to user's va */
#if defined(BE_DEBUG)
u16 subsystem, opcode; /* Track this FWCMD for debug builds. */
struct MCC_WRB_AMAP *ring_wrb;
u32 consumed_count;
#endif
} ;
/*
Represents a function object for network or storage. This
is used to manage per-function resources like MCC CQs, etc.
*/
struct be_function_object {
u32 magic; /*!< magic for detecting memory corruption. */
/* PCI BAR mapped addresses */
u8 __iomem *csr_va; /* CSR */
u8 __iomem *db_va; /* Door Bell */
u8 __iomem *pci_va; /* PCI config space */
u32 emulate; /* if set, MPU is not available.
Emulate everything. */
u32 pend_queue_driving; /* if set, drive the queued WRBs
after releasing the WRB lock */
spinlock_t post_lock; /* lock for verifying one thread posting wrbs */
spinlock_t cq_lock; /* lock for verifying one thread
processing cq */
spinlock_t mcc_context_lock; /* lock for protecting mcc
context free list */
unsigned long post_irq;
unsigned long cq_irq;
u32 type;
u32 pci_function_number;
struct be_mcc_object *mcc; /* mcc rings. */
struct {
struct MCC_MAILBOX_AMAP *va; /* VA to the mailbox */
u64 pa; /* PA to the mailbox */
u32 length; /* byte length of mailbox */
/* One default context struct used for posting at
* least one MCC_WRB
*/
struct be_mcc_wrb_context default_context;
bool default_context_allocated;
} mailbox;
struct {
/* Wake on lans configured. */
u32 wol_bitmask; /* bits 0,1,2,3 are set if
corresponding index is enabled */
} config;
struct BE_FIRMWARE_CONFIG fw_config;
} ;
/*
Represents an Event Queue
*/
struct be_eq_object {
u32 magic;
atomic_t ref_count;
struct be_function_object *parent_function;
struct list_head eq_list;
struct list_head cq_list_head;
u32 eq_id;
void *cb_context;
} ;
/*
Manages a completion queue
*/
struct be_cq_object {
u32 magic;
atomic_t ref_count;
struct be_function_object *parent_function;
struct be_eq_object *eq_object;
struct list_head cq_list;
struct list_head cqlist_for_eq;
void *va;
u32 num_entries;
void *cb_context;
u32 cq_id;
} ;
/*
Manages an ethernet send queue
*/
struct be_ethsq_object {
u32 magic;
struct list_head list;
struct be_function_object *parent_function;
struct be_cq_object *cq_object;
u32 bid;
} ;
/*
 Manages an ethernet receive queue
*/
struct be_ethrq_object {
u32 magic;
struct list_head list;
struct be_function_object *parent_function;
u32 rid;
struct be_cq_object *cq_object;
struct be_cq_object *rss_cq_object[4];
} ;
/*
Manages an MCC
*/
typedef void (*mcc_async_event_callback) (void *context, u32 event_code,
void *event);
struct be_mcc_object {
u32 magic;
struct be_function_object *parent_function;
struct list_head mcc_list;
struct be_cq_object *cq_object;
/* Async event callback for MCC CQ. */
mcc_async_event_callback async_cb;
void *async_context;
struct {
struct be_mcc_wrb_context *base;
u32 num;
struct list_head list_head;
} wrb_context;
struct {
struct ring_desc *rd;
struct mp_ring ring;
} sq;
struct {
struct mp_ring ring;
} cq;
u32 processing; /* flag indicating that one thread
is processing CQ */
 u32 rearm; /* doorbell rearm setting to make sure
 the active processing thread rearms the
 CQ if any of the threads requested it. */
struct list_head backlog;
u32 backlog_length;
u32 driving_backlog;
u32 consumed_index;
} ;
/* Queue context header -- the required software information for
* queueing a WRB.
*/
struct be_queue_driver_context {
mcc_wrb_cqe_callback internal_cb; /* Function to call on
completion */
void *internal_cb_context; /* Parameter to pass
to completion function */
mcc_wrb_cqe_callback cb; /* Function to call on completion */
void *cb_context; /* Parameter to pass to completion function */
struct be_mcc_wrb_response_copy copy; /* Optional parameters to copy
embedded response to user's va */
void *optional_fwcmd_va;
struct list_head list;
u32 bytes;
} ;
/*
* Common MCC WRB header that all commands require.
*/
struct be_mcc_wrb_header {
u8 rsvd[offsetof(struct BE_MCC_WRB_AMAP, payload)/8];
} ;
/*
 * All non-embedded commands supported by hwlib functions allow only
* 1 SGE. This queue context handles them all.
*/
struct be_nonembedded_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct MCC_SGE_AMAP sge[1];
} ;
/*
* ------------------------------------------------------------------------
* This section contains the specific queue struct for each command.
* The user could always provide a be_generic_q_ctxt but this is a
* rather large struct. By using the specific struct, memory consumption
* can be reduced.
* ------------------------------------------------------------------------
*/
struct be_link_status_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY fwcmd;
} ;
struct be_multicast_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct FWCMD_COMMON_NTWK_MULTICAST_SET fwcmd;
} ;
struct be_vlan_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct FWCMD_COMMON_NTWK_VLAN_CONFIG fwcmd;
} ;
struct be_promiscuous_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct FWCMD_ETH_PROMISCUOUS fwcmd;
} ;
struct be_force_failover_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct FWCMD_COMMON_FORCE_FAILOVER fwcmd;
} ;
struct be_rxf_filter_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct FWCMD_COMMON_NTWK_RX_FILTER fwcmd;
} ;
struct be_eq_modify_delay_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct FWCMD_COMMON_MODIFY_EQ_DELAY fwcmd;
} ;
/*
* The generic context is the largest size that would be required.
* It is the software context plus an entire WRB.
*/
struct be_generic_q_ctxt {
struct be_queue_driver_context context;
struct be_mcc_wrb_header wrb_header;
struct MCC_WRB_PAYLOAD_AMAP payload;
} ;
/*
* Types for the BE_QUEUE_CONTEXT object.
*/
#define BE_QUEUE_INVALID (0)
#define BE_QUEUE_LINK_STATUS (0xA006)
#define BE_QUEUE_ETH_STATS (0xA007)
#define BE_QUEUE_TPM_STATS (0xA008)
#define BE_QUEUE_TCP_STATS (0xA009)
#define BE_QUEUE_MULTICAST (0xA00A)
#define BE_QUEUE_VLAN (0xA00B)
#define BE_QUEUE_RSS (0xA00C)
#define BE_QUEUE_FORCE_FAILOVER (0xA00D)
#define BE_QUEUE_PROMISCUOUS (0xA00E)
#define BE_QUEUE_WAKE_ON_LAN (0xA00F)
#define BE_QUEUE_NOP (0xA010)
/* --- BE_FUNCTION_ENUM --- */
#define BE_FUNCTION_TYPE_ISCSI (0)
#define BE_FUNCTION_TYPE_NETWORK (1)
#define BE_FUNCTION_TYPE_ARM (2)
/* --- BE_ETH_TX_RING_TYPE_ENUM --- */
#define BE_ETH_TX_RING_TYPE_FORWARDING (1) /* Ether ring for forwarding */
#define BE_ETH_TX_RING_TYPE_STANDARD (2) /* Ether ring for sending */
/* network packets. */
#define BE_ETH_TX_RING_TYPE_BOUND (3) /* Ethernet ring for sending */
/* network packets, bound */
/* to a physical port. */
/*
* ----------------------------------------------------------------------
* API MACROS
* ----------------------------------------------------------------------
*/
#define BE_FWCMD_NAME(_short_name_) struct FWCMD_##_short_name_
#define BE_OPCODE_NAME(_short_name_) OPCODE_##_short_name_
#define BE_SUBSYSTEM_NAME(_short_name_) SUBSYSTEM_##_short_name_
#define BE_PREPARE_EMBEDDED_FWCMD(_pfob_, _wrb_, _short_name_) \
((BE_FWCMD_NAME(_short_name_) *) \
be_function_prepare_embedded_fwcmd(_pfob_, _wrb_, \
sizeof(BE_FWCMD_NAME(_short_name_)), \
FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.request), \
FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.response), \
BE_OPCODE_NAME(_short_name_), \
BE_SUBSYSTEM_NAME(_short_name_)));
#define BE_PREPARE_NONEMBEDDED_FWCMD(_pfob_, _wrb_, _iva_, _ipa_, _short_name_)\
((BE_FWCMD_NAME(_short_name_) *) \
be_function_prepare_nonembedded_fwcmd(_pfob_, _wrb_, (_iva_), (_ipa_), \
sizeof(BE_FWCMD_NAME(_short_name_)), \
FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.request), \
FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.response), \
BE_OPCODE_NAME(_short_name_), \
BE_SUBSYSTEM_NAME(_short_name_)));
int be_function_object_create(u8 __iomem *csr_va, u8 __iomem *db_va,
u8 __iomem *pci_va, u32 function_type, struct ring_desc *mailbox_rd,
struct be_function_object *pfob);
int be_function_object_destroy(struct be_function_object *pfob);
int be_function_cleanup(struct be_function_object *pfob);
int be_function_get_fw_version(struct be_function_object *pfob,
struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD *fw_version,
mcc_wrb_cqe_callback cb, void *cb_context);
int be_eq_modify_delay(struct be_function_object *pfob,
u32 num_eq, struct be_eq_object **eq_array,
u32 *eq_delay_array, mcc_wrb_cqe_callback cb,
void *cb_context,
struct be_eq_modify_delay_q_ctxt *q_ctxt);
int be_eq_create(struct be_function_object *pfob,
struct ring_desc *rd, u32 eqe_size, u32 num_entries,
u32 watermark, u32 timer_delay, struct be_eq_object *eq_object);
int be_eq_destroy(struct be_eq_object *eq);
int be_cq_create(struct be_function_object *pfob,
struct ring_desc *rd, u32 length,
bool solicited_eventable, bool no_delay,
u32 wm_thresh, struct be_eq_object *eq_object,
struct be_cq_object *cq_object);
int be_cq_destroy(struct be_cq_object *cq);
int be_mcc_ring_create(struct be_function_object *pfob,
struct ring_desc *rd, u32 length,
struct be_mcc_wrb_context *context_array,
u32 num_context_entries,
struct be_cq_object *cq, struct be_mcc_object *mcc);
int be_mcc_ring_destroy(struct be_mcc_object *mcc_object);
int be_mcc_process_cq(struct be_mcc_object *mcc_object, bool rearm);
int be_mcc_add_async_event_callback(struct be_mcc_object *mcc_object,
mcc_async_event_callback cb, void *cb_context);
int be_pci_soft_reset(struct be_function_object *pfob);
int be_drive_POST(struct be_function_object *pfob);
int be_eth_sq_create(struct be_function_object *pfob,
struct ring_desc *rd, u32 length_in_bytes,
u32 type, u32 ulp, struct be_cq_object *cq_object,
struct be_ethsq_object *eth_sq);
struct be_eth_sq_parameters {
u32 port;
u32 rsvd0[2];
} ;
int be_eth_sq_create_ex(struct be_function_object *pfob,
struct ring_desc *rd, u32 length_in_bytes,
u32 type, u32 ulp, struct be_cq_object *cq_object,
struct be_eth_sq_parameters *ex_parameters,
struct be_ethsq_object *eth_sq);
int be_eth_sq_destroy(struct be_ethsq_object *eth_sq);
int be_eth_set_flow_control(struct be_function_object *pfob,
bool txfc_enable, bool rxfc_enable);
int be_eth_get_flow_control(struct be_function_object *pfob,
bool *txfc_enable, bool *rxfc_enable);
int be_eth_set_qos(struct be_function_object *pfob, u32 max_bps, u32 max_pps);
int be_eth_get_qos(struct be_function_object *pfob, u32 *max_bps, u32 *max_pps);
int be_eth_set_frame_size(struct be_function_object *pfob,
u32 *tx_frame_size, u32 *rx_frame_size);
int be_eth_rq_create(struct be_function_object *pfob,
struct ring_desc *rd, struct be_cq_object *cq_object,
struct be_cq_object *bcmc_cq_object,
struct be_ethrq_object *eth_rq);
int be_eth_rq_destroy(struct be_ethrq_object *eth_rq);
int be_eth_rq_destroy_options(struct be_ethrq_object *eth_rq, bool flush,
mcc_wrb_cqe_callback cb, void *cb_context);
int be_eth_rq_set_frag_size(struct be_function_object *pfob,
u32 new_frag_size_bytes, u32 *actual_frag_size_bytes);
int be_eth_rq_get_frag_size(struct be_function_object *pfob,
u32 *frag_size_bytes);
void *be_function_prepare_embedded_fwcmd(struct be_function_object *pfob,
struct MCC_WRB_AMAP *wrb,
u32 payload_length, u32 request_length,
u32 response_length, u32 opcode, u32 subsystem);
void *be_function_prepare_nonembedded_fwcmd(struct be_function_object *pfob,
struct MCC_WRB_AMAP *wrb, void *fwcmd_header_va, u64 fwcmd_header_pa,
u32 payload_length, u32 request_length, u32 response_length,
u32 opcode, u32 subsystem);
struct MCC_WRB_AMAP *
be_function_peek_mcc_wrb(struct be_function_object *pfob);
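/*
 * Sketch of pairing be_function_peek_mcc_wrb() with the
 * BE_PREPARE_EMBEDDED_FWCMD() macro defined above. That a NULL return
 * means no WRB is currently available is an assumption here; locking and
 * posting via be_function_post_mcc_wrb() are omitted.
 */
static inline struct FWCMD_ETH_GET_STATISTICS *
prepare_eth_stats_fwcmd(struct be_function_object *pfob)
{
	struct MCC_WRB_AMAP *wrb = be_function_peek_mcc_wrb(pfob);

	if (!wrb)
		return NULL;
	return BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, ETH_GET_STATISTICS);
}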
int be_rxf_mac_address_read_write(struct be_function_object *pfob,
bool port1, bool mac1, bool mgmt,
bool write, bool permanent, u8 *mac_address,
mcc_wrb_cqe_callback cb,
void *cb_context);
int be_rxf_multicast_config(struct be_function_object *pfob,
bool promiscuous, u32 num, u8 *mac_table,
mcc_wrb_cqe_callback cb,
void *cb_context,
struct be_multicast_q_ctxt *q_ctxt);
int be_rxf_vlan_config(struct be_function_object *pfob,
bool promiscuous, u32 num, u16 *vlan_tag_array,
mcc_wrb_cqe_callback cb, void *cb_context,
struct be_vlan_q_ctxt *q_ctxt);
int be_rxf_link_status(struct be_function_object *pfob,
struct BE_LINK_STATUS *link_status,
mcc_wrb_cqe_callback cb,
void *cb_context,
struct be_link_status_q_ctxt *q_ctxt);
int be_rxf_query_eth_statistics(struct be_function_object *pfob,
struct FWCMD_ETH_GET_STATISTICS *va_for_fwcmd,
u64 pa_for_fwcmd, mcc_wrb_cqe_callback cb,
void *cb_context,
struct be_nonembedded_q_ctxt *q_ctxt);
int be_rxf_promiscuous(struct be_function_object *pfob,
bool enable_port0, bool enable_port1,
mcc_wrb_cqe_callback cb, void *cb_context,
struct be_promiscuous_q_ctxt *q_ctxt);
int be_rxf_filter_config(struct be_function_object *pfob,
struct NTWK_RX_FILTER_SETTINGS *settings,
mcc_wrb_cqe_callback cb,
void *cb_context,
struct be_rxf_filter_q_ctxt *q_ctxt);
/*
* ------------------------------------------------------
* internal functions used by hwlib
* ------------------------------------------------------
*/
int be_function_ring_destroy(struct be_function_object *pfob,
u32 id, u32 ring_type, mcc_wrb_cqe_callback cb,
void *cb_context,
mcc_wrb_cqe_callback internal_cb,
void *internal_callback_context);
int be_function_post_mcc_wrb(struct be_function_object *pfob,
struct MCC_WRB_AMAP *wrb,
struct be_generic_q_ctxt *q_ctxt,
mcc_wrb_cqe_callback cb, void *cb_context,
mcc_wrb_cqe_callback internal_cb,
void *internal_cb_context, void *optional_fwcmd_va,
struct be_mcc_wrb_response_copy *response_copy);
int be_function_queue_mcc_wrb(struct be_function_object *pfob,
struct be_generic_q_ctxt *q_ctxt);
/*
* ------------------------------------------------------
* MCC QUEUE
* ------------------------------------------------------
*/
int be_mpu_init_mailbox(struct be_function_object *pfob, struct ring_desc *rd);
struct MCC_WRB_AMAP *
_be_mpu_peek_ring_wrb(struct be_mcc_object *mcc, bool driving_queue);
struct be_mcc_wrb_context *
_be_mcc_allocate_wrb_context(struct be_function_object *pfob);
void _be_mcc_free_wrb_context(struct be_function_object *pfob,
struct be_mcc_wrb_context *context);
int _be_mpu_post_wrb_mailbox(struct be_function_object *pfob,
struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context);
int _be_mpu_post_wrb_ring(struct be_mcc_object *mcc,
struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context);
void be_drive_mcc_wrb_queue(struct be_mcc_object *mcc);
/*
* ------------------------------------------------------
* Ring Sizes
* ------------------------------------------------------
*/
static inline u32 be_ring_encoding_to_length(u32 encoding, u32 object_size)
{
ASSERT(encoding != 1); /* 1 is rsvd */
ASSERT(encoding < 16);
ASSERT(object_size > 0);
if (encoding == 0) /* 32k deep */
encoding = 16;
return (1 << (encoding - 1)) * object_size;
}
static inline
u32 be_ring_length_to_encoding(u32 length_in_bytes, u32 object_size)
{
u32 count, encoding;
ASSERT(object_size > 0);
ASSERT(length_in_bytes % object_size == 0);
count = length_in_bytes / object_size;
ASSERT(count > 1);
ASSERT(count <= 32 * 1024);
ASSERT(length_in_bytes <= 8 * PAGE_SIZE); /* max ring size in UT */
encoding = __ilog2_u32(count) + 1;
if (encoding == 16)
encoding = 0; /* 32k deep */
return encoding;
}
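/*
 * Worked example of the two helpers above: a ring of 256 objects of 16
 * bytes each is 4096 bytes long; count == 256 encodes as
 * __ilog2_u32(256) + 1 == 9, and decoding 9 gives (1 << (9 - 1)) * 16
 * == 4096 bytes again.
 */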
void be_rd_to_pa_list(struct ring_desc *rd, struct PHYS_ADDR *pa_list,
u32 max_num);
#endif /* __hwlib_h__ */

File diff suppressed because it is too large

View File

@ -1,74 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __mpu_amap_h__
#define __mpu_amap_h__
#include "ep.h"
/* Provides control parameters for the Management Processor Unit. */
struct BE_MPU_CSRMAP_AMAP {
struct BE_EP_CSRMAP_AMAP ep;
u8 rsvd0[128]; /* DWORD 64 */
u8 rsvd1[32]; /* DWORD 68 */
u8 rsvd2[192]; /* DWORD 69 */
u8 rsvd3[192]; /* DWORD 75 */
u8 rsvd4[32]; /* DWORD 81 */
u8 rsvd5[32]; /* DWORD 82 */
u8 rsvd6[32]; /* DWORD 83 */
u8 rsvd7[32]; /* DWORD 84 */
u8 rsvd8[32]; /* DWORD 85 */
u8 rsvd9[32]; /* DWORD 86 */
u8 rsvd10[32]; /* DWORD 87 */
u8 rsvd11[32]; /* DWORD 88 */
u8 rsvd12[32]; /* DWORD 89 */
u8 rsvd13[32]; /* DWORD 90 */
u8 rsvd14[32]; /* DWORD 91 */
u8 rsvd15[32]; /* DWORD 92 */
u8 rsvd16[32]; /* DWORD 93 */
u8 rsvd17[32]; /* DWORD 94 */
u8 rsvd18[32]; /* DWORD 95 */
u8 rsvd19[32]; /* DWORD 96 */
u8 rsvd20[32]; /* DWORD 97 */
u8 rsvd21[32]; /* DWORD 98 */
u8 rsvd22[32]; /* DWORD 99 */
u8 rsvd23[32]; /* DWORD 100 */
u8 rsvd24[32]; /* DWORD 101 */
u8 rsvd25[32]; /* DWORD 102 */
u8 rsvd26[32]; /* DWORD 103 */
u8 rsvd27[32]; /* DWORD 104 */
u8 rsvd28[96]; /* DWORD 105 */
u8 rsvd29[32]; /* DWORD 108 */
u8 rsvd30[32]; /* DWORD 109 */
u8 rsvd31[32]; /* DWORD 110 */
u8 rsvd32[32]; /* DWORD 111 */
u8 rsvd33[32]; /* DWORD 112 */
u8 rsvd34[96]; /* DWORD 113 */
u8 rsvd35[32]; /* DWORD 116 */
u8 rsvd36[32]; /* DWORD 117 */
u8 rsvd37[32]; /* DWORD 118 */
u8 rsvd38[32]; /* DWORD 119 */
u8 rsvd39[32]; /* DWORD 120 */
u8 rsvd40[32]; /* DWORD 121 */
u8 rsvd41[134][32]; /* DWORD 122 */
} __packed;
struct MPU_CSRMAP_AMAP {
u32 dw[256];
};
#endif /* __mpu_amap_h__ */

View File

@ -1,46 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __mpu_context_amap_h__
#define __mpu_context_amap_h__
/*
 * Management command and control ring context. The MPU's BTLR_CTRL1 CSR
* controls the writeback behavior of the producer and consumer index values.
*/
struct BE_MCC_RING_CONTEXT_AMAP {
u8 con_index[16]; /* DWORD 0 */
u8 ring_size[4]; /* DWORD 0 */
u8 cq_id[11]; /* DWORD 0 */
u8 rsvd0; /* DWORD 0 */
u8 prod_index[16]; /* DWORD 1 */
u8 pdid[15]; /* DWORD 1 */
u8 invalid; /* DWORD 1 */
u8 cmd_pending_current[7]; /* DWORD 2 */
u8 rsvd1[25]; /* DWORD 2 */
u8 hpi_port_cq_id[11]; /* DWORD 3 */
u8 rsvd2[5]; /* DWORD 3 */
u8 cmd_pending_max[7]; /* DWORD 3 */
u8 rsvd3[9]; /* DWORD 3 */
} __packed;
struct MCC_RING_CONTEXT_AMAP {
u32 dw[4];
};
#endif /* __mpu_context_amap_h__ */

View File

@ -1,825 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __pcicfg_amap_h__
#define __pcicfg_amap_h__
/* Vendor and Device ID Register. */
struct BE_PCICFG_ID_CSR_AMAP {
u8 vendorid[16]; /* DWORD 0 */
u8 deviceid[16]; /* DWORD 0 */
} __packed;
struct PCICFG_ID_CSR_AMAP {
u32 dw[1];
};
/* IO Bar Register. */
struct BE_PCICFG_IOBAR_CSR_AMAP {
u8 iospace; /* DWORD 0 */
u8 rsvd0[7]; /* DWORD 0 */
u8 iobar[24]; /* DWORD 0 */
} __packed;
struct PCICFG_IOBAR_CSR_AMAP {
u32 dw[1];
};
/* Memory BAR 0 Register. */
struct BE_PCICFG_MEMBAR0_CSR_AMAP {
u8 memspace; /* DWORD 0 */
u8 type[2]; /* DWORD 0 */
u8 pf; /* DWORD 0 */
u8 rsvd0[10]; /* DWORD 0 */
u8 membar0[18]; /* DWORD 0 */
} __packed;
struct PCICFG_MEMBAR0_CSR_AMAP {
u32 dw[1];
};
/* Memory BAR 1 - Low Address Register. */
struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP {
u8 memspace; /* DWORD 0 */
u8 type[2]; /* DWORD 0 */
u8 pf; /* DWORD 0 */
u8 rsvd0[13]; /* DWORD 0 */
u8 membar1lo[15]; /* DWORD 0 */
} __packed;
struct PCICFG_MEMBAR1_LO_CSR_AMAP {
u32 dw[1];
};
/* Memory BAR 1 - High Address Register. */
struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP {
u8 membar1hi[32]; /* DWORD 0 */
} __packed;
struct PCICFG_MEMBAR1_HI_CSR_AMAP {
u32 dw[1];
};
/* Memory BAR 2 - Low Address Register. */
struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP {
u8 memspace; /* DWORD 0 */
u8 type[2]; /* DWORD 0 */
u8 pf; /* DWORD 0 */
u8 rsvd0[17]; /* DWORD 0 */
u8 membar2lo[11]; /* DWORD 0 */
} __packed;
struct PCICFG_MEMBAR2_LO_CSR_AMAP {
u32 dw[1];
};
/* Memory BAR 2 - High Address Register. */
struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP {
u8 membar2hi[32]; /* DWORD 0 */
} __packed;
struct PCICFG_MEMBAR2_HI_CSR_AMAP {
u32 dw[1];
};
/* Subsystem Vendor and ID (Function 0) Register. */
struct BE_PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP {
u8 subsys_vendor_id[16]; /* DWORD 0 */
u8 subsys_id[16]; /* DWORD 0 */
} __packed;
struct PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP {
u32 dw[1];
};
/* Subsystem Vendor and ID (Function 1) Register. */
struct BE_PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP {
u8 subsys_vendor_id[16]; /* DWORD 0 */
u8 subsys_id[16]; /* DWORD 0 */
} __packed;
struct PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP {
u32 dw[1];
};
/* Semaphore Register. */
struct BE_PCICFG_SEMAPHORE_CSR_AMAP {
u8 locked; /* DWORD 0 */
u8 rsvd0[31]; /* DWORD 0 */
} __packed;
struct PCICFG_SEMAPHORE_CSR_AMAP {
u32 dw[1];
};
/* Soft Reset Register. */
struct BE_PCICFG_SOFT_RESET_CSR_AMAP {
u8 rsvd0[7]; /* DWORD 0 */
u8 softreset; /* DWORD 0 */
u8 rsvd1[16]; /* DWORD 0 */
u8 nec_ll_rcvdetect_i[8]; /* DWORD 0 */
} __packed;
struct PCICFG_SOFT_RESET_CSR_AMAP {
u32 dw[1];
};
/* Unrecoverable Error Status (Low) Register. Each bit corresponds to
* an internal Unrecoverable Error. These are set by hardware and may be
* cleared by writing a one to the respective bit(s) to be cleared. Any
* bit being set that is also unmasked will result in Unrecoverable Error
* interrupt notification to the host CPU and/or Server Management chip
* and the transitioning of BladeEngine to an Offline state.
*/
struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP {
u8 cev_ue_status; /* DWORD 0 */
u8 ctx_ue_status; /* DWORD 0 */
u8 dbuf_ue_status; /* DWORD 0 */
u8 erx_ue_status; /* DWORD 0 */
u8 host_ue_status; /* DWORD 0 */
u8 mpu_ue_status; /* DWORD 0 */
u8 ndma_ue_status; /* DWORD 0 */
u8 ptc_ue_status; /* DWORD 0 */
u8 rdma_ue_status; /* DWORD 0 */
u8 rxf_ue_status; /* DWORD 0 */
u8 rxips_ue_status; /* DWORD 0 */
u8 rxulp0_ue_status; /* DWORD 0 */
u8 rxulp1_ue_status; /* DWORD 0 */
u8 rxulp2_ue_status; /* DWORD 0 */
u8 tim_ue_status; /* DWORD 0 */
u8 tpost_ue_status; /* DWORD 0 */
u8 tpre_ue_status; /* DWORD 0 */
u8 txips_ue_status; /* DWORD 0 */
u8 txulp0_ue_status; /* DWORD 0 */
u8 txulp1_ue_status; /* DWORD 0 */
u8 uc_ue_status; /* DWORD 0 */
u8 wdma_ue_status; /* DWORD 0 */
u8 txulp2_ue_status; /* DWORD 0 */
u8 host1_ue_status; /* DWORD 0 */
u8 p0_ob_link_ue_status; /* DWORD 0 */
u8 p1_ob_link_ue_status; /* DWORD 0 */
u8 host_gpio_ue_status; /* DWORD 0 */
u8 mbox_netw_ue_status; /* DWORD 0 */
u8 mbox_stor_ue_status; /* DWORD 0 */
u8 axgmac0_ue_status; /* DWORD 0 */
u8 axgmac1_ue_status; /* DWORD 0 */
u8 mpu_intpend_ue_status; /* DWORD 0 */
} __packed;
struct PCICFG_UE_STATUS_LOW_CSR_AMAP {
u32 dw[1];
};
/* Unrecoverable Error Status (High) Register. Each bit corresponds to
* an internal Unrecoverable Error. These are set by hardware and may be
* cleared by writing a one to the respective bit(s) to be cleared. Any
* bit being set that is also unmasked will result in Unrecoverable Error
* interrupt notification to the host CPU and/or Server Management chip;
* and the transitioning of BladeEngine to an Offline state.
*/
struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP {
u8 jtag_ue_status; /* DWORD 0 */
u8 lpcmemhost_ue_status; /* DWORD 0 */
u8 mgmt_mac_ue_status; /* DWORD 0 */
u8 mpu_iram_ue_status; /* DWORD 0 */
u8 pcs0online_ue_status; /* DWORD 0 */
u8 pcs1online_ue_status; /* DWORD 0 */
u8 pctl0_ue_status; /* DWORD 0 */
u8 pctl1_ue_status; /* DWORD 0 */
u8 pmem_ue_status; /* DWORD 0 */
u8 rr_ue_status; /* DWORD 0 */
u8 rxpp_ue_status; /* DWORD 0 */
u8 txpb_ue_status; /* DWORD 0 */
u8 txp_ue_status; /* DWORD 0 */
u8 xaui_ue_status; /* DWORD 0 */
u8 arm_ue_status; /* DWORD 0 */
u8 ipc_ue_status; /* DWORD 0 */
u8 rsvd0[16]; /* DWORD 0 */
} __packed;
struct PCICFG_UE_STATUS_HI_CSR_AMAP {
u32 dw[1];
};
/* Unrecoverable Error Mask (Low) Register. Each bit, when set to one,
* will mask the associated Unrecoverable Error status bit from notification
 * of Unrecoverable Error to the host CPU and/or Server Management chip and the
* transitioning of all BladeEngine units to an Offline state.
*/
struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP {
u8 cev_ue_mask; /* DWORD 0 */
u8 ctx_ue_mask; /* DWORD 0 */
u8 dbuf_ue_mask; /* DWORD 0 */
u8 erx_ue_mask; /* DWORD 0 */
u8 host_ue_mask; /* DWORD 0 */
u8 mpu_ue_mask; /* DWORD 0 */
u8 ndma_ue_mask; /* DWORD 0 */
u8 ptc_ue_mask; /* DWORD 0 */
u8 rdma_ue_mask; /* DWORD 0 */
u8 rxf_ue_mask; /* DWORD 0 */
u8 rxips_ue_mask; /* DWORD 0 */
u8 rxulp0_ue_mask; /* DWORD 0 */
u8 rxulp1_ue_mask; /* DWORD 0 */
u8 rxulp2_ue_mask; /* DWORD 0 */
u8 tim_ue_mask; /* DWORD 0 */
u8 tpost_ue_mask; /* DWORD 0 */
u8 tpre_ue_mask; /* DWORD 0 */
u8 txips_ue_mask; /* DWORD 0 */
u8 txulp0_ue_mask; /* DWORD 0 */
u8 txulp1_ue_mask; /* DWORD 0 */
u8 uc_ue_mask; /* DWORD 0 */
u8 wdma_ue_mask; /* DWORD 0 */
u8 txulp2_ue_mask; /* DWORD 0 */
u8 host1_ue_mask; /* DWORD 0 */
u8 p0_ob_link_ue_mask; /* DWORD 0 */
u8 p1_ob_link_ue_mask; /* DWORD 0 */
u8 host_gpio_ue_mask; /* DWORD 0 */
u8 mbox_netw_ue_mask; /* DWORD 0 */
u8 mbox_stor_ue_mask; /* DWORD 0 */
u8 axgmac0_ue_mask; /* DWORD 0 */
u8 axgmac1_ue_mask; /* DWORD 0 */
u8 mpu_intpend_ue_mask; /* DWORD 0 */
} __packed;
struct PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP {
u32 dw[1];
};
/* Unrecoverable Error Mask (High) Register. Each bit, when set to one,
* will mask the associated Unrecoverable Error status bit from notification
 * of Unrecoverable Error to the host CPU and/or Server Management chip and the
* transitioning of all BladeEngine units to an Offline state.
*/
struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP {
u8 jtag_ue_mask; /* DWORD 0 */
u8 lpcmemhost_ue_mask; /* DWORD 0 */
u8 mgmt_mac_ue_mask; /* DWORD 0 */
u8 mpu_iram_ue_mask; /* DWORD 0 */
u8 pcs0online_ue_mask; /* DWORD 0 */
u8 pcs1online_ue_mask; /* DWORD 0 */
u8 pctl0_ue_mask; /* DWORD 0 */
u8 pctl1_ue_mask; /* DWORD 0 */
u8 pmem_ue_mask; /* DWORD 0 */
u8 rr_ue_mask; /* DWORD 0 */
u8 rxpp_ue_mask; /* DWORD 0 */
u8 txpb_ue_mask; /* DWORD 0 */
u8 txp_ue_mask; /* DWORD 0 */
u8 xaui_ue_mask; /* DWORD 0 */
u8 arm_ue_mask; /* DWORD 0 */
u8 ipc_ue_mask; /* DWORD 0 */
u8 rsvd0[16]; /* DWORD 0 */
} __packed;
struct PCICFG_UE_STATUS_HI_MASK_CSR_AMAP {
u32 dw[1];
};
/* Online Control Register 0. This register controls various units within
* BladeEngine being in an Online or Offline state.
*/
struct BE_PCICFG_ONLINE0_CSR_AMAP {
u8 cev_online; /* DWORD 0 */
u8 ctx_online; /* DWORD 0 */
u8 dbuf_online; /* DWORD 0 */
u8 erx_online; /* DWORD 0 */
u8 host_online; /* DWORD 0 */
u8 mpu_online; /* DWORD 0 */
u8 ndma_online; /* DWORD 0 */
u8 ptc_online; /* DWORD 0 */
u8 rdma_online; /* DWORD 0 */
u8 rxf_online; /* DWORD 0 */
u8 rxips_online; /* DWORD 0 */
u8 rxulp0_online; /* DWORD 0 */
u8 rxulp1_online; /* DWORD 0 */
u8 rxulp2_online; /* DWORD 0 */
u8 tim_online; /* DWORD 0 */
u8 tpost_online; /* DWORD 0 */
u8 tpre_online; /* DWORD 0 */
u8 txips_online; /* DWORD 0 */
u8 txulp0_online; /* DWORD 0 */
u8 txulp1_online; /* DWORD 0 */
u8 uc_online; /* DWORD 0 */
u8 wdma_online; /* DWORD 0 */
u8 txulp2_online; /* DWORD 0 */
u8 host1_online; /* DWORD 0 */
u8 p0_ob_link_online; /* DWORD 0 */
u8 p1_ob_link_online; /* DWORD 0 */
u8 host_gpio_online; /* DWORD 0 */
u8 mbox_netw_online; /* DWORD 0 */
u8 mbox_stor_online; /* DWORD 0 */
u8 axgmac0_online; /* DWORD 0 */
u8 axgmac1_online; /* DWORD 0 */
u8 mpu_intpend_online; /* DWORD 0 */
} __packed;
struct PCICFG_ONLINE0_CSR_AMAP {
u32 dw[1];
};
/* Online Control Register 1. This register controls various units within
* BladeEngine being in an Online or Offline state.
*/
struct BE_PCICFG_ONLINE1_CSR_AMAP {
u8 jtag_online; /* DWORD 0 */
u8 lpcmemhost_online; /* DWORD 0 */
u8 mgmt_mac_online; /* DWORD 0 */
u8 mpu_iram_online; /* DWORD 0 */
u8 pcs0online_online; /* DWORD 0 */
u8 pcs1online_online; /* DWORD 0 */
u8 pctl0_online; /* DWORD 0 */
u8 pctl1_online; /* DWORD 0 */
u8 pmem_online; /* DWORD 0 */
u8 rr_online; /* DWORD 0 */
u8 rxpp_online; /* DWORD 0 */
u8 txpb_online; /* DWORD 0 */
u8 txp_online; /* DWORD 0 */
u8 xaui_online; /* DWORD 0 */
u8 arm_online; /* DWORD 0 */
u8 ipc_online; /* DWORD 0 */
u8 rsvd0[16]; /* DWORD 0 */
} __packed;
struct PCICFG_ONLINE1_CSR_AMAP {
u32 dw[1];
};
/* Host Timer Register. */
struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP {
u8 hosttimer[24]; /* DWORD 0 */
u8 hostintr; /* DWORD 0 */
u8 rsvd0[7]; /* DWORD 0 */
} __packed;
struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP {
u32 dw[1];
};
/* Scratchpad Register (for software use). */
struct BE_PCICFG_SCRATCHPAD_CSR_AMAP {
u8 scratchpad[32]; /* DWORD 0 */
} __packed;
struct PCICFG_SCRATCHPAD_CSR_AMAP {
u32 dw[1];
};
/* PCI Express Capabilities Register. */
struct BE_PCICFG_PCIE_CAP_CSR_AMAP {
u8 capid[8]; /* DWORD 0 */
u8 nextcap[8]; /* DWORD 0 */
u8 capver[4]; /* DWORD 0 */
u8 devport[4]; /* DWORD 0 */
u8 rsvd0[6]; /* DWORD 0 */
u8 rsvd1[2]; /* DWORD 0 */
} __packed;
struct PCICFG_PCIE_CAP_CSR_AMAP {
u32 dw[1];
};
/* PCI Express Device Capabilities Register. */
struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP {
u8 payload[3]; /* DWORD 0 */
u8 rsvd0[3]; /* DWORD 0 */
u8 lo_lat[3]; /* DWORD 0 */
u8 l1_lat[3]; /* DWORD 0 */
u8 rsvd1[3]; /* DWORD 0 */
u8 rsvd2[3]; /* DWORD 0 */
u8 pwr_value[8]; /* DWORD 0 */
u8 pwr_scale[2]; /* DWORD 0 */
u8 rsvd3[4]; /* DWORD 0 */
} __packed;
struct PCICFG_PCIE_DEVCAP_CSR_AMAP {
u32 dw[1];
};
/* PCI Express Device Control/Status Registers. */
struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP {
u8 CorrErrReportEn; /* DWORD 0 */
u8 NonFatalErrReportEn; /* DWORD 0 */
u8 FatalErrReportEn; /* DWORD 0 */
u8 UnsuppReqReportEn; /* DWORD 0 */
u8 EnableRelaxOrder; /* DWORD 0 */
u8 Max_Payload_Size[3]; /* DWORD 0 */
u8 ExtendTagFieldEnable; /* DWORD 0 */
u8 PhantomFnEnable; /* DWORD 0 */
u8 AuxPwrPMEnable; /* DWORD 0 */
u8 EnableNoSnoop; /* DWORD 0 */
u8 Max_Read_Req_Size[3]; /* DWORD 0 */
u8 rsvd0; /* DWORD 0 */
u8 CorrErrDetect; /* DWORD 0 */
u8 NonFatalErrDetect; /* DWORD 0 */
u8 FatalErrDetect; /* DWORD 0 */
u8 UnsuppReqDetect; /* DWORD 0 */
u8 AuxPwrDetect; /* DWORD 0 */
u8 TransPending; /* DWORD 0 */
u8 rsvd1[10]; /* DWORD 0 */
} __packed;
struct PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP {
u32 dw[1];
};
/* PCI Express Link Capabilities Register. */
struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP {
u8 MaxLinkSpeed[4]; /* DWORD 0 */
u8 MaxLinkWidth[6]; /* DWORD 0 */
u8 ASPMSupport[2]; /* DWORD 0 */
u8 L0sExitLat[3]; /* DWORD 0 */
u8 L1ExitLat[3]; /* DWORD 0 */
u8 rsvd0[6]; /* DWORD 0 */
u8 PortNum[8]; /* DWORD 0 */
} __packed;
struct PCICFG_PCIE_LINK_CAP_CSR_AMAP {
u32 dw[1];
};
/* PCI Express Link Status Register. */
struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP {
u8 ASPMCtl[2]; /* DWORD 0 */
u8 rsvd0; /* DWORD 0 */
u8 ReadCmplBndry; /* DWORD 0 */
u8 LinkDisable; /* DWORD 0 */
u8 RetrainLink; /* DWORD 0 */
u8 CommonClkConfig; /* DWORD 0 */
u8 ExtendSync; /* DWORD 0 */
u8 rsvd1[8]; /* DWORD 0 */
u8 LinkSpeed[4]; /* DWORD 0 */
u8 NegLinkWidth[6]; /* DWORD 0 */
u8 LinkTrainErr; /* DWORD 0 */
u8 LinkTrain; /* DWORD 0 */
u8 SlotClkConfig; /* DWORD 0 */
u8 rsvd2[3]; /* DWORD 0 */
} __packed;
struct PCICFG_PCIE_LINK_STATUS_CSR_AMAP {
u32 dw[1];
};
/* PCI Express MSI Configuration Register. */
struct BE_PCICFG_MSI_CSR_AMAP {
u8 capid[8]; /* DWORD 0 */
u8 nextptr[8]; /* DWORD 0 */
u8 tablesize[11]; /* DWORD 0 */
u8 rsvd0[3]; /* DWORD 0 */
u8 funcmask; /* DWORD 0 */
u8 en; /* DWORD 0 */
} __packed;
struct PCICFG_MSI_CSR_AMAP {
u32 dw[1];
};
/* MSI-X Table Offset Register. */
struct BE_PCICFG_MSIX_TABLE_CSR_AMAP {
u8 tablebir[3]; /* DWORD 0 */
u8 offset[29]; /* DWORD 0 */
} __packed;
struct PCICFG_MSIX_TABLE_CSR_AMAP {
u32 dw[1];
};
/* MSI-X PBA Offset Register. */
struct BE_PCICFG_MSIX_PBA_CSR_AMAP {
u8 pbabir[3]; /* DWORD 0 */
u8 offset[29]; /* DWORD 0 */
} __packed;
struct PCICFG_MSIX_PBA_CSR_AMAP {
u32 dw[1];
};
/* PCI Express MSI-X Message Vector Control Register. */
struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP {
u8 vector_control; /* DWORD 0 */
u8 rsvd0[31]; /* DWORD 0 */
} __packed;
struct PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP {
u32 dw[1];
};
/* PCI Express MSI-X Message Data Register. */
struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP {
u8 data[16]; /* DWORD 0 */
u8 rsvd0[16]; /* DWORD 0 */
} __packed;
struct PCICFG_MSIX_MSG_DATA_CSR_AMAP {
u32 dw[1];
};
/* PCI Express MSI-X Message Address Register - High Part. */
struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP {
u8 addr[32]; /* DWORD 0 */
} __packed;
struct PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP {
u32 dw[1];
};
/* PCI Express MSI-X Message Address Register - Low Part. */
struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP {
u8 rsvd0[2]; /* DWORD 0 */
u8 addr[30]; /* DWORD 0 */
} __packed;
struct PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP {
u32 dw[1];
};
struct BE_PCICFG_ANON_18_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
} __packed;
struct PCICFG_ANON_18_RSVD_AMAP {
u32 dw[1];
};
struct BE_PCICFG_ANON_19_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
} __packed;
struct PCICFG_ANON_19_RSVD_AMAP {
u32 dw[1];
};
struct BE_PCICFG_ANON_20_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
u8 rsvd1[25][32]; /* DWORD 1 */
} __packed;
struct PCICFG_ANON_20_RSVD_AMAP {
u32 dw[26];
};
struct BE_PCICFG_ANON_21_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
u8 rsvd1[1919][32]; /* DWORD 1 */
} __packed;
struct PCICFG_ANON_21_RSVD_AMAP {
u32 dw[1920];
};
struct BE_PCICFG_ANON_22_MESSAGE_AMAP {
struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP vec_ctrl;
struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP msg_data;
struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP addr_hi;
struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP addr_low;
} __packed;
struct PCICFG_ANON_22_MESSAGE_AMAP {
u32 dw[4];
};
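/* Sketch derived from the layout above (names illustrative): each ANON_22
 * message packs the four per-vector MSI-X CSRs into dw[4], i.e. one 16-byte
 * MSI-X table entry, so the message[32] array in the function-0 map below
 * spans 512 bytes of table space.
 */
enum {
	BE_MSIX_ENTRY_BYTES = 4 * 4, /* dw[4] per vector */
	BE_MSIX_VECTORS = 32, /* message[32] below */
	BE_MSIX_TABLE_BYTES = BE_MSIX_ENTRY_BYTES * BE_MSIX_VECTORS, /* 512 */
};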
struct BE_PCICFG_ANON_23_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
u8 rsvd1[895][32]; /* DWORD 1 */
} __packed;
struct PCICFG_ANON_23_RSVD_AMAP {
u32 dw[896];
};
/* These PCI Configuration Space registers are for the Storage Function of
 * BladeEngine (Function 0), laid out in the memory map described by the
 * structure below.
 */
struct BE_PCICFG0_CSRMAP_AMAP {
struct BE_PCICFG_ID_CSR_AMAP id;
u8 rsvd0[32]; /* DWORD 1 */
u8 rsvd1[32]; /* DWORD 2 */
u8 rsvd2[32]; /* DWORD 3 */
struct BE_PCICFG_IOBAR_CSR_AMAP iobar;
struct BE_PCICFG_MEMBAR0_CSR_AMAP membar0;
struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP membar1_lo;
struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP membar1_hi;
struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP membar2_lo;
struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP membar2_hi;
u8 rsvd3[32]; /* DWORD 10 */
struct BE_PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP subsystem_id;
u8 rsvd4[32]; /* DWORD 12 */
u8 rsvd5[32]; /* DWORD 13 */
u8 rsvd6[32]; /* DWORD 14 */
u8 rsvd7[32]; /* DWORD 15 */
struct BE_PCICFG_SEMAPHORE_CSR_AMAP semaphore[4];
struct BE_PCICFG_SOFT_RESET_CSR_AMAP soft_reset;
u8 rsvd8[32]; /* DWORD 21 */
struct BE_PCICFG_SCRATCHPAD_CSR_AMAP scratchpad;
u8 rsvd9[32]; /* DWORD 23 */
u8 rsvd10[32]; /* DWORD 24 */
u8 rsvd11[32]; /* DWORD 25 */
u8 rsvd12[32]; /* DWORD 26 */
u8 rsvd13[32]; /* DWORD 27 */
u8 rsvd14[2][32]; /* DWORD 28 */
u8 rsvd15[32]; /* DWORD 30 */
u8 rsvd16[32]; /* DWORD 31 */
u8 rsvd17[8][32]; /* DWORD 32 */
struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP ue_status_low;
struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP ue_status_hi;
struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP ue_status_low_mask;
struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP ue_status_hi_mask;
struct BE_PCICFG_ONLINE0_CSR_AMAP online0;
struct BE_PCICFG_ONLINE1_CSR_AMAP online1;
u8 rsvd18[32]; /* DWORD 46 */
u8 rsvd19[32]; /* DWORD 47 */
u8 rsvd20[32]; /* DWORD 48 */
u8 rsvd21[32]; /* DWORD 49 */
struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP host_timer_int_ctrl;
u8 rsvd22[32]; /* DWORD 51 */
struct BE_PCICFG_PCIE_CAP_CSR_AMAP pcie_cap;
struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP pcie_devcap;
struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP pcie_control_status;
struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP pcie_link_cap;
struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP pcie_link_status;
struct BE_PCICFG_MSI_CSR_AMAP msi;
struct BE_PCICFG_MSIX_TABLE_CSR_AMAP msix_table_offset;
struct BE_PCICFG_MSIX_PBA_CSR_AMAP msix_pba_offset;
u8 rsvd23[32]; /* DWORD 60 */
u8 rsvd24[32]; /* DWORD 61 */
u8 rsvd25[32]; /* DWORD 62 */
u8 rsvd26[32]; /* DWORD 63 */
u8 rsvd27[32]; /* DWORD 64 */
u8 rsvd28[32]; /* DWORD 65 */
u8 rsvd29[32]; /* DWORD 66 */
u8 rsvd30[32]; /* DWORD 67 */
u8 rsvd31[32]; /* DWORD 68 */
u8 rsvd32[32]; /* DWORD 69 */
u8 rsvd33[32]; /* DWORD 70 */
u8 rsvd34[32]; /* DWORD 71 */
u8 rsvd35[32]; /* DWORD 72 */
u8 rsvd36[32]; /* DWORD 73 */
u8 rsvd37[32]; /* DWORD 74 */
u8 rsvd38[32]; /* DWORD 75 */
u8 rsvd39[32]; /* DWORD 76 */
u8 rsvd40[32]; /* DWORD 77 */
u8 rsvd41[32]; /* DWORD 78 */
u8 rsvd42[32]; /* DWORD 79 */
u8 rsvd43[32]; /* DWORD 80 */
u8 rsvd44[32]; /* DWORD 81 */
u8 rsvd45[32]; /* DWORD 82 */
u8 rsvd46[32]; /* DWORD 83 */
u8 rsvd47[32]; /* DWORD 84 */
u8 rsvd48[32]; /* DWORD 85 */
u8 rsvd49[32]; /* DWORD 86 */
u8 rsvd50[32]; /* DWORD 87 */
u8 rsvd51[32]; /* DWORD 88 */
u8 rsvd52[32]; /* DWORD 89 */
u8 rsvd53[32]; /* DWORD 90 */
u8 rsvd54[32]; /* DWORD 91 */
u8 rsvd55[32]; /* DWORD 92 */
u8 rsvd56[832]; /* DWORD 93 */
u8 rsvd57[32]; /* DWORD 119 */
u8 rsvd58[32]; /* DWORD 120 */
u8 rsvd59[32]; /* DWORD 121 */
u8 rsvd60[32]; /* DWORD 122 */
u8 rsvd61[32]; /* DWORD 123 */
u8 rsvd62[32]; /* DWORD 124 */
u8 rsvd63[32]; /* DWORD 125 */
u8 rsvd64[32]; /* DWORD 126 */
u8 rsvd65[32]; /* DWORD 127 */
u8 rsvd66[61440]; /* DWORD 128 */
struct BE_PCICFG_ANON_22_MESSAGE_AMAP message[32];
u8 rsvd67[28672]; /* DWORD 2176 */
u8 rsvd68[32]; /* DWORD 3072 */
u8 rsvd69[1023][32]; /* DWORD 3073 */
} __packed;
struct PCICFG0_CSRMAP_AMAP {
u32 dw[4096];
};
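/* Size sanity sketch (C11 userspace style, illustrative only): the
 * function-0 CSR map above covers dw[4096], i.e. a 16 KiB register window.
 */
_Static_assert(sizeof(struct PCICFG0_CSRMAP_AMAP) == 4096 * 4,
	       "function-0 CSR map is 16 KiB");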
struct BE_PCICFG_ANON_24_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
} __packed;
struct PCICFG_ANON_24_RSVD_AMAP {
u32 dw[1];
};
struct BE_PCICFG_ANON_25_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
} __packed;
struct PCICFG_ANON_25_RSVD_AMAP {
u32 dw[1];
};
struct BE_PCICFG_ANON_26_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
} __packed;
struct PCICFG_ANON_26_RSVD_AMAP {
u32 dw[1];
};
struct BE_PCICFG_ANON_27_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
u8 rsvd1[32]; /* DWORD 1 */
} __packed;
struct PCICFG_ANON_27_RSVD_AMAP {
u32 dw[2];
};
struct BE_PCICFG_ANON_28_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
u8 rsvd1[3][32]; /* DWORD 1 */
} __packed;
struct PCICFG_ANON_28_RSVD_AMAP {
u32 dw[4];
};
struct BE_PCICFG_ANON_29_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
u8 rsvd1[36][32]; /* DWORD 1 */
} __packed;
struct PCICFG_ANON_29_RSVD_AMAP {
u32 dw[37];
};
struct BE_PCICFG_ANON_30_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
u8 rsvd1[1930][32]; /* DWORD 1 */
} __packed;
struct PCICFG_ANON_30_RSVD_AMAP {
u32 dw[1931];
};
struct BE_PCICFG_ANON_31_MESSAGE_AMAP {
struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP vec_ctrl;
struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP msg_data;
struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP addr_hi;
struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP addr_low;
} __packed;
struct PCICFG_ANON_31_MESSAGE_AMAP {
u32 dw[4];
};
struct BE_PCICFG_ANON_32_RSVD_AMAP {
u8 rsvd0[32]; /* DWORD 0 */
u8 rsvd1[895][32]; /* DWORD 1 */
} __packed;
struct PCICFG_ANON_32_RSVD_AMAP {
u32 dw[896];
};
/* This PCI configuration space register map is for the Networking Function of
* BladeEngine (Function 1).
*/
struct BE_PCICFG1_CSRMAP_AMAP {
struct BE_PCICFG_ID_CSR_AMAP id;
u8 rsvd0[32]; /* DWORD 1 */
u8 rsvd1[32]; /* DWORD 2 */
u8 rsvd2[32]; /* DWORD 3 */
struct BE_PCICFG_IOBAR_CSR_AMAP iobar;
struct BE_PCICFG_MEMBAR0_CSR_AMAP membar0;
struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP membar1_lo;
struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP membar1_hi;
struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP membar2_lo;
struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP membar2_hi;
u8 rsvd3[32]; /* DWORD 10 */
struct BE_PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP subsystem_id;
u8 rsvd4[32]; /* DWORD 12 */
u8 rsvd5[32]; /* DWORD 13 */
u8 rsvd6[32]; /* DWORD 14 */
u8 rsvd7[32]; /* DWORD 15 */
struct BE_PCICFG_SEMAPHORE_CSR_AMAP semaphore[4];
struct BE_PCICFG_SOFT_RESET_CSR_AMAP soft_reset;
u8 rsvd8[32]; /* DWORD 21 */
struct BE_PCICFG_SCRATCHPAD_CSR_AMAP scratchpad;
u8 rsvd9[32]; /* DWORD 23 */
u8 rsvd10[32]; /* DWORD 24 */
u8 rsvd11[32]; /* DWORD 25 */
u8 rsvd12[32]; /* DWORD 26 */
u8 rsvd13[32]; /* DWORD 27 */
u8 rsvd14[2][32]; /* DWORD 28 */
u8 rsvd15[32]; /* DWORD 30 */
u8 rsvd16[32]; /* DWORD 31 */
u8 rsvd17[8][32]; /* DWORD 32 */
struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP ue_status_low;
struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP ue_status_hi;
struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP ue_status_low_mask;
struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP ue_status_hi_mask;
struct BE_PCICFG_ONLINE0_CSR_AMAP online0;
struct BE_PCICFG_ONLINE1_CSR_AMAP online1;
u8 rsvd18[32]; /* DWORD 46 */
u8 rsvd19[32]; /* DWORD 47 */
u8 rsvd20[32]; /* DWORD 48 */
u8 rsvd21[32]; /* DWORD 49 */
struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP host_timer_int_ctrl;
u8 rsvd22[32]; /* DWORD 51 */
struct BE_PCICFG_PCIE_CAP_CSR_AMAP pcie_cap;
struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP pcie_devcap;
struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP pcie_control_status;
struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP pcie_link_cap;
struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP pcie_link_status;
struct BE_PCICFG_MSI_CSR_AMAP msi;
struct BE_PCICFG_MSIX_TABLE_CSR_AMAP msix_table_offset;
struct BE_PCICFG_MSIX_PBA_CSR_AMAP msix_pba_offset;
u8 rsvd23[64]; /* DWORD 60 */
u8 rsvd24[32]; /* DWORD 62 */
u8 rsvd25[32]; /* DWORD 63 */
u8 rsvd26[32]; /* DWORD 64 */
u8 rsvd27[32]; /* DWORD 65 */
u8 rsvd28[32]; /* DWORD 66 */
u8 rsvd29[32]; /* DWORD 67 */
u8 rsvd30[32]; /* DWORD 68 */
u8 rsvd31[32]; /* DWORD 69 */
u8 rsvd32[32]; /* DWORD 70 */
u8 rsvd33[32]; /* DWORD 71 */
u8 rsvd34[32]; /* DWORD 72 */
u8 rsvd35[32]; /* DWORD 73 */
u8 rsvd36[32]; /* DWORD 74 */
u8 rsvd37[128]; /* DWORD 75 */
u8 rsvd38[32]; /* DWORD 79 */
u8 rsvd39[1184]; /* DWORD 80 */
u8 rsvd40[61792]; /* DWORD 117 */
struct BE_PCICFG_ANON_31_MESSAGE_AMAP message[32];
u8 rsvd41[28672]; /* DWORD 2176 */
u8 rsvd42[32]; /* DWORD 3072 */
u8 rsvd43[1023][32]; /* DWORD 3073 */
} __packed;
struct PCICFG1_CSRMAP_AMAP {
u32 dw[4096];
};
#endif /* __pcicfg_amap_h__ */

View File

@ -1,111 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __post_codes_amap_h__
#define __post_codes_amap_h__
/* --- MGMT_HBA_POST_STAGE_ENUM --- */
#define POST_STAGE_POWER_ON_RESET (0) /* State after a cold or warm boot. */
#define POST_STAGE_AWAITING_HOST_RDY (1) /* ARM boot code awaiting a
go-ahead from the host. */
#define POST_STAGE_HOST_RDY (2) /* Host has given go-ahead to ARM. */
#define POST_STAGE_BE_RESET (3) /* Host wants to reset chip, this is a chip
workaround */
#define POST_STAGE_SEEPROM_CS_START (256) /* SEEPROM checksum
test start. */
#define POST_STAGE_SEEPROM_CS_DONE (257) /* SEEPROM checksum test
done. */
#define POST_STAGE_DDR_CONFIG_START (512) /* DDR configuration start. */
#define POST_STAGE_DDR_CONFIG_DONE (513) /* DDR configuration done. */
#define POST_STAGE_DDR_CALIBRATE_START (768) /* DDR calibration start. */
#define POST_STAGE_DDR_CALIBRATE_DONE (769) /* DDR calibration done. */
#define POST_STAGE_DDR_TEST_START (1024) /* DDR memory test start. */
#define POST_STAGE_DDR_TEST_DONE (1025) /* DDR memory test done. */
#define POST_STAGE_REDBOOT_INIT_START (1536) /* Redboot starts execution. */
#define POST_STAGE_REDBOOT_INIT_DONE (1537) /* Redboot has finished execution. */
#define POST_STAGE_FW_IMAGE_LOAD_START (1792) /* Firmware image load to
DDR start. */
#define POST_STAGE_FW_IMAGE_LOAD_DONE (1793) /* Firmware image load
to DDR done. */
#define POST_STAGE_ARMFW_START (2048) /* ARMfw runtime code
starts execution. */
#define POST_STAGE_DHCP_QUERY_START (2304) /* DHCP server query start. */
#define POST_STAGE_DHCP_QUERY_DONE (2305) /* DHCP server query done. */
#define POST_STAGE_BOOT_TARGET_DISCOVERY_START (2560) /* Boot Target
Discovery Start. */
#define POST_STAGE_BOOT_TARGET_DISCOVERY_DONE (2561) /* Boot Target
Discovery Done. */
#define POST_STAGE_RC_OPTION_SET (2816) /* Remote configuration
option is set in SEEPROM */
#define POST_STAGE_SWITCH_LINK (2817) /* Wait for link up on switch */
#define POST_STAGE_SEND_ICDS_MESSAGE (2818) /* Send the ICDS message
to switch */
#define POST_STAGE_PERFROM_TFTP (2819) /* Download XML using TFTP */
#define POST_STAGE_PARSE_XML (2820) /* Parse XML file */
#define POST_STAGE_DOWNLOAD_IMAGE (2821) /* Download IMAGE from
TFTP server */
#define POST_STAGE_FLASH_IMAGE (2822) /* Flash the IMAGE */
#define POST_STAGE_RC_DONE (2823) /* Remote configuration
complete */
#define POST_STAGE_REBOOT_SYSTEM (2824) /* Upgrade IMAGE done,
reboot required */
#define POST_STAGE_MAC_ADDRESS (3072) /* MAC Address Check */
#define POST_STAGE_ARMFW_READY (49152) /* ARMfw is done with POST
and ready. */
#define POST_STAGE_ARMFW_UE (61440) /* ARMfw has asserted an
unrecoverable error. The
lower 3 hex digits of the
stage code identify the
unique error code.
*/
/* This structure defines the format of the MPU semaphore
* register when used for POST.
*/
struct BE_MGMT_HBA_POST_STATUS_STRUCT_AMAP {
u8 stage[16]; /* DWORD 0 */
u8 rsvd0[10]; /* DWORD 0 */
u8 iscsi_driver_loaded; /* DWORD 0 */
u8 option_rom_installed; /* DWORD 0 */
u8 iscsi_ip_conflict; /* DWORD 0 */
u8 iscsi_no_ip; /* DWORD 0 */
u8 backup_fw; /* DWORD 0 */
u8 error; /* DWORD 0 */
} __packed;
struct MGMT_HBA_POST_STATUS_STRUCT_AMAP {
u32 dw[1];
};
/* --- MGMT_HBA_POST_DUMMY_BITS_ENUM --- */
#define POST_BIT_ISCSI_LOADED (26)
#define POST_BIT_OPTROM_INST (27)
#define POST_BIT_BAD_IP_ADDR (28)
#define POST_BIT_NO_IP_ADDR (29)
#define POST_BIT_BACKUP_FW (30)
#define POST_BIT_ERROR (31)
/* --- MGMT_HBA_POST_DUMMY_VALUES_ENUM --- */
#define POST_ISCSI_DRIVER_LOADED (67108864)
#define POST_OPTROM_INSTALLED (134217728)
#define POST_ISCSI_IP_ADDRESS_CONFLICT (268435456)
#define POST_ISCSI_NO_IP_ADDRESS (536870912)
#define POST_BACKUP_FW_LOADED (1073741824)
#define POST_FATAL_ERROR (2147483648)
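/* Minimal decoding sketch (illustrative helpers, not part of the driver):
 * the semaphore register carries the POST stage in bits 0-15 (stage[16]
 * above) and the fatal-error flag in bit 31 (POST_BIT_ERROR /
 * POST_FATAL_ERROR).
 */
static inline u32 be_post_stage(u32 sem)
{
	return sem & 0xffff; /* e.g. POST_STAGE_ARMFW_READY == 0xc000 */
}

static inline int be_post_failed(u32 sem)
{
	return (sem & POST_FATAL_ERROR) != 0; /* bit 31 set on fatal UE */
}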
#endif /* __post_codes_amap_h__ */

View File

@ -1,68 +0,0 @@
/*
* Copyright (C) 2005 - 2008 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*/
/*
* Autogenerated by srcgen version: 0127
*/
#ifndef __regmap_amap_h__
#define __regmap_amap_h__
#include "pcicfg.h"
#include "ep.h"
#include "cev.h"
#include "mpu.h"
#include "doorbells.h"
/*
* This is the control and status register map for BladeEngine, showing
* the relative size and offset of each sub-module. The CSR registers
* are identical for the network and storage PCI functions. The
* CSR map is shown below, followed by details of each block,
* in sub-sections. The sub-sections begin with a description
* of CSRs that are instantiated in multiple blocks.
*/
struct BE_BLADE_ENGINE_CSRMAP_AMAP {
struct BE_MPU_CSRMAP_AMAP mpu;
u8 rsvd0[8192]; /* DWORD 256 */
u8 rsvd1[8192]; /* DWORD 512 */
struct BE_CEV_CSRMAP_AMAP cev;
u8 rsvd2[8192]; /* DWORD 1024 */
u8 rsvd3[8192]; /* DWORD 1280 */
u8 rsvd4[8192]; /* DWORD 1536 */
u8 rsvd5[8192]; /* DWORD 1792 */
u8 rsvd6[8192]; /* DWORD 2048 */
u8 rsvd7[8192]; /* DWORD 2304 */
u8 rsvd8[8192]; /* DWORD 2560 */
u8 rsvd9[8192]; /* DWORD 2816 */
u8 rsvd10[8192]; /* DWORD 3072 */
u8 rsvd11[8192]; /* DWORD 3328 */
u8 rsvd12[8192]; /* DWORD 3584 */
u8 rsvd13[8192]; /* DWORD 3840 */
u8 rsvd14[8192]; /* DWORD 4096 */
u8 rsvd15[8192]; /* DWORD 4352 */
u8 rsvd16[8192]; /* DWORD 4608 */
u8 rsvd17[8192]; /* DWORD 4864 */
u8 rsvd18[8192]; /* DWORD 5120 */
u8 rsvd19[8192]; /* DWORD 5376 */
u8 rsvd20[8192]; /* DWORD 5632 */
u8 rsvd21[8192]; /* DWORD 5888 */
u8 rsvd22[8192]; /* DWORD 6144 */
u8 rsvd23[17152][32]; /* DWORD 6400 */
} __packed;
struct BLADE_ENGINE_CSRMAP_AMAP {
u32 dw[23552];
};
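/* Offset sketch derived from the DWORD comments above (names illustrative):
 * each u8[8192] reserved block is 8192 bits == 256 DWORDs, so the MPU block
 * occupies DWORDs 0-255 and the CEV block starts at DWORD 768, i.e. byte
 * offset 0xC00 into the CSR window.
 */
enum {
	BE_CSR_MPU_DWORD = 0, /* mpu, per the map above */
	BE_CSR_CEV_DWORD = 768, /* after rsvd0 (DWORD 256) and rsvd1 (DWORD 512) */
	BE_CSR_CEV_BYTE = 768 * 4, /* 0xc00 */
};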
#endif /* __regmap_amap_h__ */

View File

@ -485,7 +485,7 @@ static int cxacru_cm(struct cxacru_data *instance, enum cxacru_cm_request cm,
usb_err(instance->usbatm, "requested transfer size too large (%d, %d)\n",
wbuflen, rbuflen);
ret = -ENOMEM;
goto fail;
goto err;
}
mutex_lock(&instance->cm_serialize);
@ -565,6 +565,7 @@ static int cxacru_cm(struct cxacru_data *instance, enum cxacru_cm_request cm,
dbg("cm %#x", cm);
fail:
mutex_unlock(&instance->cm_serialize);
err:
return ret;
}

View File

@ -50,6 +50,7 @@
static struct usb_device_id usbtmc_devices[] = {
{ USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0), },
{ USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 1), },
{ 0, } /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, usbtmc_devices);
@ -106,12 +107,13 @@ static int usbtmc_open(struct inode *inode, struct file *filp)
{
struct usb_interface *intf;
struct usbtmc_device_data *data;
int retval = -ENODEV;
int retval = 0;
intf = usb_find_interface(&usbtmc_driver, iminor(inode));
if (!intf) {
printk(KERN_ERR KBUILD_MODNAME
": can not find device for minor %d", iminor(inode));
retval = -ENODEV;
goto exit;
}

View File

@ -359,11 +359,6 @@ static void destroy_async(struct dev_state *ps, struct list_head *list)
spin_lock_irqsave(&ps->lock, flags);
}
spin_unlock_irqrestore(&ps->lock, flags);
as = async_getcompleted(ps);
while (as) {
free_async(as);
as = async_getcompleted(ps);
}
}
static void destroy_async_on_interface(struct dev_state *ps,
@ -643,6 +638,7 @@ static int usbdev_release(struct inode *inode, struct file *file)
struct dev_state *ps = file->private_data;
struct usb_device *dev = ps->dev;
unsigned int ifnum;
struct async *as;
usb_lock_device(dev);
@ -661,6 +657,12 @@ static int usbdev_release(struct inode *inode, struct file *file)
usb_unlock_device(dev);
usb_put_dev(dev);
put_pid(ps->disc_pid);
as = async_getcompleted(ps);
while (as) {
free_async(as);
as = async_getcompleted(ps);
}
kfree(ps);
return 0;
}

View File

@ -1095,7 +1095,8 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
prev->qh_next = qh->qh_next;
wmb ();
if (unlikely (ehci_to_hcd(ehci)->state == HC_STATE_HALT)) {
/* If the controller isn't running, we don't have to wait for it */
if (unlikely(!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))) {
/* if (unlikely (qh->reclaim != 0))
* this will recurse, probably not much
*/

View File

@ -1536,7 +1536,7 @@ itd_link_urb (
struct ehci_itd, itd_list);
list_move_tail (&itd->itd_list, &stream->td_list);
itd->stream = iso_stream_get (stream);
itd->urb = usb_get_urb (urb);
itd->urb = urb;
itd_init (ehci, stream, itd);
}
@ -1645,7 +1645,7 @@ itd_complete (
(void) disable_periodic(ehci);
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
if (unlikely (list_empty (&stream->td_list))) {
if (unlikely(list_is_singular(&stream->td_list))) {
ehci_to_hcd(ehci)->self.bandwidth_allocated
-= stream->bandwidth;
ehci_vdbg (ehci,
@ -1656,7 +1656,6 @@ itd_complete (
iso_stream_put (ehci, stream);
done:
usb_put_urb(urb);
itd->urb = NULL;
if (ehci->clock_frame != itd->frame || itd->index[7] != -1) {
/* OK to recycle this ITD now. */
@ -1949,7 +1948,7 @@ sitd_link_urb (
struct ehci_sitd, sitd_list);
list_move_tail (&sitd->sitd_list, &stream->td_list);
sitd->stream = iso_stream_get (stream);
sitd->urb = usb_get_urb (urb);
sitd->urb = urb;
sitd_patch(ehci, stream, sitd, sched, packet);
sitd_link (ehci, (next_uframe >> 3) % ehci->periodic_size,
@ -2034,7 +2033,7 @@ sitd_complete (
(void) disable_periodic(ehci);
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
if (list_empty (&stream->td_list)) {
if (list_is_singular(&stream->td_list)) {
ehci_to_hcd(ehci)->self.bandwidth_allocated
-= stream->bandwidth;
ehci_vdbg (ehci,
@ -2045,7 +2044,6 @@ sitd_complete (
iso_stream_put (ehci, stream);
/* OK to recycle this SITD now that its completion callback ran. */
done:
usb_put_urb(urb);
sitd->urb = NULL;
sitd->stream = NULL;
list_move(&sitd->sitd_list, &stream->free_list);

View File

@ -499,6 +499,7 @@ static int mdc800_usb_probe (struct usb_interface *intf,
retval = usb_register_dev(intf, &mdc800_class);
if (retval) {
dev_err(&intf->dev, "Not able to get a minor for this device.\n");
mutex_unlock(&mdc800->io_lock);
return -ENODEV;
}

View File

@ -376,7 +376,7 @@ static int adu_release(struct inode *inode, struct file *file)
if (dev->open_count <= 0) {
dbg(1," %s : device not opened", __func__);
retval = -ENODEV;
goto exit;
goto unlock;
}
adu_release_internal(dev);
@ -385,9 +385,9 @@ static int adu_release(struct inode *inode, struct file *file)
if (!dev->open_count) /* ... and we're the last user */
adu_delete(dev);
}
exit:
unlock:
mutex_unlock(&adutux_mutex);
exit:
dbg(2," %s : leave, return value %d", __func__, retval);
return retval;
}

View File

@ -401,6 +401,7 @@ static ssize_t vstusb_write(struct file *file, const char __user *buffer,
}
if (copy_from_user(buf, buffer, count)) {
mutex_unlock(&vstdev->lock);
dev_err(&dev->dev, "%s: can't copy_from_user\n", __func__);
retval = -EFAULT;
goto exit;

View File

@ -79,6 +79,7 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
{ USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
{ USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */

View File

@ -663,6 +663,11 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) },
{ USB_DEVICE(FTDI_VID, DIEBOLD_BCS_SE923_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_NDI_HUC_PID) },
{ USB_DEVICE(ATMEL_VID, STK541_PID) },
{ USB_DEVICE(DE_VID, STB_PID) },
{ USB_DEVICE(DE_VID, WHT_PID) },
{ USB_DEVICE(ADI_VID, ADI_GNICE_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};

View File

@ -892,6 +892,26 @@
*/
#define DIEBOLD_BCS_SE923_PID 0xfb99
/*
* Atmel STK541
*/
#define ATMEL_VID 0x03eb /* Vendor ID */
#define STK541_PID 0x2109 /* Zigbee Controller */
/*
* Dresden Elektronic Sensor Terminal Board
*/
#define DE_VID 0x1cf1 /* Vendor ID */
#define STB_PID 0x0001 /* Sensor Terminal Board */
#define WHT_PID 0x0004 /* Wireless Handheld Terminal */
/*
* Blackfin gnICE JTAG
* http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
*/
#define ADI_VID 0x0456
#define ADI_GNICE_PID 0xF000
/*
* BmRequestType: 1100 0000b
* bRequest: FTDI_E2_READ

View File

@ -89,6 +89,7 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
#define OPTION_PRODUCT_ETNA_MODEM_GT 0x7041
#define OPTION_PRODUCT_ETNA_MODEM_EX 0x7061
#define OPTION_PRODUCT_ETNA_KOI_MODEM 0x7100
#define OPTION_PRODUCT_GTM380_MODEM 0x7201
#define HUAWEI_VENDOR_ID 0x12D1
#define HUAWEI_PRODUCT_E600 0x1001
@ -197,6 +198,7 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
/* OVATION PRODUCTS */
#define NOVATELWIRELESS_PRODUCT_MC727 0x4100
#define NOVATELWIRELESS_PRODUCT_MC950D 0x4400
#define NOVATELWIRELESS_PRODUCT_U727 0x5010
/* FUTURE NOVATEL PRODUCTS */
#define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0X6000
@ -288,15 +290,11 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
/* ZTE PRODUCTS */
#define ZTE_VENDOR_ID 0x19d2
#define ZTE_PRODUCT_MF622 0x0001
#define ZTE_PRODUCT_MF628 0x0015
#define ZTE_PRODUCT_MF626 0x0031
#define ZTE_PRODUCT_CDMA_TECH 0xfffe
/* Ericsson products */
#define ERICSSON_VENDOR_ID 0x0bdb
#define ERICSSON_PRODUCT_F3507G_1 0x1900
#define ERICSSON_PRODUCT_F3507G_2 0x1902
#define BENQ_VENDOR_ID 0x04a5
#define BENQ_PRODUCT_H10 0x4068
@ -325,6 +323,7 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_GT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_EX) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_KOI_MODEM) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_GTM380_MODEM) },
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_Q101) },
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_Q111) },
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
@ -415,6 +414,7 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) }, /* Novatel EVDO product */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */
@ -442,7 +442,6 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_CINGULAR) }, /* Dell Wireless HSDPA 5520 == Novatel Expedite EU860D */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_L) }, /* Dell Wireless HSDPA 5520 */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_I) }, /* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */
{ USB_DEVICE(DELL_VENDOR_ID, 0x8147) }, /* Dell Wireless 5530 Mobile Broadband (3G HSPA) Mini-Card */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
@ -510,11 +509,10 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) },
{ USB_DEVICE(ERICSSON_VENDOR_ID, ERICSSON_PRODUCT_F3507G_1) },
{ USB_DEVICE(ERICSSON_VENDOR_ID, ERICSSON_PRODUCT_F3507G_2) },
{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
{ USB_DEVICE(0x1da5, 0x4515) }, /* BenQ H20 */
{ } /* Terminating entry */

View File

@ -226,7 +226,7 @@ UNUSUAL_DEV( 0x0421, 0x047c, 0x0370, 0x0610,
US_FL_MAX_SECTORS_64 ),
/* Reported by Manuel Osdoba <manuel.osdoba@tu-ilmenau.de> */
UNUSUAL_DEV( 0x0421, 0x0492, 0x0452, 0x0452,
UNUSUAL_DEV( 0x0421, 0x0492, 0x0452, 0x9999,
"Nokia",
"Nokia 6233",
US_SC_DEVICE, US_PR_DEVICE, NULL,
@ -951,7 +951,9 @@ UNUSUAL_DEV( 0x066f, 0x8000, 0x0001, 0x0001,
US_FL_FIX_CAPACITY ),
/* Reported by Richard -=[]=- <micro_flyer@hotmail.com> */
UNUSUAL_DEV( 0x067b, 0x2507, 0x0100, 0x0100,
/* Change to bcdDeviceMin (0x0100 to 0x0001) reported by
* Thomas Bartosik <tbartdev@gmx-topmail.de> */
UNUSUAL_DEV( 0x067b, 0x2507, 0x0001, 0x0100,
"Prolific Technology Inc.",
"Mass Storage Device",
US_SC_DEVICE, US_PR_DEVICE, NULL,
@ -1390,6 +1392,16 @@ UNUSUAL_DEV( 0x0af0, 0x7401, 0x0000, 0x0000,
US_SC_DEVICE, US_PR_DEVICE, NULL,
0 ),
/* Reported by Jan Dumon <j.dumon@option.com>
* This device (wrongly) has a vendor-specific device descriptor.
* The entry is needed so usb-storage can bind to its mass-storage
* interface as an interface driver */
UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0000,
"Option",
"GI 0431 SD-Card",
US_SC_DEVICE, US_PR_DEVICE, NULL,
0 ),
/* Reported by Ben Efros <ben@pc-doctor.com> */
UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
"Seagate",

View File

@ -921,8 +921,10 @@ static void wa_urb_enqueue_b(struct wa_xfer *xfer)
result = -ENODEV;
/* FIXME: segmentation broken -- kills DWA */
mutex_lock(&wusbhc->mutex); /* get a WUSB dev */
if (urb->dev == NULL)
if (urb->dev == NULL) {
mutex_unlock(&wusbhc->mutex);
goto error_dev_gone;
}
wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
if (wusb_dev == NULL) {
mutex_unlock(&wusbhc->mutex);

View File

@ -443,7 +443,7 @@ static struct kiocb *__aio_get_req(struct kioctx *ctx)
req->private = NULL;
req->ki_iovec = NULL;
INIT_LIST_HEAD(&req->ki_run_list);
req->ki_eventfd = ERR_PTR(-EINVAL);
req->ki_eventfd = NULL;
/* Check if the completion queue has enough free space to
* accept an event from this io.
@ -485,8 +485,6 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
assert_spin_locked(&ctx->ctx_lock);
if (!IS_ERR(req->ki_eventfd))
fput(req->ki_eventfd);
if (req->ki_dtor)
req->ki_dtor(req);
if (req->ki_iovec != &req->ki_inline_vec)
@ -508,8 +506,11 @@ static void aio_fput_routine(struct work_struct *data)
list_del(&req->ki_list);
spin_unlock_irq(&fput_lock);
/* Complete the fput */
__fput(req->ki_filp);
/* Complete the fput(s) */
if (req->ki_filp != NULL)
__fput(req->ki_filp);
if (req->ki_eventfd != NULL)
__fput(req->ki_eventfd);
/* Link the iocb into the context's free list */
spin_lock_irq(&ctx->ctx_lock);
@ -527,12 +528,14 @@ static void aio_fput_routine(struct work_struct *data)
*/
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{
int schedule_putreq = 0;
dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
req, atomic_long_read(&req->ki_filp->f_count));
assert_spin_locked(&ctx->ctx_lock);
req->ki_users --;
req->ki_users--;
BUG_ON(req->ki_users < 0);
if (likely(req->ki_users))
return 0;
@ -540,10 +543,23 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
req->ki_cancel = NULL;
req->ki_retry = NULL;
/* Must be done under the lock to serialise against cancellation.
* Call this aio_fput as it duplicates fput via the fput_work.
/*
 * Try to optimize the aio and eventfd file* puts, by avoiding
 * scheduling work when it is not __fput() time. In normal cases,
 * we would not be holding the last reference to the file*, so
 * this function will be executed without any aio kthread wakeup.
 */
if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) {
if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count)))
schedule_putreq++;
else
req->ki_filp = NULL;
if (req->ki_eventfd != NULL) {
if (unlikely(atomic_long_dec_and_test(&req->ki_eventfd->f_count)))
schedule_putreq++;
else
req->ki_eventfd = NULL;
}
if (unlikely(schedule_putreq)) {
get_ioctx(ctx);
spin_lock(&fput_lock);
list_add(&req->ki_list, &fput_head);
@ -571,7 +587,7 @@ int aio_put_req(struct kiocb *req)
static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
struct mm_struct *mm = current->mm;
struct kioctx *ctx = NULL;
struct kioctx *ctx, *ret = NULL;
struct hlist_node *n;
rcu_read_lock();
@ -579,12 +595,13 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
if (ctx->user_id == ctx_id && !ctx->dead) {
get_ioctx(ctx);
ret = ctx;
break;
}
}
rcu_read_unlock();
return ctx;
return ret;
}
/*
@ -1009,7 +1026,7 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
* eventfd. The eventfd_signal() function is safe to be called
* from IRQ context.
*/
if (!IS_ERR(iocb->ki_eventfd))
if (iocb->ki_eventfd != NULL)
eventfd_signal(iocb->ki_eventfd, 1);
put_rq:
@ -1608,6 +1625,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd);
if (IS_ERR(req->ki_eventfd)) {
ret = PTR_ERR(req->ki_eventfd);
req->ki_eventfd = NULL;
goto out_put_req;
}
}

View File

@ -784,7 +784,14 @@ struct btrfs_fs_info {
struct list_head dirty_cowonly_roots;
struct btrfs_fs_devices *fs_devices;
/*
* the space_info list is almost entirely read only. It only changes
* when we add a new raid type to the FS, and that happens
* very rarely. RCU is used to protect it.
*/
struct list_head space_info;
spinlock_t delalloc_lock;
spinlock_t new_trans_lock;
u64 delalloc_bytes;
@ -1797,6 +1804,8 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root);
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len);
u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags);
void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *ionde);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
int btrfs_check_metadata_free_space(struct btrfs_root *root);
int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
u64 bytes);

View File

@ -20,6 +20,7 @@
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include "compat.h"
#include "hash.h"
#include "crc32c.h"
@ -330,13 +331,33 @@ static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
{
struct list_head *head = &info->space_info;
struct btrfs_space_info *found;
list_for_each_entry(found, head, list) {
if (found->flags == flags)
rcu_read_lock();
list_for_each_entry_rcu(found, head, list) {
if (found->flags == flags) {
rcu_read_unlock();
return found;
}
}
rcu_read_unlock();
return NULL;
}
/*
* after adding space to the filesystem, we need to clear the full flags
* on all the space infos.
*/
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
struct list_head *head = &info->space_info;
struct btrfs_space_info *found;
rcu_read_lock();
list_for_each_entry_rcu(found, head, list)
found->full = 0;
rcu_read_unlock();
}
static u64 div_factor(u64 num, int factor)
{
if (factor == 10)
@ -1903,7 +1924,6 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
if (!found)
return -ENOMEM;
list_add(&found->list, &info->space_info);
INIT_LIST_HEAD(&found->block_groups);
init_rwsem(&found->groups_sem);
spin_lock_init(&found->lock);
@ -1917,6 +1937,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
found->full = 0;
found->force_alloc = 0;
*space_info = found;
list_add_rcu(&found->list, &info->space_info);
return 0;
}
@ -6320,6 +6341,7 @@ out:
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
struct btrfs_block_group_cache *block_group;
struct btrfs_space_info *space_info;
struct rb_node *n;
spin_lock(&info->block_group_cache_lock);
@ -6341,6 +6363,23 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
spin_lock(&info->block_group_cache_lock);
}
spin_unlock(&info->block_group_cache_lock);
/* now that all the block groups are freed, go through and
* free all the space_info structs. This is only called during
* the final stages of unmount, and so we know nobody is
* using them. We call synchronize_rcu() once before we start,
* just to be on the safe side.
*/
synchronize_rcu();
while(!list_empty(&info->space_info)) {
space_info = list_entry(info->space_info.next,
struct btrfs_space_info,
list);
list_del(&space_info->list);
kfree(space_info);
}
return 0;
}

View File

@ -1374,6 +1374,12 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
ret = btrfs_add_device(trans, root, device);
}
/*
* we've got more storage, clear any full flags on the space
* infos
*/
btrfs_clear_space_info_full(root->fs_info);
unlock_chunks(root);
btrfs_commit_transaction(trans, root);
@ -1459,6 +1465,8 @@ static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
device->fs_devices->total_rw_bytes += diff;
device->total_bytes = new_size;
btrfs_clear_space_info_full(device->dev_root->fs_info);
return btrfs_update_device(trans, device);
}

View File

@ -760,15 +760,9 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
* If warn is true, then emit a warning if the page is not uptodate and has
* not been truncated.
*/
static int __set_page_dirty(struct page *page,
static void __set_page_dirty(struct page *page,
struct address_space *mapping, int warn)
{
if (unlikely(!mapping))
return !TestSetPageDirty(page);
if (TestSetPageDirty(page))
return 0;
spin_lock_irq(&mapping->tree_lock);
if (page->mapping) { /* Race with truncate? */
WARN_ON_ONCE(warn && !PageUptodate(page));
@ -785,8 +779,6 @@ static int __set_page_dirty(struct page *page,
}
spin_unlock_irq(&mapping->tree_lock);
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
return 1;
}
/*
@ -816,6 +808,7 @@ static int __set_page_dirty(struct page *page,
*/
int __set_page_dirty_buffers(struct page *page)
{
int newly_dirty;
struct address_space *mapping = page_mapping(page);
if (unlikely(!mapping))
@ -831,9 +824,12 @@ int __set_page_dirty_buffers(struct page *page)
bh = bh->b_this_page;
} while (bh != head);
}
newly_dirty = !TestSetPageDirty(page);
spin_unlock(&mapping->private_lock);
return __set_page_dirty(page, mapping, 1);
if (newly_dirty)
__set_page_dirty(page, mapping, 1);
return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);
@ -1262,8 +1258,11 @@ void mark_buffer_dirty(struct buffer_head *bh)
return;
}
if (!test_set_buffer_dirty(bh))
__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
if (!test_set_buffer_dirty(bh)) {
struct page *page = bh->b_page;
if (!TestSetPageDirty(page))
__set_page_dirty(page, page_mapping(page), 0);
}
}
/*

View File

@ -1122,7 +1122,8 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
struct ext4_extent_idx *ix;
struct ext4_extent *ex;
ext4_fsblk_t block;
int depth, ee_len;
int depth; /* Note, NOT eh_depth; depth from top of tree */
int ee_len;
BUG_ON(path == NULL);
depth = path->p_depth;
@ -1179,7 +1180,8 @@ got_index:
if (bh == NULL)
return -EIO;
eh = ext_block_hdr(bh);
if (ext4_ext_check_header(inode, eh, depth)) {
/* subtract from p_depth to get proper eh_depth */
if (ext4_ext_check_header(inode, eh, path->p_depth - depth)) {
put_bh(bh);
return -EIO;
}

View File

@ -698,6 +698,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
struct inode *ret;
ext4_group_t i;
int free = 0;
static int once = 1;
ext4_group_t flex_group;
/* Cannot create files in a deleted directory */
@ -719,7 +720,8 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
ret2 = find_group_flex(sb, dir, &group);
if (ret2 == -1) {
ret2 = find_group_other(sb, dir, &group);
if (ret2 == 0 && printk_ratelimit())
if (ret2 == 0 && once) {
once = 0;
printk(KERN_NOTICE "ext4: find_group_flex "
"failed, fallback succeeded dir %lu\n",
dir->i_ino);
}

View File

@ -1447,7 +1447,7 @@ static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
struct ext4_free_extent *gex = &ac->ac_g_ex;
BUG_ON(ex->fe_len <= 0);
BUG_ON(ex->fe_len >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
BUG_ON(ex->fe_len > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
BUG_ON(ex->fe_start >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
@ -3292,7 +3292,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
}
BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
start > ac->ac_o_ex.fe_logical);
BUG_ON(size <= 0 || size >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
/* now prepare goal request */
@ -3589,6 +3589,7 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
struct super_block *sb, struct ext4_prealloc_space *pa)
{
ext4_group_t grp;
ext4_fsblk_t grp_blk;
if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
return;
@ -3603,8 +3604,12 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
pa->pa_deleted = 1;
spin_unlock(&pa->pa_lock);
/* -1 is to protect from crossing allocation group */
ext4_get_group_no_and_offset(sb, pa->pa_pstart - 1, &grp, NULL);
grp_blk = pa->pa_pstart;
/* If linear, pa_pstart may be in the next group when pa is used up */
if (pa->pa_linear)
grp_blk--;
ext4_get_group_no_and_offset(sb, grp_blk, &grp, NULL);
/*
* possible race:

View File

@ -2596,6 +2596,7 @@ static nfsd4_enc nfsd4_enc_ops[] = {
[OP_LOOKUPP] = (nfsd4_enc)nfsd4_encode_noop,
[OP_NVERIFY] = (nfsd4_enc)nfsd4_encode_noop,
[OP_OPEN] = (nfsd4_enc)nfsd4_encode_open,
[OP_OPENATTR] = (nfsd4_enc)nfsd4_encode_noop,
[OP_OPEN_CONFIRM] = (nfsd4_enc)nfsd4_encode_open_confirm,
[OP_OPEN_DOWNGRADE] = (nfsd4_enc)nfsd4_encode_open_downgrade,
[OP_PUTFH] = (nfsd4_enc)nfsd4_encode_noop,

View File

@ -393,8 +393,10 @@ struct cpu_vfs_cap_data {
# define CAP_FULL_SET ((kernel_cap_t){{ ~0, ~0 }})
# define CAP_INIT_EFF_SET ((kernel_cap_t){{ ~CAP_TO_MASK(CAP_SETPCAP), ~0 }})
# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0, CAP_FS_MASK_B1 } })
# define CAP_NFSD_SET ((kernel_cap_t){{ CAP_FS_MASK_B0|CAP_TO_MASK(CAP_SYS_RESOURCE), \
CAP_FS_MASK_B1 } })
# define CAP_NFSD_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
| CAP_TO_MASK(CAP_SYS_RESOURCE) \
| CAP_TO_MASK(CAP_MKNOD), \
CAP_FS_MASK_B1 } })
#endif /* _KERNEL_CAPABILITY_U32S != 2 */
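/* Worked example (illustrative): a capability number folds into the
 * two-u32 set as word = cap >> 5, bit = cap & 31. With
 * CAP_SYS_RESOURCE == 24 and CAP_MKNOD == 27 both bits land in word 0,
 * which is why the commit ORs them into the first initializer above:
 *
 *	CAP_TO_MASK(CAP_SYS_RESOURCE) == 1 << 24 == 0x01000000
 *	CAP_TO_MASK(CAP_MKNOD)        == 1 << 27 == 0x08000000
 */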

View File

@ -919,10 +919,8 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
ri->rp = rp;
ri->task = current;
if (rp->entry_handler && rp->entry_handler(ri, regs)) {
spin_unlock_irqrestore(&rp->lock, flags);
if (rp->entry_handler && rp->entry_handler(ri, regs))
return 0;
}
arch_prepare_kretprobe(ri, regs);

View File

@ -2049,14 +2049,6 @@ static noinline struct module *load_module(void __user *umod,
if (err < 0)
goto free_mod;
#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
mod->name);
if (!mod->refptr) {
err = -ENOMEM;
goto free_mod;
}
#endif
if (pcpuindex) {
/* We have a special allocation for this section. */
percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
@ -2064,7 +2056,7 @@ static noinline struct module *load_module(void __user *umod,
mod->name);
if (!percpu) {
err = -ENOMEM;
goto free_percpu;
goto free_mod;
}
sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
mod->percpu = percpu;
@ -2116,6 +2108,14 @@ static noinline struct module *load_module(void __user *umod,
/* Module has been moved. */
mod = (void *)sechdrs[modindex].sh_addr;
#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
mod->name);
if (!mod->refptr) {
err = -ENOMEM;
goto free_init;
}
#endif
/* Now we've moved module, initialize linked lists, etc. */
module_unload_init(mod);
@ -2322,15 +2322,17 @@ static noinline struct module *load_module(void __user *umod,
ftrace_release(mod->module_core, mod->core_size);
free_unload:
module_unload_free(mod);
module_free(mod, mod->module_init);
free_core:
module_free(mod, mod->module_core);
free_percpu:
if (percpu)
percpu_modfree(percpu);
free_init:
#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
percpu_modfree(mod->refptr);
#endif
module_free(mod, mod->module_init);
free_core:
module_free(mod, mod->module_core);
/* mod will be freed with core. Don't access it beyond this line! */
free_percpu:
if (percpu)
percpu_modfree(percpu);
free_mod:
kfree(args);
free_hdr:

View File

@ -641,6 +641,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
}
#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
@ -652,8 +653,8 @@ static atomic_t trace_record_cmdline_disabled __read_mostly;
static void trace_init_cmdlines(void)
{
memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
cmdline_idx = 0;
}
@ -745,8 +746,7 @@ void trace_stop_cmdline_recording(void);
static void trace_save_cmdline(struct task_struct *tsk)
{
unsigned map;
unsigned idx;
unsigned pid, idx;
if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
return;
@ -761,13 +761,20 @@ static void trace_save_cmdline(struct task_struct *tsk)
return;
idx = map_pid_to_cmdline[tsk->pid];
if (idx >= SAVED_CMDLINES) {
if (idx == NO_CMDLINE_MAP) {
idx = (cmdline_idx + 1) % SAVED_CMDLINES;
map = map_cmdline_to_pid[idx];
if (map <= PID_MAX_DEFAULT)
map_pid_to_cmdline[map] = (unsigned)-1;
/*
* Check whether the cmdline buffer at idx has a pid
* mapped. We are going to overwrite that entry so we
* need to clear the map_pid_to_cmdline. Otherwise we
* would read the new comm for the old pid.
*/
pid = map_cmdline_to_pid[idx];
if (pid != NO_CMDLINE_MAP)
map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
map_cmdline_to_pid[idx] = tsk->pid;
map_pid_to_cmdline[tsk->pid] = idx;
cmdline_idx = idx;
@ -794,18 +801,18 @@ void trace_find_cmdline(int pid, char comm[])
__raw_spin_lock(&trace_cmdline_lock);
map = map_pid_to_cmdline[pid];
if (map >= SAVED_CMDLINES)
goto out;
if (map != NO_CMDLINE_MAP)
strcpy(comm, saved_cmdlines[map]);
else
strcpy(comm, "<...>");
strcpy(comm, saved_cmdlines[map]);
out:
__raw_spin_unlock(&trace_cmdline_lock);
}
void tracing_record_cmdline(struct task_struct *tsk)
{
if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
!tracing_is_on())
return;
trace_save_cmdline(tsk);

View File

@ -692,6 +692,9 @@ static int snd_mixer_oss_put_volume1(struct snd_mixer_oss_file *fmixer,
snd_mixer_oss_put_volume1_vol(fmixer, pslot, slot->numid[SNDRV_MIXER_OSS_ITEM_PVOLUME], left, right);
if (slot->present & SNDRV_MIXER_OSS_PRESENT_CVOLUME)
snd_mixer_oss_put_volume1_vol(fmixer, pslot, slot->numid[SNDRV_MIXER_OSS_ITEM_CVOLUME], left, right);
} else if (slot->present & SNDRV_MIXER_OSS_PRESENT_CVOLUME) {
snd_mixer_oss_put_volume1_vol(fmixer, pslot,
slot->numid[SNDRV_MIXER_OSS_ITEM_CVOLUME], left, right);
} else if (slot->present & SNDRV_MIXER_OSS_PRESENT_GVOLUME) {
snd_mixer_oss_put_volume1_vol(fmixer, pslot, slot->numid[SNDRV_MIXER_OSS_ITEM_GVOLUME], left, right);
} else if (slot->present & SNDRV_MIXER_OSS_PRESENT_GLOBAL) {

View File

@ -2872,7 +2872,7 @@ static void snd_pcm_oss_proc_write(struct snd_info_entry *entry,
setup = kmalloc(sizeof(*setup), GFP_KERNEL);
if (! setup) {
buffer->error = -ENOMEM;
mutex_lock(&pstr->oss.setup_mutex);
mutex_unlock(&pstr->oss.setup_mutex);
return;
}
if (pstr->oss.setup_list == NULL)
@ -2886,7 +2886,7 @@ static void snd_pcm_oss_proc_write(struct snd_info_entry *entry,
if (! template.task_name) {
kfree(setup);
buffer->error = -ENOMEM;
mutex_lock(&pstr->oss.setup_mutex);
mutex_unlock(&pstr->oss.setup_mutex);
return;
}
}

View File

@ -38,6 +38,10 @@ int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
if (! sgbuf)
return -EINVAL;
if (dmab->area)
vunmap(dmab->area);
dmab->area = NULL;
tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
tmpb.dev.dev = sgbuf->dev;
for (i = 0; i < sgbuf->pages; i++) {
@ -48,9 +52,6 @@ int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
snd_dma_free_pages(&tmpb);
}
if (dmab->area)
vunmap(dmab->area);
dmab->area = NULL;
kfree(sgbuf->table);
kfree(sgbuf->page_table);

View File

@ -550,21 +550,27 @@ static int __devinit snd_opl3sa2_mixer(struct snd_card *card)
#ifdef CONFIG_PM
static int snd_opl3sa2_suspend(struct snd_card *card, pm_message_t state)
{
struct snd_opl3sa2 *chip = card->private_data;
if (card) {
struct snd_opl3sa2 *chip = card->private_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
chip->wss->suspend(chip->wss);
/* power down */
snd_opl3sa2_write(chip, OPL3SA2_PM_CTRL, OPL3SA2_PM_D3);
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
chip->wss->suspend(chip->wss);
/* power down */
snd_opl3sa2_write(chip, OPL3SA2_PM_CTRL, OPL3SA2_PM_D3);
}
return 0;
}
static int snd_opl3sa2_resume(struct snd_card *card)
{
struct snd_opl3sa2 *chip = card->private_data;
struct snd_opl3sa2 *chip;
int i;
if (!card)
return 0;
chip = card->private_data;
/* power up */
snd_opl3sa2_write(chip, OPL3SA2_PM_CTRL, OPL3SA2_PM_D0);

View File

@ -2059,26 +2059,31 @@ static int __devinit check_position_fix(struct azx *chip, int fix)
{
const struct snd_pci_quirk *q;
/* Check VIA HD Audio Controller exist */
if (chip->pci->vendor == PCI_VENDOR_ID_VIA &&
chip->pci->device == VIA_HDAC_DEVICE_ID) {
switch (fix) {
case POS_FIX_LPIB:
case POS_FIX_POSBUF:
return fix;
}
/* Check whether a VIA/ATI HD Audio Controller exists */
switch (chip->driver_type) {
case AZX_DRIVER_VIA:
case AZX_DRIVER_ATI:
chip->via_dmapos_patch = 1;
/* Use link position directly, avoid any transfer problem. */
return POS_FIX_LPIB;
}
chip->via_dmapos_patch = 0;
if (fix == POS_FIX_AUTO) {
q = snd_pci_quirk_lookup(chip->pci, position_fix_list);
if (q) {
printk(KERN_INFO
"hda_intel: position_fix set to %d "
"for device %04x:%04x\n",
q->value, q->subvendor, q->subdevice);
return q->value;
}
q = snd_pci_quirk_lookup(chip->pci, position_fix_list);
if (q) {
printk(KERN_INFO
"hda_intel: position_fix set to %d "
"for device %04x:%04x\n",
q->value, q->subvendor, q->subdevice);
return q->value;
}
return fix;
return POS_FIX_AUTO;
}
/*
@ -2210,9 +2215,17 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
gcap = azx_readw(chip, GCAP);
snd_printdd("chipset global capabilities = 0x%x\n", gcap);
/* ATI chips seem buggy about 64bit DMA addresses */
if (chip->driver_type == AZX_DRIVER_ATI)
gcap &= ~0x01;
/* allow 64bit DMA address if supported by H/W */
if ((gcap & 0x01) && !pci_set_dma_mask(pci, DMA_64BIT_MASK))
pci_set_consistent_dma_mask(pci, DMA_64BIT_MASK);
else {
pci_set_dma_mask(pci, DMA_32BIT_MASK);
pci_set_consistent_dma_mask(pci, DMA_32BIT_MASK);
}
/* read number of streams from GCAP register instead of using
* hardcoded value

View File

@ -607,6 +607,7 @@ static int snd_mixart_hw_params(struct snd_pcm_substream *subs,
/* set the format to the board */
err = mixart_set_format(stream, format);
if(err < 0) {
mutex_unlock(&mgr->setup_mutex);
return err;
}