Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Two entries were being added at the same time to the IFLA
policy table, whilst parallel bug fixes to decnet
routing dst handling overlapped with the dst gc removal
in net-next.
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/Documentation/ABI/testing/sysfs-class-net b/Documentation/ABI/testing/sysfs-class-net
index 668604f..6856da9 100644
--- a/Documentation/ABI/testing/sysfs-class-net
+++ b/Documentation/ABI/testing/sysfs-class-net
@@ -251,3 +251,11 @@
Description:
Indicates the unique physical switch identifier of a switch this
port belongs to, as a string.
+
+What: /sys/class/net/<iface>/phydev
+Date: May 2017
+KernelVersion: 4.13
+Contact: netdev@vger.kernel.org
+Description:
+ Symbolic link to the PHY device this network device is attached
+ to.
diff --git a/Documentation/ABI/testing/sysfs-class-net-phydev b/Documentation/ABI/testing/sysfs-class-net-phydev
new file mode 100644
index 0000000..6ebabfb
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-net-phydev
@@ -0,0 +1,36 @@
+What: /sys/class/mdio_bus/<bus>/<device>/attached_dev
+Date: May 2017
+KernelVersion: 4.13
+Contact: netdev@vger.kernel.org
+Description:
+ Symbolic link to the network device this PHY device is
+ attached to.
+
+What: /sys/class/mdio_bus/<bus>/<device>/phy_has_fixups
+Date: February 2014
+KernelVersion: 3.15
+Contact: netdev@vger.kernel.org
+Description:
+ Boolean value indicating whether the PHY device has
+		any fixups registered against it (phy_register_fixup).
+
+What: /sys/class/mdio_bus/<bus>/<device>/phy_id
+Date: November 2012
+KernelVersion: 3.8
+Contact: netdev@vger.kernel.org
+Description:
+ 32-bit hexadecimal value corresponding to the PHY device's OUI,
+ model and revision number.
+
+What: /sys/class/mdio_bus/<bus>/<device>/phy_interface
+Date: February 2014
+KernelVersion: 3.15
+Contact: netdev@vger.kernel.org
+Description:
+		String value indicating the PHY interface, possible
+		values are:
+		<empty> (not available), mii, gmii, sgmii, tbi, rev-mii,
+		rmii, rgmii, rgmii-id, rgmii-rxid, rgmii-txid, rtbi, smii,
+		xgmii, moca, qsgmii, trgmii, 1000base-x, 2500base-x, rxaui,
+		xaui, 10gbase-kr, unknown
+
diff --git a/Documentation/devicetree/bindings/misc/allwinner,syscon.txt b/Documentation/devicetree/bindings/misc/allwinner,syscon.txt
new file mode 100644
index 0000000..31494a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/allwinner,syscon.txt
@@ -0,0 +1,20 @@
+* Allwinner sun8i system controller
+
+This file describes the bindings for the system controller present in the
+Allwinner H3, A83T, V3s and A64 SoCs.
+The principal function of this syscon is to control the EMAC PHY choice and
+configuration.
+
+Required properties for the system controller:
+- reg: address and length of the register for the device.
+- compatible: should be "syscon" and one of the following string:
+ "allwinner,sun8i-h3-system-controller"
+ "allwinner,sun8i-v3s-system-controller"
+ "allwinner,sun50i-a64-system-controller"
+ "allwinner,sun8i-a83t-system-controller"
+
+Example:
+syscon: syscon@1c00000 {
+ compatible = "allwinner,sun8i-h3-system-controller", "syscon";
+ reg = <0x01c00000 0x1000>;
+};
diff --git a/Documentation/devicetree/bindings/net/cortina.txt b/Documentation/devicetree/bindings/net/cortina.txt
new file mode 100644
index 0000000..40d0bd9
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/cortina.txt
@@ -0,0 +1,21 @@
+Cortina PHY Driver Device Tree Bindings
+---------------------------------------
+
+CORTINA is a registered trademark of Cortina Systems, Inc.
+
+The driver supports the Cortina Electronic Dispersion Compensation (EDC)
+devices, equipped with clock and data recovery (CDR) circuits. These
+devices make use of registers that are not compatible with Clause 45 or
+Clause 22; they therefore need to be described using the
+"ethernet-phy-id" compatible.
+
+Since the driver only implements polling mode support, the interrupt
+properties can be omitted.
+
+Example (CS4340 phy):
+ mdio {
+ cs4340_phy@10 {
+ compatible = "ethernet-phy-id13e5.1002";
+ reg = <0x10>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/dsa/b53.txt b/Documentation/devicetree/bindings/net/dsa/b53.txt
index 8ec2ca2..8acf51a 100644
--- a/Documentation/devicetree/bindings/net/dsa/b53.txt
+++ b/Documentation/devicetree/bindings/net/dsa/b53.txt
@@ -13,6 +13,9 @@
"brcm,bcm5397"
"brcm,bcm5398"
+ For the BCM11360 SoC, must be:
+ "brcm,bcm11360-srab" and the mandatory "brcm,cygnus-srab" string
+
For the BCM5310x SoCs with an integrated switch, must be one of:
"brcm,bcm53010-srab"
"brcm,bcm53011-srab"
diff --git a/Documentation/devicetree/bindings/net/dsa/ksz.txt b/Documentation/devicetree/bindings/net/dsa/ksz.txt
new file mode 100644
index 0000000..0ab8b39
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/ksz.txt
@@ -0,0 +1,72 @@
+Microchip KSZ Series Ethernet switches
+======================================
+
+Required properties:
+
+- compatible: For external switch chips, compatible string must be exactly one
+ of: "microchip,ksz9477"
+
+See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
+required and optional properties.
+
+Examples:
+
+Ethernet switch connected via SPI to the host, CPU port wired to eth0:
+
+ eth0: ethernet@10001000 {
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ spi1: spi@f8008000 {
+ pinctrl-0 = <&pinctrl_spi_ksz>;
+ cs-gpios = <&pioC 25 0>;
+ id = <1>;
+ status = "okay";
+
+ ksz9477: ksz9477@0 {
+ compatible = "microchip,ksz9477";
+ reg = <0>;
+
+ spi-max-frequency = <44000000>;
+ spi-cpha;
+ spi-cpol;
+
+ status = "okay";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ label = "lan1";
+ };
+ port@1 {
+ reg = <1>;
+ label = "lan2";
+ };
+ port@2 {
+ reg = <2>;
+ label = "lan3";
+ };
+ port@3 {
+ reg = <3>;
+ label = "lan4";
+ };
+ port@4 {
+ reg = <4>;
+ label = "lan5";
+ };
+ port@5 {
+ reg = <5>;
+ label = "cpu";
+				ethernet = <&eth0>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/dwmac-sun8i.txt b/Documentation/devicetree/bindings/net/dwmac-sun8i.txt
new file mode 100644
index 0000000..725f3b1
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dwmac-sun8i.txt
@@ -0,0 +1,84 @@
+* Allwinner sun8i GMAC ethernet controller
+
+This device is a platform glue layer for stmmac.
+Please see stmmac.txt for the other unchanged properties.
+
+Required properties:
+- compatible: should be one of the following strings:
+ "allwinner,sun8i-a83t-emac"
+ "allwinner,sun8i-h3-emac"
+ "allwinner,sun8i-v3s-emac"
+ "allwinner,sun50i-a64-emac"
+- reg: address and length of the register for the device.
+- interrupts: interrupt for the device
+- interrupt-names: should be "macirq"
+- clocks: A phandle to the reference clock for this device
+- clock-names: should be "stmmaceth"
+- resets: A phandle to the reset control for this device
+- reset-names: should be "stmmaceth"
+- phy-mode: See ethernet.txt
+- phy-handle: See ethernet.txt
+- #address-cells: shall be 1
+- #size-cells: shall be 0
+- syscon: A phandle to the syscon of the SoC with one of the following
+ compatible string:
+ - allwinner,sun8i-h3-system-controller
+ - allwinner,sun8i-v3s-system-controller
+ - allwinner,sun50i-a64-system-controller
+ - allwinner,sun8i-a83t-system-controller
+
+Optional properties:
+- allwinner,tx-delay-ps: TX clock delay chain value in ps. Valid range is
+  0-700 (default: 0).
+- allwinner,rx-delay-ps: RX clock delay chain value in ps. Valid range is
+  0-3100 (default: 0).
+Both delay properties need to be a multiple of 100. They control the delay
+for the external PHY.
+
+Optional properties for the following compatibles:
+ - "allwinner,sun8i-h3-emac",
+ - "allwinner,sun8i-v3s-emac":
+- allwinner,leds-active-low: EPHY LEDs are active low
+
+Required child node of emac:
+- mdio bus node: should be named mdio
+
+Required properties of the mdio node:
+- #address-cells: shall be 1
+- #size-cells: shall be 0
+
+The device node referenced by "phy" or "phy-handle" should be a child node
+of the mdio node. See phy.txt for the generic PHY bindings.
+
+Required properties of the phy node with the following compatibles:
+ - "allwinner,sun8i-h3-emac",
+ - "allwinner,sun8i-v3s-emac":
+- clocks: a phandle to the reference clock for the EPHY
+- resets: a phandle to the reset control for the EPHY
+
+Example:
+
+emac: ethernet@1c0b000 {
+ compatible = "allwinner,sun8i-h3-emac";
+ syscon = <&syscon>;
+ reg = <0x01c0b000 0x104>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ resets = <&ccu RST_BUS_EMAC>;
+ reset-names = "stmmaceth";
+ clocks = <&ccu CLK_BUS_EMAC>;
+ clock-names = "stmmaceth";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy-handle = <&int_mii_phy>;
+ phy-mode = "mii";
+ allwinner,leds-active-low;
+ mdio: mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ int_mii_phy: ethernet-phy@1 {
+ reg = <1>;
+ clocks = <&ccu CLK_BUS_EPHY>;
+ resets = <&ccu RST_BUS_EPHY>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index 3a69169..d4abe9a 100644
--- a/Documentation/devicetree/bindings/net/ethernet.txt
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -32,6 +32,8 @@
* "2000base-x",
* "2500base-x",
* "rxaui"
+ * "xaui"
+ * "10gbase-kr" (10GBASE-KR, XFI, SFI)
- phy-connection-type: the same as "phy-mode" property but described in ePAPR;
- phy-handle: phandle, specifies a reference to a node representing a PHY
device; this property is described in ePAPR and so preferred;
diff --git a/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt b/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
index ccdabdc..42cd810 100644
--- a/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
+++ b/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
@@ -1,12 +1,14 @@
* Marvell MDIO Ethernet Controller interface
The Ethernet controllers of the Marvel Kirkwood, Dove, Orion5x,
-MV78xx0, Armada 370 and Armada XP have an identical unit that provides
-an interface with the MDIO bus. This driver handles this MDIO
-interface.
+MV78xx0, Armada 370, Armada XP, Armada 7k and Armada 8k have an
+identical unit that provides an interface with the MDIO bus.
+Additionally, Armada 7k and Armada 8k have a second unit which
+provides an interface with the xMDIO bus. This driver handles
+these interfaces.
Required properties:
-- compatible: "marvell,orion-mdio"
+- compatible: "marvell,orion-mdio" or "marvell,xmdio"
- reg: address and length of the MDIO registers. When an interrupt is
not present, the length is the size of the SMI register (4 bytes)
otherwise it must be 0x84 bytes to cover the interrupt control
diff --git a/Documentation/devicetree/bindings/net/qca,qca7000.txt b/Documentation/devicetree/bindings/net/qca,qca7000.txt
new file mode 100644
index 0000000..6d9efb2
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qca,qca7000.txt
@@ -0,0 +1,88 @@
+* Qualcomm QCA7000
+
+The QCA7000 is a serial-to-powerline bridge whose host interface can be
+configured as either an SPI or a UART slave. This configuration is done by
+the QCA7000 firmware.
+
+(a) Ethernet over SPI
+
+In order to use the QCA7000 as an SPI device it must be defined as a child
+of an SPI master in the device tree.
+
+Required properties:
+- compatible : Should be "qca,qca7000"
+- reg : Should specify the SPI chip select
+- interrupts : The first cell should specify the index of the source
+ interrupt and the second cell should specify the trigger
+ type as rising edge
+- spi-cpha : Must be set
+- spi-cpol : Must be set
+
+Optional properties:
+- interrupt-parent : Specify the phandle of the source interrupt
+- spi-max-frequency : Maximum frequency of the SPI bus the chip can operate at.
+ Numbers smaller than 1000000 or greater than 16000000
+                      are invalid. If the property is missing, the SPI
+                      frequency defaults to 8000000 Hz.
+- local-mac-address : see ./ethernet.txt
+- qca,legacy-mode : Set the SPI data transfer of the QCA7000 to legacy mode.
+ In this mode the SPI master must toggle the chip select
+ between each data word. In burst mode these gaps aren't
+ necessary, which is faster. This setting depends on how
+ the QCA7000 is setup via GPIO pin strapping. If the
+ property is missing the driver defaults to burst mode.
+
+SPI Example:
+
+/* Freescale i.MX28 SPI master*/
+ssp2: spi@80014000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx28-spi";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2_pins_a>;
+ status = "okay";
+
+ qca7000: ethernet@0 {
+ compatible = "qca,qca7000";
+ reg = <0x0>;
+ interrupt-parent = <&gpio3>; /* GPIO Bank 3 */
+ interrupts = <25 0x1>; /* Index: 25, rising edge */
+ spi-cpha; /* SPI mode: CPHA=1 */
+ spi-cpol; /* SPI mode: CPOL=1 */
+ spi-max-frequency = <8000000>; /* freq: 8 MHz */
+ local-mac-address = [ A0 B0 C0 D0 E0 F0 ];
+ };
+};
+
+(b) Ethernet over UART
+
+In order to use the QCA7000 as a UART slave it must be defined as a child of
+a UART master in the device tree. It is possible to preconfigure the UART
+settings of the QCA7000 firmware, but it's not possible to change them during
+runtime.
+
+Required properties:
+- compatible : Should be "qca,qca7000"
+
+Optional properties:
+- local-mac-address : see ./ethernet.txt
+- current-speed : current baud rate of QCA7000 which defaults to 115200
+ if absent, see also ../serial/slave-device.txt
+
+UART Example:
+
+/* Freescale i.MX28 UART */
+auart0: serial@8006a000 {
+ compatible = "fsl,imx28-auart", "fsl,imx23-auart";
+ reg = <0x8006a000 0x2000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart0_2pins_a>;
+ status = "okay";
+
+ qca7000: ethernet {
+ compatible = "qca,qca7000";
+ local-mac-address = [ A0 B0 C0 D0 E0 F0 ];
+ current-speed = <38400>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/qca-qca7000-spi.txt b/Documentation/devicetree/bindings/net/qca-qca7000-spi.txt
deleted file mode 100644
index c74989c..0000000
--- a/Documentation/devicetree/bindings/net/qca-qca7000-spi.txt
+++ /dev/null
@@ -1,47 +0,0 @@
-* Qualcomm QCA7000 (Ethernet over SPI protocol)
-
-Note: The QCA7000 is useable as a SPI device. In this case it must be defined
-as a child of a SPI master in the device tree.
-
-Required properties:
-- compatible : Should be "qca,qca7000"
-- reg : Should specify the SPI chip select
-- interrupts : The first cell should specify the index of the source interrupt
- and the second cell should specify the trigger type as rising edge
-- spi-cpha : Must be set
-- spi-cpol: Must be set
-
-Optional properties:
-- interrupt-parent : Specify the pHandle of the source interrupt
-- spi-max-frequency : Maximum frequency of the SPI bus the chip can operate at.
- Numbers smaller than 1000000 or greater than 16000000 are invalid. Missing
- the property will set the SPI frequency to 8000000 Hertz.
-- local-mac-address: 6 bytes, MAC address
-- qca,legacy-mode : Set the SPI data transfer of the QCA7000 to legacy mode.
- In this mode the SPI master must toggle the chip select between each data
- word. In burst mode these gaps aren't necessary, which is faster.
- This setting depends on how the QCA7000 is setup via GPIO pin strapping.
- If the property is missing the driver defaults to burst mode.
-
-Example:
-
-/* Freescale i.MX28 SPI master*/
-ssp2: spi@80014000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,imx28-spi";
- pinctrl-names = "default";
- pinctrl-0 = <&spi2_pins_a>;
- status = "okay";
-
- qca7000: ethernet@0 {
- compatible = "qca,qca7000";
- reg = <0x0>;
- interrupt-parent = <&gpio3>; /* GPIO Bank 3 */
- interrupts = <25 0x1>; /* Index: 25, rising edge */
- spi-cpha; /* SPI mode: CPHA=1 */
- spi-cpol; /* SPI mode: CPOL=1 */
- spi-max-frequency = <8000000>; /* freq: 8 MHz */
- local-mac-address = [ A0 B0 C0 D0 E0 F0 ];
- };
-};
diff --git a/Documentation/devicetree/bindings/net/ti,wilink-st.txt b/Documentation/devicetree/bindings/net/ti,wilink-st.txt
index cbad73a..1649c1f 100644
--- a/Documentation/devicetree/bindings/net/ti,wilink-st.txt
+++ b/Documentation/devicetree/bindings/net/ti,wilink-st.txt
@@ -14,6 +14,12 @@
- compatible: should be one of the following:
"ti,wl1271-st"
"ti,wl1273-st"
+ "ti,wl1281-st"
+ "ti,wl1283-st"
+ "ti,wl1285-st"
+ "ti,wl1801-st"
+ "ti,wl1805-st"
+ "ti,wl1807-st"
"ti,wl1831-st"
"ti,wl1835-st"
"ti,wl1837-st"
@@ -22,6 +28,10 @@
- enable-gpios : GPIO signal controlling enabling of BT. Active high.
- vio-supply : Vio input supply (1.8V)
- vbat-supply : Vbat input supply (2.9-4.8V)
+ - clocks : Must contain an entry for each entry in clock-names.
+ See ../clocks/clock-bindings.txt for details.
+ - clock-names : Must include the following entry:
+ "ext_clock" (External clock provided to the TI combo chip).
Example:
@@ -31,5 +41,7 @@
bluetooth {
compatible = "ti,wl1835-st";
enable-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+ clocks = <&clk32k_wl18xx>;
+ clock-names = "ext_clock";
};
};
diff --git a/Documentation/devicetree/bindings/ptp/brcm,ptp-dte.txt b/Documentation/devicetree/bindings/ptp/brcm,ptp-dte.txt
new file mode 100644
index 0000000..07590bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/ptp/brcm,ptp-dte.txt
@@ -0,0 +1,13 @@
+* Broadcom Digital Timing Engine (DTE) based PTP clock driver
+
+Required properties:
+- compatible: should be "brcm,ptp-dte"
+- reg: address and length of the DTE block's NCO registers
+
+Example:
+
+ptp_dte: ptp_dte@180af650 {
+ compatible = "brcm,ptp-dte";
+ reg = <0x180af650 0x10>;
+ status = "okay";
+};
diff --git a/Documentation/devicetree/bindings/serial/slave-device.txt b/Documentation/devicetree/bindings/serial/slave-device.txt
index f660379..40110e0 100644
--- a/Documentation/devicetree/bindings/serial/slave-device.txt
+++ b/Documentation/devicetree/bindings/serial/slave-device.txt
@@ -21,6 +21,15 @@
can support. For example, a particular board has some signal
quality issue or the host processor can't support higher
baud rates.
+- current-speed : The current baud rate the device operates at. This should
+		  only be present when the driver has no other way of knowing
+ the baud rate of the slave device.
+ Examples:
+ * device supports auto-baud
+		    * the rate is set up by a bootloader and there is no
+ way to reset the device
+ * device baud rate is configured by its firmware but
+ there is no way to request the actual settings
Example:
diff --git a/Documentation/networking/checksum-offloads.txt b/Documentation/networking/checksum-offloads.txt
index 56e3686..d52d191 100644
--- a/Documentation/networking/checksum-offloads.txt
+++ b/Documentation/networking/checksum-offloads.txt
@@ -35,6 +35,9 @@
encapsulation is used, the packet may have multiple checksum fields in
different header layers, and the rest will have to be handled by another
mechanism such as LCO or RCO.
+CRC32c can also be offloaded using this interface, by means of filling
+ skb->csum_start and skb->csum_offset as described above, and setting
+ skb->csum_not_inet: see skbuff.h comment (section 'D') for more details.
No offloading of the IP header checksum is performed; it is always done in
software. This is OK because when we build the IP header, we obviously
have it in cache, so summing it isn't expensive. It's also rather short.
@@ -49,9 +52,9 @@
and csum_offset given in the SKB; if it tries to deduce these itself in
hardware (as some NICs do) the driver should check that the values in the
SKB match those which the hardware will deduce, and if not, fall back to
- checksumming in software instead (with skb_checksum_help or one of the
- skb_csum_off_chk* functions as mentioned in include/linux/skbuff.h). This
- is a pain, but that's what you get when hardware tries to be clever.
+ checksumming in software instead (with skb_csum_hwoffload_help() or one of
+ the skb_checksum_help() / skb_crc32c_csum_help() functions, as mentioned in
+ include/linux/skbuff.h).
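+
+ As a rough sketch (hw_csum_supported() here stands for whatever capability
+ check the hardware needs; it is not a real kernel helper), a driver's
+ transmit path might do:
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL &&
+	    !hw_csum_supported(skb) &&
+	    skb_csum_hwoffload_help(skb, 0))
+		goto drop;
+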
The stack should, for the most part, assume that checksum offload is
supported by the underlying device. The only place that should check is
@@ -60,7 +63,7 @@
may include other offloads besides TX Checksum Offload) and, if they are
not supported or enabled on the device (determined by netdev->features),
performs the corresponding offload in software. In the case of TX
- Checksum Offload, that means calling skb_checksum_help(skb).
+ Checksum Offload, that means calling skb_csum_hwoffload_help(skb, features).
LCO: Local Checksum Offload
diff --git a/Documentation/networking/i40evf.txt b/Documentation/networking/i40evf.txt
index 21e4127..e9b3035 100644
--- a/Documentation/networking/i40evf.txt
+++ b/Documentation/networking/i40evf.txt
@@ -1,8 +1,8 @@
Linux* Base Driver for Intel(R) Network Connection
==================================================
-Intel XL710 X710 Virtual Function Linux driver.
-Copyright(c) 2013 Intel Corporation.
+Intel Ethernet Adaptive Virtual Function Linux driver.
+Copyright(c) 2013-2017 Intel Corporation.
Contents
========
@@ -11,19 +11,26 @@
- Known Issues/Troubleshooting
- Support
-This file describes the i40evf Linux* Base Driver for the Intel(R) XL710
-X710 Virtual Function.
+This file describes the i40evf Linux* Base Driver.
-The i40evf driver supports XL710 and X710 virtual function devices that
-can only be activated on kernels with CONFIG_PCI_IOV enabled.
+The i40evf driver supports the virtual function devices listed below,
+and can only be activated on kernels running the i40e or
+newer Physical Function (PF) driver compiled with CONFIG_PCI_IOV.
+The i40evf driver requires CONFIG_PCI_MSI to be enabled.
The guest OS loading the i40evf driver must support MSI-X interrupts.
+Supported Hardware
+==================
+Intel XL710 X710 Virtual Function
+Intel Ethernet Adaptive Virtual Function
+Intel X722 Virtual Function
+
Identifying Your Adapter
========================
-For more information on how to identify your adapter, go to the Adapter &
-Driver ID Guide at:
+For more information on how to identify your adapter, go to the
+Adapter & Driver ID Guide at:
http://support.intel.com/support/go/network/adapter/idguide.htm
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
index 16f90d8..bdec0f7 100644
--- a/Documentation/networking/phy.txt
+++ b/Documentation/networking/phy.txt
@@ -295,7 +295,6 @@
settings in the PHY.
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
- int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
Ethtool convenience functions.
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index 1b63bbc..8c70ba5 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -325,6 +325,9 @@
RXRPC_LOCAL_ERROR -rt error num Local error encountered
RXRPC_NEW_CALL -r- n/a New call received
RXRPC_ACCEPT s-- n/a Accept new call
+ RXRPC_EXCLUSIVE_CALL s-- n/a Make an exclusive client call
+ RXRPC_UPGRADE_SERVICE s-- n/a Client call can be upgraded
+ RXRPC_TX_LENGTH s-- data len Total length of Tx data
(SRT = usable in Sendmsg / delivered by Recvmsg / Terminal message)
@@ -387,6 +390,40 @@
return error ENODATA. If the user ID is already in use by another call,
then error EBADSLT will be returned.
+ (*) RXRPC_EXCLUSIVE_CALL
+
+ This is used to indicate that a client call should be made on a one-off
+ connection. The connection is discarded once the call has terminated.
+
+ (*) RXRPC_UPGRADE_SERVICE
+
+ This is used to make a client call to probe if the specified service ID
+ may be upgraded by the server. The caller must check msg_name returned to
+ recvmsg() for the service ID actually in use. The operation probed must
+ be one that takes the same arguments in both services.
+
+ Once this has been used to establish the upgrade capability (or lack
+ thereof) of the server, the service ID returned should be used for all
+ future communication to that server and RXRPC_UPGRADE_SERVICE should no
+ longer be set.
+
+ (*) RXRPC_TX_LENGTH
+
+ This is used to inform the kernel of the total amount of data that is
+ going to be transmitted by a call (whether in a client request or a
+ service response). If given, it allows the kernel to encrypt from the
+ userspace buffer directly to the packet buffers, rather than copying into
+ the buffer and then encrypting in place. This may only be given with the
+ first sendmsg() providing data for a call. EMSGSIZE will be generated if
+ the amount of data actually given is different.
+
+ This takes a parameter of __s64 type that indicates how much will be
+ transmitted. This may not be less than zero.
+
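+     A minimal sketch of attaching this control message to the first
+     sendmsg() of a call (the accompanying RXRPC_USER_CALL_ID control
+     message is omitted for brevity):
+
+	__s64 len = 1024;
+	char control[CMSG_SPACE(sizeof(len))];
+	struct msghdr msg = { 0 };
+	struct cmsghdr *cmsg;
+
+	msg.msg_control = control;
+	msg.msg_controllen = sizeof(control);
+	cmsg = CMSG_FIRSTHDR(&msg);
+	cmsg->cmsg_level = SOL_RXRPC;
+	cmsg->cmsg_type = RXRPC_TX_LENGTH;
+	cmsg->cmsg_len = CMSG_LEN(sizeof(len));
+	memcpy(CMSG_DATA(cmsg), &len, sizeof(len));
+	msg.msg_controllen = cmsg->cmsg_len;
+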
+The symbol RXRPC__SUPPORTED is defined as one more than the highest control
+message type supported. At run time this can be queried by means of the
+RXRPC_SUPPORTED_CMSG socket option (see below).
+
==============
SOCKET OPTIONS
@@ -433,6 +470,18 @@
Encrypted checksum plus entire packet padded and encrypted, including
actual packet length.
+ (*) RXRPC_UPGRADEABLE_SERVICE
+
+ This is used to indicate that a service socket with two bindings may
+ upgrade one bound service to the other if requested by the client. optval
+ must point to an array of two unsigned short ints. The first is the
+ service ID to upgrade from and the second the service ID to upgrade to.
+
+ (*) RXRPC_SUPPORTED_CMSG
+
+ This is a read-only option that writes an int into the buffer indicating
+ the highest control message type supported.
+
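+     For example (a sketch; fd is an AF_RXRPC socket):
+
+	int max_cmsg;
+	socklen_t optlen = sizeof(max_cmsg);
+
+	getsockopt(fd, SOL_RXRPC, RXRPC_SUPPORTED_CMSG, &max_cmsg, &optlen);
+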
========
SECURITY
@@ -542,6 +591,9 @@
MSG_MORE should be set in msghdr::msg_flags on all but the last part of
the request. Multiple requests may be made simultaneously.
+ An RXRPC_TX_LENGTH control message can also be specified on the first
+ sendmsg() call.
+
If a call is intended to go to a destination other than the default
specified through connect(), then msghdr::msg_name should be set on the
first request message of that call.
@@ -559,6 +611,17 @@
buffer instead, and MSG_EOR will be flagged to indicate the end of that
call.
+A client may ask for a service ID it knows and ask that this be upgraded to a
+better service if one is available by supplying RXRPC_UPGRADE_SERVICE on the
+first sendmsg() of a call. The client should then check srx_service in the
+msg_name filled in by recvmsg() when collecting the result. srx_service will
+hold the same value as given to sendmsg() if the upgrade request was ignored by
+the service - otherwise it will be altered to indicate the service ID the
+server upgraded to. Note that the upgraded service ID is chosen by the server.
+The caller has to wait until it sees the service ID in the reply before sending
+any more calls (further calls to the same destination will be blocked until the
+probe is concluded).
+
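+A sketch of the client-side check, assuming msg_name was pointed at a
+struct sockaddr_rxrpc called srx for the recvmsg() call:
+
+	if (srx.srx_service != requested_service) {
+		/* The server upgraded us; use srx.srx_service for all
+		 * future calls to this server.
+		 */
+	}
+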
====================
EXAMPLE SERVER USAGE
@@ -588,7 +651,7 @@
The keyring can be manipulated after it has been given to the socket. This
permits the server to add more keys, replace keys, etc. whilst it is live.
- (2) A local address must then be bound:
+ (3) A local address must then be bound:
struct sockaddr_rxrpc srx = {
.srx_family = AF_RXRPC,
@@ -600,11 +663,26 @@
};
bind(server, &srx, sizeof(srx));
- (3) The server is then set to listen out for incoming calls:
+ More than one service ID may be bound to a socket, provided the transport
+ parameters are the same. The limit is currently two. To do this, bind()
+ should be called twice.
+
+ (4) If service upgrading is required, first two service IDs must have been
+ bound and then the following option must be set:
+
+ unsigned short service_ids[2] = { from_ID, to_ID };
+ setsockopt(server, SOL_RXRPC, RXRPC_UPGRADEABLE_SERVICE,
+ service_ids, sizeof(service_ids));
+
+ This will automatically upgrade connections on service from_ID to service
+ to_ID if they request it. This will be reflected in msg_name obtained
+ through recvmsg() when the request data is delivered to userspace.
+
+ (5) The server is then set to listen out for incoming calls:
listen(server, 100);
- (4) The kernel notifies the server of pending incoming connections by sending
+ (6) The kernel notifies the server of pending incoming connections by sending
it a message for each. This is received with recvmsg() on the server
socket. It has no data, and has a single dataless control message
attached:
@@ -616,13 +694,13 @@
the time it is accepted - in which case the first call still on the queue
will be accepted.
- (5) The server then accepts the new call by issuing a sendmsg() with two
+ (7) The server then accepts the new call by issuing a sendmsg() with two
pieces of control data and no actual data:
RXRPC_ACCEPT - indicate connection acceptance
RXRPC_USER_CALL_ID - specify user ID for this call
- (6) The first request data packet will then be posted to the server socket for
+ (8) The first request data packet will then be posted to the server socket for
recvmsg() to pick up. At that point, the RxRPC address for the call can
be read from the address fields in the msghdr struct.
@@ -634,7 +712,7 @@
RXRPC_USER_CALL_ID - specifies the user ID for this call
- (8) The reply data should then be posted to the server socket using a series
+ (9) The reply data should then be posted to the server socket using a series
of sendmsg() calls, each with the following control messages attached:
RXRPC_USER_CALL_ID - specifies the user ID for this call
@@ -642,7 +720,7 @@
MSG_MORE should be set in msghdr::msg_flags on all but the last message
for a particular call.
- (9) The final ACK from the client will be posted for retrieval by recvmsg()
+(10) The final ACK from the client will be posted for retrieval by recvmsg()
when it is received. It will take the form of a dataless message with two
control messages attached:
@@ -652,7 +730,7 @@
MSG_EOR will be flagged to indicate that this is the final message for
this call.
-(10) Up to the point the final packet of reply data is sent, the call can be
+(11) Up to the point the final packet of reply data is sent, the call can be
aborted by calling sendmsg() with a dataless message with the following
control messages attached:
@@ -703,6 +781,7 @@
struct sockaddr_rxrpc *srx,
struct key *key,
unsigned long user_call_ID,
+ s64 tx_total_len,
gfp_t gfp);
This allocates the infrastructure to make a new RxRPC call and assigns
@@ -719,6 +798,11 @@
control data buffer. It is entirely feasible to use this to point to a
kernel data structure.
+ tx_total_len is the amount of data the caller is intending to transmit
+ with this call (or -1 if unknown at this point). Setting the data size
+ allows the kernel to encrypt directly to the packet buffers, thereby
+ saving a copy. The value may not be less than -1.
+
If this function is successful, an opaque reference to the RxRPC call is
returned. The caller now holds a reference on this and it must be
properly ended.
@@ -870,6 +954,17 @@
This is used to find the remote peer address of a call.
+ (*) Set the total transmit data size on a call.
+
+ void rxrpc_kernel_set_tx_length(struct socket *sock,
+ struct rxrpc_call *call,
+ s64 tx_total_len);
+
+ This sets the amount of data that the caller is intending to transmit on a
+ call. It's intended to be used for setting the reply size as the request
+ size should be set when the call is begun. tx_total_len may not be less
+ than zero.
+
=======================
CONFIGURABLE PARAMETERS
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 96f5069..196ba17 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -193,6 +193,24 @@
the transmit timestamps, such as how long a certain block of
data was limited by peer's receiver window.
+SOF_TIMESTAMPING_OPT_PKTINFO:
+
+ Enable the SCM_TIMESTAMPING_PKTINFO control message for incoming
+ packets with hardware timestamps. The message contains struct
+ scm_ts_pktinfo, which supplies the index of the real interface which
+ received the packet and its length at layer 2. A valid (non-zero)
+ interface index will be returned only if CONFIG_NET_RX_BUSY_POLL is
+  enabled and the driver is using NAPI. The struct also contains two
+ other fields, but they are reserved and undefined.
+
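+  A sketch of extracting the information from the ancillary data of a
+  recvmsg() call (msg is the struct msghdr used for the receive):
+
+    struct scm_ts_pktinfo *pktinfo;
+    struct cmsghdr *cm;
+
+    for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
+      if (cm->cmsg_level == SOL_SOCKET &&
+          cm->cmsg_type == SCM_TIMESTAMPING_PKTINFO) {
+        pktinfo = (struct scm_ts_pktinfo *)CMSG_DATA(cm);
+        printf("ifindex=%u len=%u\n",
+               pktinfo->if_index, pktinfo->pkt_length);
+      }
+    }
+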
+SOF_TIMESTAMPING_OPT_TX_SWHW:
+
+ Request both hardware and software timestamps for outgoing packets
+ when SOF_TIMESTAMPING_TX_HARDWARE and SOF_TIMESTAMPING_TX_SOFTWARE
+ are enabled at the same time. If both timestamps are generated,
+ two separate messages will be looped to the socket's error queue,
+ each containing just one timestamp.
+
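+  A sketch of requesting both timestamps on an existing socket fd:
+
+    int flags = SOF_TIMESTAMPING_TX_HARDWARE |
+                SOF_TIMESTAMPING_TX_SOFTWARE |
+                SOF_TIMESTAMPING_OPT_TX_SWHW;
+
+    setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
+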
New applications are encouraged to pass SOF_TIMESTAMPING_OPT_ID to
disambiguate timestamps and SOF_TIMESTAMPING_OPT_TSONLY to operate
regardless of the setting of sysctl net.core.tstamp_allow_data.
@@ -312,7 +330,7 @@
};
The structure can return up to three timestamps. This is a legacy
-feature. Only one field is non-zero at any time. Most timestamps
+feature. At least one field is non-zero at any time. Most timestamps
are passed in ts[0]. Hardware timestamps are passed in ts[2].
ts[1] used to hold hardware timestamps converted to system time.
@@ -321,6 +339,12 @@
optionally synchronize system time with a userspace PTP stack such
as linuxptp. For the PTP clock API, see Documentation/ptp/ptp.txt.
+Note that if the SO_TIMESTAMP or SO_TIMESTAMPNS option is enabled
+together with SO_TIMESTAMPING using SOF_TIMESTAMPING_SOFTWARE, a false
+software timestamp will be generated in the recvmsg() call and passed
+in ts[0] when a real software timestamp is missing. This also happens
+with hardware transmit timestamps.
+
2.1.1 Transmit timestamps with MSG_ERRQUEUE
For transmit timestamps the outgoing packet is looped back to the
diff --git a/Documentation/networking/tls.txt b/Documentation/networking/tls.txt
new file mode 100644
index 0000000..77ed006
--- /dev/null
+++ b/Documentation/networking/tls.txt
@@ -0,0 +1,135 @@
+Overview
+========
+
+Transport Layer Security (TLS) is an Upper Layer Protocol (ULP) that runs over
+TCP. TLS provides end-to-end data integrity and confidentiality.
+
+User interface
+==============
+
+Creating a TLS connection
+-------------------------
+
+First create a new TCP socket and set the TLS ULP.
+
+ sock = socket(AF_INET, SOCK_STREAM, 0);
+ setsockopt(sock, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
+
+Setting the TLS ULP allows us to set/get TLS socket options. Currently
+only symmetric encryption is handled in the kernel. After the TLS
+handshake is complete, we have all the parameters required to move the
+data-path to the kernel. There is a separate socket option for moving
+the transmit and the receive into the kernel.
+
+ /* From linux/tls.h */
+ struct tls_crypto_info {
+ unsigned short version;
+ unsigned short cipher_type;
+ };
+
+ struct tls12_crypto_info_aes_gcm_128 {
+ struct tls_crypto_info info;
+ unsigned char iv[TLS_CIPHER_AES_GCM_128_IV_SIZE];
+ unsigned char key[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
+ unsigned char salt[TLS_CIPHER_AES_GCM_128_SALT_SIZE];
+ unsigned char rec_seq[TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE];
+ };
+
+
+ struct tls12_crypto_info_aes_gcm_128 crypto_info;
+
+ crypto_info.info.version = TLS_1_2_VERSION;
+ crypto_info.info.cipher_type = TLS_CIPHER_AES_GCM_128;
+ memcpy(crypto_info.iv, iv_write, TLS_CIPHER_AES_GCM_128_IV_SIZE);
+ memcpy(crypto_info.rec_seq, seq_number_write,
+ TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
+ memcpy(crypto_info.key, cipher_key_write, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+ memcpy(crypto_info.salt, implicit_iv_write, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+
+ setsockopt(sock, SOL_TLS, TLS_TX, &crypto_info, sizeof(crypto_info));
+
+Sending TLS application data
+----------------------------
+
+After setting the TLS_TX socket option all application data sent over this
+socket is encrypted using TLS and the parameters provided in the socket option.
+For example, we can send an encrypted hello world record as follows:
+
+ const char *msg = "hello world\n";
+  send(sock, msg, strlen(msg), 0);
+
+Where possible, the data passed to send() is encrypted directly from the
+userspace buffer into the kernel send buffer.
+
+The sendfile system call will send the file's data over TLS records of maximum
+length (2^14).
+
+ file = open(filename, O_RDONLY);
+ fstat(file, &stat);
+ sendfile(sock, file, &offset, stat.st_size);
+
+TLS records are created and sent after each send() call, unless
+MSG_MORE is passed. MSG_MORE will delay creation of a record until
+MSG_MORE is not passed, or the maximum record size is reached.
+
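+For example, the following sketch coalesces a header and a body into a
+single record:
+
+  send(sock, hdr, hdr_len, MSG_MORE);
+  send(sock, body, body_len, 0);  /* record created and sent here */
+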
+The kernel will need to allocate a buffer for the encrypted data.
+This buffer is allocated at the time send() is called, such that
+either the entire send() call will return -ENOMEM (or block waiting
+for memory), or the encryption will always succeed. If send() returns
+-ENOMEM and some data was left on the socket buffer from a previous
+call using MSG_MORE, the MSG_MORE data is left on the socket buffer.
+
+Send TLS control messages
+-------------------------
+
+Other than application data, TLS has control messages such as alert
+messages (record type 21) and handshake messages (record type 22), etc.
+These messages can be sent over the socket by providing the TLS record type
+via a CMSG. For example, the following function sends @data of @length bytes
+using a record of type @record_type.
+
+/* send TLS control message using record_type */
+  static int ktls_send_ctrl_message(int sock, unsigned char record_type,
+ void *data, size_t length)
+ {
+ struct msghdr msg = {0};
+ int cmsg_len = sizeof(record_type);
+ struct cmsghdr *cmsg;
+ char buf[CMSG_SPACE(cmsg_len)];
+ struct iovec msg_iov; /* Vector of data to send/receive into. */
+
+ msg.msg_control = buf;
+ msg.msg_controllen = sizeof(buf);
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_TLS;
+ cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
+ cmsg->cmsg_len = CMSG_LEN(cmsg_len);
+ *CMSG_DATA(cmsg) = record_type;
+ msg.msg_controllen = cmsg->cmsg_len;
+
+ msg_iov.iov_base = data;
+ msg_iov.iov_len = length;
+ msg.msg_iov = &msg_iov;
+ msg.msg_iovlen = 1;
+
+ return sendmsg(sock, &msg, 0);
+ }
+
+Control message data should be provided unencrypted, and will be
+encrypted by the kernel.
+
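+For example, a close_notify alert (record type 21) could be sent with the
+function above as follows (the alert level/description bytes are shown as
+an illustration):
+
+  char alert[2] = { 1 /* warning */, 0 /* close_notify */ };
+
+  ktls_send_ctrl_message(sock, 21, alert, sizeof(alert));
+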
+Integrating into a userspace TLS library
+----------------------------------------
+
+At a high level, the kernel TLS ULP is a replacement for the record
+layer of a userspace TLS library.
+
+A patchset to OpenSSL to use ktls as the record layer is here:
+
+https://github.com/Mellanox/tls-openssl
+
+An example of calling send() directly after a handshake using
+gnutls (since the tool doesn't implement a full record layer, control
+messages are not supported):
+
+https://github.com/Mellanox/tls-af_ktls_tool
diff --git a/MAINTAINERS b/MAINTAINERS
index 09b5ab6..71a7455 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -155,7 +155,7 @@
F: drivers/scsi/53c700*
6LOWPAN GENERIC (BTLE/IEEE 802.15.4)
-M: Alexander Aring <aar@pengutronix.de>
+M: Alexander Aring <alex.aring@gmail.com>
M: Jukka Rissanen <jukka.rissanen@linux.intel.com>
L: linux-bluetooth@vger.kernel.org
L: linux-wpan@vger.kernel.org
@@ -6426,7 +6426,7 @@
F: drivers/ide/ide-cd*
IEEE 802.15.4 SUBSYSTEM
-M: Alexander Aring <aar@pengutronix.de>
+M: Alexander Aring <alex.aring@gmail.com>
M: Stefan Schmidt <stefan@osg.samsung.com>
L: linux-wpan@vger.kernel.org
W: http://wpan.cakelab.org/
@@ -6737,6 +6737,7 @@
F: Documentation/networking/i40evf.txt
F: drivers/net/ethernet/intel/
F: drivers/net/ethernet/intel/*/
+F: include/linux/avf/virtchnl.h
INTEL RDMA RNIC DRIVER
M: Faisal Latif <faisal.latif@intel.com>
@@ -7977,6 +7978,12 @@
F: drivers/net/ethernet/marvell/mv643xx_eth.*
F: include/linux/mv643xx.h
+MARVELL MV88X3310 PHY DRIVER
+M: Russell King <rmk@armlinux.org.uk>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/phy/marvell10g.c
+
MARVELL MVNETA ETHERNET DRIVER
M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
L: netdev@vger.kernel.org
@@ -8310,6 +8317,16 @@
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlx5/core/en_*
+MELLANOX ETHERNET INNOVA DRIVER
+M: Ilan Tayari <ilant@mellanox.com>
+R: Boris Pismenny <borisp@mellanox.com>
+L: netdev@vger.kernel.org
+S: Supported
+W: http://www.mellanox.com
+Q: http://patchwork.ozlabs.org/project/netdev/list/
+F: drivers/net/ethernet/mellanox/mlx5/core/fpga/*
+F: include/linux/mlx5/mlx5_ifc_fpga.h
+
MELLANOX ETHERNET SWITCH DRIVERS
M: Jiri Pirko <jiri@mellanox.com>
M: Ido Schimmel <idosch@mellanox.com>
@@ -8319,6 +8336,14 @@
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlxsw/
+MELLANOX FIRMWARE FLASH LIBRARY (mlxfw)
+M: Yotam Gigi <yotamg@mellanox.com>
+L: netdev@vger.kernel.org
+S: Supported
+W: http://www.mellanox.com
+Q: http://patchwork.ozlabs.org/project/netdev/list/
+F: drivers/net/ethernet/mellanox/mlxfw/
+
MELLANOX MLXCPLD I2C AND MUX DRIVER
M: Vadim Pasternak <vadimp@mellanox.com>
M: Michael Shych <michaelsh@mellanox.com>
@@ -8460,6 +8485,16 @@
F: drivers/media/platform/atmel/atmel-isc-regs.h
F: devicetree/bindings/media/atmel-isc.txt
+MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
+M: Woojung Huh <Woojung.Huh@microchip.com>
+M: Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: net/dsa/tag_ksz.c
+F: drivers/net/dsa/microchip/*
+F: include/linux/platform_data/microchip-ksz.h
+F: Documentation/devicetree/bindings/net/dsa/ksz.txt
+
MICROCHIP USB251XB DRIVER
M: Richard Leitner <richard.leitner@skidata.com>
L: linux-usb@vger.kernel.org
@@ -8943,6 +8978,16 @@
F: include/net/ip*
F: arch/x86/net/*
+NETWORKING [TLS]
+M: Ilya Lesokhin <ilyal@mellanox.com>
+M: Aviad Yehezkel <aviadye@mellanox.com>
+M: Dave Watson <davejwatson@fb.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: net/tls/*
+F: include/uapi/linux/tls.h
+F: include/net/tls.h
+
NETWORKING [IPSEC]
M: Steffen Klassert <steffen.klassert@secunet.com>
M: Herbert Xu <herbert@gondor.apana.org.au>
@@ -10598,6 +10643,14 @@
S: Maintained
F: drivers/firmware/qemu_fw_cfg.c
+QUANTENNA QTNFMAC WIRELESS DRIVER
+M: Igor Mitsyanko <imitsyanko@quantenna.com>
+M: Avinash Patil <avinashp@quantenna.com>
+M: Sergey Matyukevich <smatyukevich@quantenna.com>
+L: linux-wireless@vger.kernel.org
+S: Maintained
+F: drivers/net/wireless/quantenna
+
RADOS BLOCK DEVICE (RBD)
M: Ilya Dryomov <idryomov@gmail.com>
M: Sage Weil <sage@redhat.com>
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 148d7a3..7b285dd 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -105,4 +105,8 @@
#define SO_COOKIE 57
+#define SCM_TIMESTAMPING_PKTINFO 58
+
+#define SO_PEERGROUPS 59
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts b/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts
index 9e8b082..dd3525a 100644
--- a/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts
+++ b/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts
@@ -57,6 +57,7 @@
aliases {
serial0 = &uart0;
/* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
+ ethernet0 = &emac;
ethernet1 = &xr819;
};
@@ -103,6 +104,13 @@
status = "okay";
};
+&emac {
+ phy-handle = <&int_mii_phy>;
+ phy-mode = "mii";
+ allwinner,leds-active-low;
+ status = "okay";
+};
+
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins_a>;
diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts b/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
index 8d2cc6e..78f6c24 100644
--- a/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
+++ b/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
@@ -46,3 +46,10 @@
model = "FriendlyARM NanoPi NEO";
compatible = "friendlyarm,nanopi-neo", "allwinner,sun8i-h3";
};
+
+&emac {
+ phy-handle = <&int_mii_phy>;
+ phy-mode = "mii";
+ allwinner,leds-active-low;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
index 5b6d145..cedd326 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
@@ -54,6 +54,7 @@
aliases {
serial0 = &uart0;
/* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
+ ethernet0 = &emac;
ethernet1 = &rtl8189;
};
@@ -108,6 +109,13 @@
status = "okay";
};
+&emac {
+ phy-handle = <&int_mii_phy>;
+ phy-mode = "mii";
+ allwinner,leds-active-low;
+ status = "okay";
+};
+
&ir {
pinctrl-names = "default";
pinctrl-0 = <&ir_pins_a>;
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
index 5fea430..6880268 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
@@ -52,6 +52,7 @@
compatible = "xunlong,orangepi-one", "allwinner,sun8i-h3";
aliases {
+ ethernet0 = &emac;
serial0 = &uart0;
};
@@ -97,6 +98,13 @@
status = "okay";
};
+&emac {
+ phy-handle = <&int_mii_phy>;
+ phy-mode = "mii";
+ allwinner,leds-active-low;
+ status = "okay";
+};
+
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts
index 8b93f5c..a10281b 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts
@@ -53,6 +53,11 @@
};
};
+&emac {
+ /* LEDs changed to active high on the plus */
+ /delete-property/ allwinner,leds-active-low;
+};
+
&mmc1 {
pinctrl-names = "default";
pinctrl-0 = <&mmc1_pins_a>;
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts
index f148111..52e6575 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts
@@ -52,6 +52,7 @@
compatible = "xunlong,orangepi-pc", "allwinner,sun8i-h3";
aliases {
+ ethernet0 = &emac;
serial0 = &uart0;
};
@@ -109,6 +110,13 @@
status = "okay";
};
+&emac {
+ phy-handle = <&int_mii_phy>;
+ phy-mode = "mii";
+ allwinner,leds-active-low;
+ status = "okay";
+};
+
&ir {
pinctrl-names = "default";
pinctrl-0 = <&ir_pins_a>;
diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
index d4f600d..a6d4fda 100644
--- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi
+++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
@@ -83,6 +83,12 @@
#size-cells = <1>;
ranges;
+ syscon: syscon@1c00000 {
+ compatible = "allwinner,sun8i-h3-system-controller",
+ "syscon";
+ reg = <0x01c00000 0x1000>;
+ };
+
dma: dma-controller@01c02000 {
compatible = "allwinner,sun8i-h3-dma";
reg = <0x01c02000 0x1000>;
@@ -279,6 +285,14 @@
interrupt-controller;
#interrupt-cells = <3>;
+ emac_rgmii_pins: emac0 {
+ pins = "PD0", "PD1", "PD2", "PD3", "PD4",
+ "PD5", "PD7", "PD8", "PD9", "PD10",
+ "PD12", "PD13", "PD15", "PD16", "PD17";
+ function = "emac";
+ drive-strength = <40>;
+ };
+
i2c0_pins: i2c0 {
pins = "PA11", "PA12";
function = "i2c0";
@@ -375,6 +389,32 @@
clocks = <&osc24M>;
};
+ emac: ethernet@1c30000 {
+ compatible = "allwinner,sun8i-h3-emac";
+ syscon = <&syscon>;
+ reg = <0x01c30000 0x104>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ resets = <&ccu RST_BUS_EMAC>;
+ reset-names = "stmmaceth";
+ clocks = <&ccu CLK_BUS_EMAC>;
+ clock-names = "stmmaceth";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ mdio: mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ int_mii_phy: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ clocks = <&ccu CLK_BUS_EPHY>;
+ resets = <&ccu RST_BUS_EPHY>;
+ };
+ };
+ };
+
spi0: spi@01c68000 {
compatible = "allwinner,sun8i-h3-spi";
reg = <0x01c68000 0x1000>;
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 2685e03..6da6af8 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -257,6 +257,7 @@
CONFIG_STMMAC_ETH=y
CONFIG_STMMAC_PLATFORM=y
CONFIG_DWMAC_DWC_QOS_ETH=y
+CONFIG_DWMAC_SUN8I=y
CONFIG_TI_CPSW=y
CONFIG_XILINX_EMACLITE=y
CONFIG_AT803X_PHY=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index 5cd5dd70..504e022 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -40,6 +40,7 @@
CONFIG_AHCI_SUNXI=y
CONFIG_NETDEVICES=y
CONFIG_SUN4I_EMAC=y
+CONFIG_DWMAC_SUN8I=y
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
index 6872135..0d1f026 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
@@ -67,6 +67,14 @@
};
};
+&emac {
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_pins>;
+ phy-mode = "rgmii";
+ phy-handle = <&ext_rgmii_phy>;
+ status = "okay";
+};
+
&i2c1 {
pinctrl-names = "default";
pinctrl-0 = <&i2c1_pins>;
@@ -77,6 +85,13 @@
bias-pull-up;
};
+&mdio {
+ ext_rgmii_phy: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ };
+};
+
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
index 790d14d..24f1aac 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
@@ -46,5 +46,20 @@
model = "Pine64+";
compatible = "pine64,pine64-plus", "allwinner,sun50i-a64";
- /* TODO: Camera, Ethernet PHY, touchscreen, etc. */
+ /* TODO: Camera, touchscreen, etc. */
+};
+
+&emac {
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_pins>;
+ phy-mode = "rgmii";
+ phy-handle = <&ext_rgmii_phy>;
+ status = "okay";
+};
+
+&mdio {
+ ext_rgmii_phy: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ };
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
index c680ed3..3b491c0 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
@@ -70,6 +70,15 @@
status = "okay";
};
+&emac {
+ pinctrl-names = "default";
+ pinctrl-0 = <&rmii_pins>;
+ phy-mode = "rmii";
+ phy-handle = <&ext_rmii_phy1>;
+ status = "okay";
+};
+
&i2c1 {
pinctrl-names = "default";
pinctrl-0 = <&i2c1_pins>;
@@ -80,6 +89,13 @@
bias-pull-up;
};
+&mdio {
+ ext_rmii_phy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ };
+};
+
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index 166c9ef..769ced0 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -129,6 +129,12 @@
#size-cells = <1>;
ranges;
+ syscon: syscon@1c00000 {
+ compatible = "allwinner,sun50i-a64-system-controller",
+ "syscon";
+ reg = <0x01c00000 0x1000>;
+ };
+
mmc0: mmc@1c0f000 {
compatible = "allwinner,sun50i-a64-mmc";
reg = <0x01c0f000 0x1000>;
@@ -281,6 +287,21 @@
bias-pull-up;
};
+ rmii_pins: rmii_pins {
+ pins = "PD10", "PD11", "PD13", "PD14", "PD17",
+ "PD18", "PD19", "PD20", "PD22", "PD23";
+ function = "emac";
+ drive-strength = <40>;
+ };
+
+ rgmii_pins: rgmii_pins {
+ pins = "PD8", "PD9", "PD10", "PD11", "PD12",
+ "PD13", "PD15", "PD16", "PD17", "PD18",
+ "PD19", "PD20", "PD21", "PD22", "PD23";
+ function = "emac";
+ drive-strength = <40>;
+ };
+
uart0_pins_a: uart0@0 {
pins = "PB8", "PB9";
function = "uart0";
@@ -385,6 +406,26 @@
#size-cells = <0>;
};
+ emac: ethernet@1c30000 {
+ compatible = "allwinner,sun50i-a64-emac";
+ syscon = <&syscon>;
+ reg = <0x01c30000 0x100>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ resets = <&ccu RST_BUS_EMAC>;
+ reset-names = "stmmaceth";
+ clocks = <&ccu CLK_BUS_EMAC>;
+ clock-names = "stmmaceth";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mdio: mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
gic: interrupt-controller@1c81000 {
compatible = "arm,gic-400";
reg = <0x01c81000 0x1000>,
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index 49f6a62..5cdfe73 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -141,6 +141,8 @@
bluetooth {
compatible = "ti,wl1835-st";
enable-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+ clocks = <&pmic>;
+ clock-names = "ext_clock";
};
};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 97c123e..d789858 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -191,6 +191,7 @@
CONFIG_SMC91X=y
CONFIG_SMSC911X=y
CONFIG_STMMAC_ETH=m
+CONFIG_DWMAC_SUN8I=m
CONFIG_MDIO_BUS_MUX_MMIOREG=y
CONFIG_MESON_GXL_PHY=m
CONFIG_MICREL_PHY=y
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index c870d6f..2f0505b 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -71,6 +71,7 @@
int epilogue_offset;
int *offset;
u32 *image;
+ u32 stack_size;
};
static inline void emit(const u32 insn, struct jit_ctx *ctx)
@@ -147,16 +148,11 @@
/* Stack must be multiples of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
-#define _STACK_SIZE \
- (MAX_BPF_STACK \
- + 4 /* extra for skb_copy_bits buffer */)
-
-#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
-
#define PROLOGUE_OFFSET 8
static int build_prologue(struct jit_ctx *ctx)
{
+ const struct bpf_prog *prog = ctx->prog;
const u8 r6 = bpf2a64[BPF_REG_6];
const u8 r7 = bpf2a64[BPF_REG_7];
const u8 r8 = bpf2a64[BPF_REG_8];
@@ -178,9 +174,9 @@
* | |
* | ... | BPF prog stack
* | |
- * +-----+ <= (BPF_FP - MAX_BPF_STACK)
+ * +-----+ <= (BPF_FP - prog->aux->stack_depth)
* |RSVD | JIT scratchpad
- * current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE)
+ * current A64_SP => +-----+ <= (BPF_FP - ctx->stack_size)
* | |
* | ... | Function call stack
* | |
@@ -204,8 +200,12 @@
/* Initialize tail_call_cnt */
emit(A64_MOVZ(1, tcc, 0, 0), ctx);
+ /* 4 byte extra for skb_copy_bits buffer */
+ ctx->stack_size = prog->aux->stack_depth + 4;
+ ctx->stack_size = STACK_ALIGN(ctx->stack_size);
+
/* Set up function call stack */
- emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
+ emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
cur_offset = ctx->idx - idx0;
if (cur_offset != PROLOGUE_OFFSET) {
@@ -290,7 +290,7 @@
const u8 fp = bpf2a64[BPF_REG_FP];
/* We're done with BPF stack */
- emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
+ emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
/* Restore fs (x25) and x26 */
emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
@@ -589,7 +589,7 @@
break;
}
/* tail call */
- case BPF_JMP | BPF_CALL | BPF_X:
+ case BPF_JMP | BPF_TAIL_CALL:
if (emit_bpf_tail_call(ctx))
return -EFAULT;
break;
@@ -735,7 +735,7 @@
return -EINVAL;
}
emit_a64_mov_i64(r3, size, ctx);
- emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
+ emit(A64_SUB_I(1, r4, fp, ctx->stack_size), ctx);
emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
emit(A64_BLR(r5), ctx);
emit(A64_MOV(1, r0, A64_R(0)), ctx);
@@ -903,6 +903,7 @@
bpf_jit_binary_lock_ro(header);
prog->bpf_func = (void *)ctx.image;
prog->jited = 1;
+ prog->jited_len = image_size;
out_off:
kfree(ctx.offset);
diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h
index 1ccf456..f1e3b20 100644
--- a/arch/frv/include/uapi/asm/socket.h
+++ b/arch/frv/include/uapi/asm/socket.h
@@ -98,5 +98,9 @@
#define SO_COOKIE 57
+#define SCM_TIMESTAMPING_PKTINFO 58
+
+#define SO_PEERGROUPS 59
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index 2c3f4b4..5dd5c5d 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -107,4 +107,8 @@
#define SO_COOKIE 57
+#define SCM_TIMESTAMPING_PKTINFO 58
+
+#define SO_PEERGROUPS 59
+
#endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h
index ae6548d..f8f7b47 100644
--- a/arch/m32r/include/uapi/asm/socket.h
+++ b/arch/m32r/include/uapi/asm/socket.h
@@ -98,4 +98,8 @@
#define SO_COOKIE 57
+#define SCM_TIMESTAMPING_PKTINFO 58
+
+#define SO_PEERGROUPS 59
+
#endif /* _ASM_M32R_SOCKET_H */
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 3418ec9..882823b 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -116,4 +116,8 @@
#define SO_COOKIE 57
+#define SCM_TIMESTAMPING_PKTINFO 58
+
+#define SO_PEERGROUPS 59
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h
index 4526e92..c710db3 100644
--- a/arch/mn10300/include/uapi/asm/socket.h
+++ b/arch/mn10300/include/uapi/asm/socket.h
@@ -98,4 +98,8 @@
#define SO_COOKIE 57
+#define SCM_TIMESTAMPING_PKTINFO 58
+
+#define SO_PEERGROUPS 59
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 5147018..a0d4dc9 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -97,4 +97,8 @@
#define SO_COOKIE 0x4032
+#define SCM_TIMESTAMPING_PKTINFO 0x4033
+
+#define SO_PEERGROUPS 0x4034
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/powerpc/boot/dts/fsl/kmcent2.dts b/arch/powerpc/boot/dts/fsl/kmcent2.dts
index 47afa43..5922c1e 100644
--- a/arch/powerpc/boot/dts/fsl/kmcent2.dts
+++ b/arch/powerpc/boot/dts/fsl/kmcent2.dts
@@ -293,9 +293,7 @@
compatible = "fsl,ucc-hdlc";
rx-clock-name = "clk9";
tx-clock-name = "clk9";
- fsl,tx-timeslot-mask = <0xfffffffe>;
- fsl,rx-timeslot-mask = <0xfffffffe>;
- fsl,siram-entry-id = <0>;
+ fsl,hdlc-bus;
};
};
};
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index d73755f..57d38b5 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -295,6 +295,8 @@
#define H_DISABLE_ALL_VIO_INTS 0x0A
#define H_DISABLE_VIO_INTERRUPT 0x0B
#define H_ENABLE_VIO_INTERRUPT 0x0C
+#define H_GET_SESSION_TOKEN 0x19
+#define H_SESSION_ERR_DETECTED 0x1A
/* Platform specific hcalls, used by KVM */
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
index 58e2ec0..3c590c7 100644
--- a/arch/powerpc/include/uapi/asm/socket.h
+++ b/arch/powerpc/include/uapi/asm/socket.h
@@ -8,28 +8,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <asm/sockios.h>
-
-/* For setsockopt(2) */
-#define SOL_SOCKET 1
-
-#define SO_DEBUG 1
-#define SO_REUSEADDR 2
-#define SO_TYPE 3
-#define SO_ERROR 4
-#define SO_DONTROUTE 5
-#define SO_BROADCAST 6
-#define SO_SNDBUF 7
-#define SO_RCVBUF 8
-#define SO_SNDBUFFORCE 32
-#define SO_RCVBUFFORCE 33
-#define SO_KEEPALIVE 9
-#define SO_OOBINLINE 10
-#define SO_NO_CHECK 11
-#define SO_PRIORITY 12
-#define SO_LINGER 13
-#define SO_BSDCOMPAT 14
-#define SO_REUSEPORT 15
#define SO_RCVLOWAT 16
#define SO_SNDLOWAT 17
#define SO_RCVTIMEO 18
@@ -37,72 +15,6 @@
#define SO_PASSCRED 20
#define SO_PEERCRED 21
-/* Security levels - as per NRL IPv6 - don't actually do anything */
-#define SO_SECURITY_AUTHENTICATION 22
-#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
-#define SO_SECURITY_ENCRYPTION_NETWORK 24
-
-#define SO_BINDTODEVICE 25
-
-/* Socket filtering */
-#define SO_ATTACH_FILTER 26
-#define SO_DETACH_FILTER 27
-#define SO_GET_FILTER SO_ATTACH_FILTER
-
-#define SO_PEERNAME 28
-#define SO_TIMESTAMP 29
-#define SCM_TIMESTAMP SO_TIMESTAMP
-
-#define SO_ACCEPTCONN 30
-
-#define SO_PEERSEC 31
-#define SO_PASSSEC 34
-#define SO_TIMESTAMPNS 35
-#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
-
-#define SO_MARK 36
-
-#define SO_TIMESTAMPING 37
-#define SCM_TIMESTAMPING SO_TIMESTAMPING
-
-#define SO_PROTOCOL 38
-#define SO_DOMAIN 39
-
-#define SO_RXQ_OVFL 40
-
-#define SO_WIFI_STATUS 41
-#define SCM_WIFI_STATUS SO_WIFI_STATUS
-#define SO_PEEK_OFF 42
-
-/* Instruct lower device to use last 4-bytes of skb data as FCS */
-#define SO_NOFCS 43
-
-#define SO_LOCK_FILTER 44
-
-#define SO_SELECT_ERR_QUEUE 45
-
-#define SO_BUSY_POLL 46
-
-#define SO_MAX_PACING_RATE 47
-
-#define SO_BPF_EXTENSIONS 48
-
-#define SO_INCOMING_CPU 49
-
-#define SO_ATTACH_BPF 50
-#define SO_DETACH_BPF SO_DETACH_FILTER
-
-#define SO_ATTACH_REUSEPORT_CBPF 51
-#define SO_ATTACH_REUSEPORT_EBPF 52
-
-#define SO_CNX_ADVICE 53
-
-#define SCM_TIMESTAMPING_OPT_STATS 54
-
-#define SO_MEMINFO 55
-
-#define SO_INCOMING_NAPI_ID 56
-
-#define SO_COOKIE 57
+#include <asm-generic/socket.h>
#endif /* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index aee2bb8..861c5af 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -938,7 +938,7 @@
/*
* Tail call
*/
- case BPF_JMP | BPF_CALL | BPF_X:
+ case BPF_JMP | BPF_TAIL_CALL:
ctx->seen |= SEEN_TAILCALL;
bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
break;
@@ -1052,6 +1052,7 @@
fp->bpf_func = (void *)image;
fp->jited = 1;
+ fp->jited_len = alloclen;
bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
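Each JIT in this merge now matches BPF_JMP | BPF_TAIL_CALL instead of BPF_JMP | BPF_CALL | BPF_X. BPF_TAIL_CALL is an internal pseudo-opcode (0xf0 in include/linux/filter.h) that the verifier substitutes for calls to the bpf_tail_call() helper, which frees the BPF_CALL | BPF_X encoding for future use. A small stand-alone decoder showing why the encodings can no longer collide:

#include <stdio.h>

#define BPF_JMP         0x05
#define BPF_CALL        0x80
#define BPF_X           0x08
#define BPF_TAIL_CALL   0xf0    /* internal pseudo-opcode, never accepted from userspace */

static const char *decode(unsigned int code)
{
        switch (code) {
        case BPF_JMP | BPF_CALL:
                return "helper call";
        case BPF_JMP | BPF_TAIL_CALL:
                return "tail call";
        default:
                return "other";
        }
}

int main(void)
{
        printf("%s\n", decode(BPF_JMP | BPF_CALL));             /* helper call */
        printf("%s\n", decode(BPF_JMP | BPF_TAIL_CALL));        /* tail call */
        printf("%s\n", decode(BPF_JMP | BPF_CALL | BPF_X));     /* other: the old encoding */
        return 0;
}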
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index 8acf482..88162bb 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -8,6 +8,7 @@
#ifndef _ASM_S390_DIAG_H
#define _ASM_S390_DIAG_H
+#include <linux/if_ether.h>
#include <linux/percpu.h>
enum diag_stat_enum {
@@ -24,6 +25,7 @@
DIAG_STAT_X224,
DIAG_STAT_X250,
DIAG_STAT_X258,
+ DIAG_STAT_X26C,
DIAG_STAT_X288,
DIAG_STAT_X2C4,
DIAG_STAT_X2FC,
@@ -225,6 +227,30 @@
struct diag204_x_phys_cpu cpus[];
} __packed;
+enum diag26c_sc {
+ DIAG26C_MAC_SERVICES = 0x00000030
+};
+
+enum diag26c_version {
+ DIAG26C_VERSION2 = 0x00000002 /* z/VM 5.4.0 */
+};
+
+#define DIAG26C_GET_MAC 0x0000
+struct diag26c_mac_req {
+ u32 resp_buf_len;
+ u32 resp_version;
+ u16 op_code;
+ u16 devno;
+ u8 res[4];
+};
+
+struct diag26c_mac_resp {
+ u32 version;
+ u8 mac[ETH_ALEN];
+ u8 res[2];
+} __aligned(8);
+
int diag204(unsigned long subcode, unsigned long size, void *addr);
int diag224(void *ptr);
+int diag26c(void *req, void *resp, enum diag26c_sc subcode);
#endif /* _ASM_S390_DIAG_H */
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index e8e5ecf..52a63f4 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -104,4 +104,8 @@
#define SO_COOKIE 57
+#define SCM_TIMESTAMPING_PKTINFO 58
+
+#define SO_PEERGROUPS 59
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index ac6abcd..3499145 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -38,6 +38,7 @@
[DIAG_STAT_X224] = { .code = 0x224, .name = "EBCDIC-Name Table" },
[DIAG_STAT_X250] = { .code = 0x250, .name = "Block I/O" },
[DIAG_STAT_X258] = { .code = 0x258, .name = "Page-Reference Services" },
+ [DIAG_STAT_X26C] = { .code = 0x26c, .name = "Certain System Information" },
[DIAG_STAT_X288] = { .code = 0x288, .name = "Time Bomb" },
[DIAG_STAT_X2C4] = { .code = 0x2c4, .name = "FTP Services" },
[DIAG_STAT_X2FC] = { .code = 0x2fc, .name = "Guest Performance Data" },
@@ -236,3 +237,31 @@
return rc;
}
EXPORT_SYMBOL(diag224);
+
+/*
+ * Diagnose 26C: Access Certain System Information
+ */
+static inline int __diag26c(void *req, void *resp, enum diag26c_sc subcode)
+{
+ register unsigned long _req asm("2") = (addr_t) req;
+ register unsigned long _resp asm("3") = (addr_t) resp;
+ register unsigned long _subcode asm("4") = subcode;
+ register unsigned long _rc asm("5") = -EOPNOTSUPP;
+
+ asm volatile(
+ " sam31\n"
+ " diag %[rx],%[ry],0x26c\n"
+ "0: sam64\n"
+ EX_TABLE(0b,0b)
+ : "+d" (_rc)
+ : [rx] "d" (_req), "d" (_resp), [ry] "d" (_subcode)
+ : "cc", "memory");
+ return _rc;
+}
+
+int diag26c(void *req, void *resp, enum diag26c_sc subcode)
+{
+ diag_stat_inc(DIAG_STAT_X26C);
+ return __diag26c(req, resp, subcode);
+}
+EXPORT_SYMBOL(diag26c);
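A hypothetical in-kernel caller of the new diag26c() export, built from the request/response structures in the asm/diag.h hunk above — for instance fetching the z/VM-managed MAC address for a device number. The function name, the flow, and the GFP_DMA assumption (the sam31 in __diag26c() suggests the buffers must be 31-bit addressable) are illustrative, not an in-tree user:

#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <asm/diag.h>

static int example_get_vm_mac(u16 devno, u8 *mac)
{
        struct diag26c_mac_req *req;
        struct diag26c_mac_resp *resp;
        int rc = -ENOMEM;

        /* assumed: buffers must be below 2 GB for the 31-bit DIAG */
        req = kzalloc(sizeof(*req), GFP_KERNEL | GFP_DMA);
        resp = kzalloc(sizeof(*resp), GFP_KERNEL | GFP_DMA);
        if (!req || !resp)
                goto out;

        req->resp_buf_len = sizeof(*resp);
        req->resp_version = DIAG26C_VERSION2;
        req->op_code = DIAG26C_GET_MAC;
        req->devno = devno;

        rc = diag26c(req, resp, DIAG26C_MAC_SERVICES);
        if (rc)
                goto out;
        if (resp->version != DIAG26C_VERSION2) {
                rc = -EOPNOTSUPP;
                goto out;
        }
        ether_addr_copy(mac, resp->mac);
out:
        kfree(req);
        kfree(resp);
        return rc;
}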
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 6e97a2e..01c6fbc 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -991,7 +991,7 @@
}
break;
}
- case BPF_JMP | BPF_CALL | BPF_X:
+ case BPF_JMP | BPF_TAIL_CALL:
/*
* Implicit input:
* B1: pointer to ctx
@@ -1329,6 +1329,7 @@
bpf_jit_binary_lock_ro(header);
fp->bpf_func = (void *) jit.prg_buf;
fp->jited = 1;
+ fp->jited_len = jit.size;
free_addrs:
kfree(jit.addrs);
out:
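Every JIT touched here also starts recording prog->jited_len. A user-space sketch of the consumer, assuming the BPF_OBJ_GET_INFO_BY_FD command and the jited_prog_len field from include/uapi/linux/bpf.h that this value feeds, given an fd for an already loaded program:

#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int prog_jited_len(int prog_fd, unsigned int *len)
{
        struct bpf_prog_info info;
        union bpf_attr attr;

        memset(&info, 0, sizeof(info));
        memset(&attr, 0, sizeof(attr));
        attr.info.bpf_fd = prog_fd;
        attr.info.info_len = sizeof(info);
        attr.info.info = (unsigned long)&info;

        if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)) < 0)
                return -1;

        *len = info.jited_prog_len;     /* bytes of generated machine code */
        return 0;
}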
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 3f4ad19..186fd81 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -94,6 +94,10 @@
#define SO_COOKIE 0x003b
+#define SCM_TIMESTAMPING_PKTINFO 0x003c
+
+#define SO_PEERGROUPS 0x003d
+
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x5001
#define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index 21de774..8799ae9 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -802,8 +802,13 @@
{
s32 stack_needed = BASE_STACKFRAME;
- if (ctx->saw_frame_pointer || ctx->saw_tail_call)
- stack_needed += MAX_BPF_STACK;
+ if (ctx->saw_frame_pointer || ctx->saw_tail_call) {
+ struct bpf_prog *prog = ctx->prog;
+ u32 stack_depth;
+
+ stack_depth = prog->aux->stack_depth;
+ stack_needed += round_up(stack_depth, 16);
+ }
if (ctx->saw_tail_call)
stack_needed += 8;
@@ -1217,7 +1222,7 @@
}
/* tail call */
- case BPF_JMP | BPF_CALL |BPF_X:
+ case BPF_JMP | BPF_TAIL_CALL:
emit_tail_call(ctx);
break;
@@ -1555,6 +1560,7 @@
prog->bpf_func = (void *)ctx.image;
prog->jited = 1;
+ prog->jited_len = image_size;
out_off:
kfree(ctx.offset);
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index f2a7faf..b33093f 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -19,9 +19,6 @@
*/
#define SKBDATA %r10
#define SKF_MAX_NEG_OFF $(-0x200000) /* SKF_LL_OFF from filter.h */
-#define MAX_BPF_STACK (512 /* from filter.h */ + \
- 32 /* space for rbx,r13,r14,r15 */ + \
- 8 /* space for skb_copy_bits */)
#define FUNC(name) \
.globl name; \
@@ -66,7 +63,7 @@
/* rsi contains offset and can be scratched */
#define bpf_slow_path_common(LEN) \
- lea -MAX_BPF_STACK + 32(%rbp), %rdx;\
+ lea 32(%rbp), %rdx;\
FRAME_BEGIN; \
mov %rbx, %rdi; /* arg1 == skb */ \
push %r9; \
@@ -83,14 +80,14 @@
bpf_slow_path_word:
bpf_slow_path_common(4)
js bpf_error
- mov - MAX_BPF_STACK + 32(%rbp),%eax
+ mov 32(%rbp),%eax
bswap %eax
ret
bpf_slow_path_half:
bpf_slow_path_common(2)
js bpf_error
- mov - MAX_BPF_STACK + 32(%rbp),%ax
+ mov 32(%rbp),%ax
rol $8,%ax
movzwl %ax,%eax
ret
@@ -98,7 +95,7 @@
bpf_slow_path_byte:
bpf_slow_path_common(1)
js bpf_error
- movzbl - MAX_BPF_STACK + 32(%rbp),%eax
+ movzbl 32(%rbp),%eax
ret
#define sk_negative_common(SIZE) \
@@ -148,9 +145,10 @@
bpf_error:
# force a return 0 from jit handler
xor %eax,%eax
- mov - MAX_BPF_STACK(%rbp),%rbx
- mov - MAX_BPF_STACK + 8(%rbp),%r13
- mov - MAX_BPF_STACK + 16(%rbp),%r14
- mov - MAX_BPF_STACK + 24(%rbp),%r15
+ mov (%rbp),%rbx
+ mov 8(%rbp),%r13
+ mov 16(%rbp),%r14
+ mov 24(%rbp),%r15
+ add $40, %rbp
leaveq
ret
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index f589393..e1324f2 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -197,17 +197,16 @@
#define BPF_MAX_INSN_SIZE 128
#define BPF_INSN_SAFETY 64
-#define STACKSIZE \
- (MAX_BPF_STACK + \
- 32 /* space for rbx, r13, r14, r15 */ + \
+#define AUX_STACK_SPACE \
+ (32 /* space for rbx, r13, r14, r15 */ + \
8 /* space for skb_copy_bits() buffer */)
-#define PROLOGUE_SIZE 48
+#define PROLOGUE_SIZE 37
/* emit x64 prologue code for BPF program and check its size.
* bpf_tail_call helper will skip it while jumping into another program
*/
-static void emit_prologue(u8 **pprog)
+static void emit_prologue(u8 **pprog, u32 stack_depth)
{
u8 *prog = *pprog;
int cnt = 0;
@@ -215,13 +214,17 @@
EMIT1(0x55); /* push rbp */
EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */
- /* sub rsp, STACKSIZE */
- EMIT3_off32(0x48, 0x81, 0xEC, STACKSIZE);
+ /* sub rsp, rounded_stack_depth + AUX_STACK_SPACE */
+ EMIT3_off32(0x48, 0x81, 0xEC,
+ round_up(stack_depth, 8) + AUX_STACK_SPACE);
+
+ /* sub rbp, AUX_STACK_SPACE */
+ EMIT4(0x48, 0x83, 0xED, AUX_STACK_SPACE);
/* all classic BPF filters use R6(rbx) save it */
- /* mov qword ptr [rbp-X],rbx */
- EMIT3_off32(0x48, 0x89, 0x9D, -STACKSIZE);
+ /* mov qword ptr [rbp+0],rbx */
+ EMIT4(0x48, 0x89, 0x5D, 0);
/* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
* as temporary, so all tcpdump filters need to spill/fill R7(r13) and
@@ -231,12 +234,12 @@
* than synthetic ones. Therefore not worth adding complexity.
*/
- /* mov qword ptr [rbp-X],r13 */
- EMIT3_off32(0x4C, 0x89, 0xAD, -STACKSIZE + 8);
- /* mov qword ptr [rbp-X],r14 */
- EMIT3_off32(0x4C, 0x89, 0xB5, -STACKSIZE + 16);
- /* mov qword ptr [rbp-X],r15 */
- EMIT3_off32(0x4C, 0x89, 0xBD, -STACKSIZE + 24);
+ /* mov qword ptr [rbp+8],r13 */
+ EMIT4(0x4C, 0x89, 0x6D, 8);
+ /* mov qword ptr [rbp+16],r14 */
+ EMIT4(0x4C, 0x89, 0x75, 16);
+ /* mov qword ptr [rbp+24],r15 */
+ EMIT4(0x4C, 0x89, 0x7D, 24);
/* Clear the tail call counter (tail_call_cnt): for eBPF tail calls
* we need to reset the counter to 0. It's done in two instructions,
@@ -246,8 +249,8 @@
/* xor eax, eax */
EMIT2(0x31, 0xc0);
- /* mov qword ptr [rbp-X], rax */
- EMIT3_off32(0x48, 0x89, 0x85, -STACKSIZE + 32);
+ /* mov qword ptr [rbp+32], rax */
+ EMIT4(0x48, 0x89, 0x45, 32);
BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
*pprog = prog;
@@ -289,13 +292,13 @@
/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
* goto out;
*/
- EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
+ EMIT2_off32(0x8B, 0x85, 36); /* mov eax, dword ptr [rbp + 36] */
EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 36
EMIT2(X86_JA, OFFSET2); /* ja out */
label2 = cnt;
EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
- EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
+ EMIT2_off32(0x89, 0x85, 36); /* mov dword ptr [rbp + 36], eax */
/* prog = array->ptrs[index]; */
EMIT4_off32(0x48, 0x8D, 0x84, 0xD6, /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
@@ -361,7 +364,7 @@
int proglen = 0;
u8 *prog = temp;
- emit_prologue(&prog);
+ emit_prologue(&prog, bpf_prog->aux->stack_depth);
if (seen_ld_abs)
emit_load_skb_data_hlen(&prog);
@@ -877,7 +880,7 @@
}
break;
- case BPF_JMP | BPF_CALL | BPF_X:
+ case BPF_JMP | BPF_TAIL_CALL:
emit_bpf_tail_call(&prog);
break;
@@ -1036,15 +1039,17 @@
seen_exit = true;
/* update cleanup_addr */
ctx->cleanup_addr = proglen;
- /* mov rbx, qword ptr [rbp-X] */
- EMIT3_off32(0x48, 0x8B, 0x9D, -STACKSIZE);
- /* mov r13, qword ptr [rbp-X] */
- EMIT3_off32(0x4C, 0x8B, 0xAD, -STACKSIZE + 8);
- /* mov r14, qword ptr [rbp-X] */
- EMIT3_off32(0x4C, 0x8B, 0xB5, -STACKSIZE + 16);
- /* mov r15, qword ptr [rbp-X] */
- EMIT3_off32(0x4C, 0x8B, 0xBD, -STACKSIZE + 24);
+ /* mov rbx, qword ptr [rbp+0] */
+ EMIT4(0x48, 0x8B, 0x5D, 0);
+ /* mov r13, qword ptr [rbp+8] */
+ EMIT4(0x4C, 0x8B, 0x6D, 8);
+ /* mov r14, qword ptr [rbp+16] */
+ EMIT4(0x4C, 0x8B, 0x75, 16);
+ /* mov r15, qword ptr [rbp+24] */
+ EMIT4(0x4C, 0x8B, 0x7D, 24);
+ /* add rbp, AUX_STACK_SPACE */
+ EMIT4(0x48, 0x83, 0xC5, AUX_STACK_SPACE);
EMIT1(0xC9); /* leave */
EMIT1(0xC3); /* ret */
break;
@@ -1162,6 +1167,7 @@
bpf_jit_binary_lock_ro(header);
prog->bpf_func = (void *)image;
prog->jited = 1;
+ prog->jited_len = proglen;
} else {
prog = orig_prog;
}
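After the x86 hunks above, rbp is lowered by AUX_STACK_SPACE (40 bytes) right after the frame is set up, so bpf_jit.S, the prologue, and the epilogue all address their slots at small positive displacements instead of -STACKSIZE offsets. A sketch of the resulting frame in the style of the arm64 diagram earlier; offsets are relative to the adjusted rbp, and the 8-byte slot at rbp+32 is shared between the 4-byte skb_copy_bits() scratch word and the tail-call counter read at rbp+36:

/*
 * original rbp --> +---------------------------+  (high addresses)
 *                  | tcc         (rbp + 36)    |
 *                  | skb scratch (rbp + 32)    |  AUX_STACK_SPACE =
 *                  | r15         (rbp + 24)    |  32 bytes of callee-
 *                  | r14         (rbp + 16)    |  saved registers plus
 *                  | r13         (rbp +  8)    |  8 bytes of scratch
 *                  | rbx         (rbp +  0)    |
 * adjusted rbp --> +---------------------------+
 *                  |     BPF program stack     |  round_up(stack_depth, 8)
 * current rsp ---> +---------------------------+  (low addresses)
 */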
diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h
index 1eb6d2f..3eed276 100644
--- a/arch/xtensa/include/uapi/asm/socket.h
+++ b/arch/xtensa/include/uapi/asm/socket.h
@@ -109,4 +109,8 @@
#define SO_COOKIE 57
+#define SCM_TIMESTAMPING_PKTINFO 58
+
+#define SO_PEERGROUPS 59
+
#endif /* _XTENSA_SOCKET_H */
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index 3ef6253e1..56fa16c 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -60,7 +60,7 @@
return -EUNATCH;
}
atm_force_charge(out_vcc,skb->truesize);
- new_msg = (struct atmtcp_control *) skb_put(skb,sizeof(*new_msg));
+ new_msg = skb_put(skb, sizeof(*new_msg));
*new_msg = *msg;
new_msg->hdr.length = ATMTCP_HDR_MAGIC;
new_msg->type = type;
@@ -217,7 +217,7 @@
atomic_inc(&vcc->stats->tx_err);
return -ENOBUFS;
}
- hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
+ hdr = skb_put(new_skb, sizeof(struct atmtcp_hdr));
hdr->vpi = htons(vcc->vpi);
hdr->vci = htons(vcc->vci);
hdr->length = htonl(skb->len);
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 637c3e6..7584ae1 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -1104,7 +1104,7 @@
/* Make device DMA transfer visible to CPU. */
fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
- memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length);
+ skb_put_data(skb, buffer->data.align_addr, rpd->rsd[i].length);
/* Now let the device get at it again. */
fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 3617659..461da2b 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1735,7 +1735,7 @@
__net_timestamp(skb);
list_for_each_entry(heb, &he_vcc->buffers, entry)
- memcpy(skb_put(skb, heb->len), &heb->data, heb->len);
+ skb_put_data(skb, &heb->data, heb->len);
switch (vcc->qos.aal) {
case ATM_AAL0:
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 5ec1095..4e64de3 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1090,8 +1090,7 @@
*((u32 *) sb->data) = aal0;
skb_put(sb, sizeof(u32));
- memcpy(skb_put(sb, ATM_CELL_PAYLOAD),
- cell, ATM_CELL_PAYLOAD);
+ skb_put_data(sb, cell, ATM_CELL_PAYLOAD);
ATM_SKB(sb)->vcc = vcc;
__net_timestamp(sb);
@@ -1159,8 +1158,7 @@
return;
}
skb_queue_walk(&rpp->queue, sb)
- memcpy(skb_put(skb, sb->len),
- sb->data, sb->len);
+ skb_put_data(skb, sb->data, sb->len);
recycle_rx_pool_skb(card, rpp);
@@ -1322,8 +1320,7 @@
*((u32 *) sb->data) = header;
skb_put(sb, sizeof(u32));
- memcpy(skb_put(sb, ATM_CELL_PAYLOAD), &(queue->data[16]),
- ATM_CELL_PAYLOAD);
+ skb_put_data(sb, &(queue->data[16]), ATM_CELL_PAYLOAD);
ATM_SKB(sb)->vcc = vcc;
__net_timestamp(sb);
@@ -2014,7 +2011,7 @@
}
atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
- memcpy(skb_put(skb, 52), cell, 52);
+ skb_put_data(skb, cell, 52);
return idt77252_send_skb(vcc, skb, 1);
}
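The ATM hunks here and the Bluetooth hunks below are part of one tree-wide conversion: skb_put() and skb_push() now return void *, so the casts disappear, and the memcpy/memset-on-skb_put idioms become named helpers. Their shape, simplified from include/linux/skbuff.h (the real versions live next to skb_put() itself):

static inline void *skb_put_data(struct sk_buff *skb, const void *data,
                                 unsigned int len)
{
        void *tmp = skb_put(skb, len);

        memcpy(tmp, data, len);
        return tmp;
}

static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
        void *tmp = skb_put(skb, len);

        memset(tmp, 0, len);
        return tmp;
}

static inline void skb_put_u8(struct sk_buff *skb, u8 val)
{
        *(u8 *)skb_put(skb, 1) = val;
}

A typical conversion then reads "hdr = skb_put(skb, sizeof(*hdr));" with no cast, as in the atmtcp hunk above.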
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 5ad037c..c8f2ca6 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -205,7 +205,7 @@
return -ENOMEM;
}
- header = (void *)skb_put(skb, sizeof(*header));
+ header = skb_put(skb, sizeof(*header));
buflen = snprintf((void *)&header[1], buflen - 1,
"L%05d\n%s\n", current->pid, attr->attr.name);
@@ -261,7 +261,7 @@
return -ENOMEM;
}
- header = (void *)skb_put(skb, sizeof(*header));
+ header = skb_put(skb, sizeof(*header));
buflen = snprintf((void *)&header[1], buflen - 1,
"L%05d\n%s\n%s\n", current->pid, attr->attr.name, buf);
@@ -486,14 +486,14 @@
return 0;
}
- header = (void *)skb_put(skb, sizeof(*header));
+ header = skb_put(skb, sizeof(*header));
header->size = cpu_to_le16(size);
header->vpi = cpu_to_le16(0);
header->vci = cpu_to_le16(0);
header->type = cpu_to_le16(PKT_COMMAND);
- memcpy(skb_put(skb, size), buf, size);
+ skb_put_data(skb, buf, size);
fpga_queue(card, dev, skb, NULL);
@@ -945,7 +945,7 @@
dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
return -ENOMEM;
}
- header = (void *)skb_put(skb, sizeof(*header));
+ header = skb_put(skb, sizeof(*header));
header->size = cpu_to_le16(0);
header->vpi = cpu_to_le16(vcc->vpi);
@@ -982,7 +982,7 @@
dev_warn(&card->dev->dev, "Failed to allocate sk_buff in pclose()\n");
return;
}
- header = (void *)skb_put(skb, sizeof(*header));
+ header = skb_put(skb, sizeof(*header));
header->size = cpu_to_le16(0);
header->vpi = cpu_to_le16(vcc->vpi);
@@ -1174,7 +1174,7 @@
}
}
- header = (void *)skb_push(skb, sizeof(*header));
+ header = skb_push(skb, sizeof(*header));
/* This does _not_ include the size of the header */
header->size = cpu_to_le16(pktlen);
@@ -1251,10 +1251,10 @@
if (reset) {
iowrite32(1, card->config_regs + FPGA_MODE);
- data32 = ioread32(card->config_regs + FPGA_MODE);
+ ioread32(card->config_regs + FPGA_MODE);
iowrite32(0, card->config_regs + FPGA_MODE);
- data32 = ioread32(card->config_regs + FPGA_MODE);
+ ioread32(card->config_regs + FPGA_MODE);
}
data32 = ioread32(card->config_regs + FPGA_VER);
@@ -1398,7 +1398,7 @@
continue;
}
- header = (void *)skb_put(skb, sizeof(*header));
+ header = skb_put(skb, sizeof(*header));
header->size = cpu_to_le16(0);
header->vpi = cpu_to_le16(0);
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 737d93e..35952a9 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -97,6 +97,7 @@
depends on BT_HCIUART
depends on BT_HCIUART_SERDEV
depends on PM
+ select BT_HCIUART_H4
help
Nokia H4+ is a serial protocol for communication between Bluetooth
device and host. This protocol is required for Bluetooth devices
@@ -131,7 +132,7 @@
config BT_HCIUART_LL
bool "HCILL protocol support"
- depends on BT_HCIUART
+ depends on BT_HCIUART_SERDEV
help
HCILL (HCI Low Level) is a serial protocol for communication
between Bluetooth device and host. This protocol is required for
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 3bf4ec6..ab090a3 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -335,7 +335,7 @@
}
if (len > 0)
- memcpy(skb_put(data->reassembly, len), buf, len);
+ skb_put_data(data->reassembly, buf, len);
if (hdr & 0x08) {
hci_recv_frame(data->hdev, data->reassembly);
@@ -505,7 +505,7 @@
buf[1] = 0x00;
buf[2] = (size == BFUSB_MAX_BLOCK_SIZE) ? 0 : size;
- memcpy(skb_put(nskb, 3), buf, 3);
+ skb_put_data(nskb, buf, 3);
skb_copy_from_linear_data_offset(skb, sent, skb_put(nskb, size), size);
sent += size;
@@ -516,7 +516,7 @@
if ((nskb->len % data->bulk_pkt_size) == 0) {
buf[0] = 0xdd;
buf[1] = 0x00;
- memcpy(skb_put(nskb, 2), buf, 2);
+ skb_put_data(nskb, buf, 2);
}
read_lock(&data->lock);
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 007c0a4..d4b0b65 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -448,7 +448,7 @@
} else {
- *skb_put(info->rx_skb, 1) = buf[i];
+ skb_put_u8(info->rx_skb, buf[i]);
info->rx_count--;
if (info->rx_count == 0) {
@@ -597,7 +597,7 @@
break;
}
- memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
+ skb_put_data(skb, cmd, sizeof(cmd));
skb_queue_tail(&(info->txq), skb);
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index a9932fe..48d10cb 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -297,7 +297,7 @@
return -ENOMEM;
/* Prepend skb with frame type */
- *skb_push(skb, 1) = hci_skb_pkt_type(skb);
+ *(u8 *)skb_push(skb, 1) = hci_skb_pkt_type(skb);
switch (hci_skb_pkt_type(skb)) {
case HCI_COMMAND_PKT:
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 8165ef2..32dcac0 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -282,7 +282,7 @@
__u8 x = inb(iobase + DATA_L);
- *skb_put(info->rx_skb, 1) = x;
+ skb_put_u8(info->rx_skb, x);
inb(iobase + DATA_H);
info->rx_count--;
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index ba3dd2e..24f8c4e 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -246,6 +246,27 @@
return skb;
}
+static struct sk_buff *btbcm_read_controller_features(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hdev, 0xfc6e, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: BCM: Read controller features failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return skb;
+ }
+
+ if (skb->len != 9) {
+ BT_ERR("%s: BCM: Controller features length mismatch",
+ hdev->name);
+ kfree_skb(skb);
+ return ERR_PTR(-EIO);
+ }
+
+ return skb;
+}
+
static struct sk_buff *btbcm_read_usb_product(struct hci_dev *hdev)
{
struct sk_buff *skb;
@@ -417,6 +438,14 @@
BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]);
kfree_skb(skb);
+ /* Read Controller Features */
+ skb = btbcm_read_controller_features(hdev);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ BT_INFO("%s: BCM: features 0x%2.2x", hdev->name, skb->data[1]);
+ kfree_skb(skb);
+
/* Read Local Name */
skb = btbcm_read_local_name(hdev);
if (IS_ERR(skb))
@@ -540,6 +569,13 @@
kfree_skb(skb);
}
+ /* Read Controller Features */
+ skb = btbcm_read_controller_features(hdev);
+ if (!IS_ERR(skb)) {
+ BT_INFO("%s: BCM: features 0x%2.2x", hdev->name, skb->data[1]);
+ kfree_skb(skb);
+ }
+
/* Read Local Name */
skb = btbcm_read_local_name(hdev);
if (!IS_ERR(skb)) {
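The new btbcm_read_controller_features() follows the same template as the other btbcm readers. Generalized into a hedged sketch — this helper does not exist in the driver, it only shows the pattern of sending a parameterless vendor opcode and validating the reply length:

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static struct sk_buff *example_read_vendor(struct hci_dev *hdev,
                                           u16 opcode, unsigned int plen)
{
        struct sk_buff *skb;

        skb = __hci_cmd_sync(hdev, opcode, 0, NULL, HCI_INIT_TIMEOUT);
        if (IS_ERR(skb))
                return skb;             /* propagate the ERR_PTR */

        if (skb->len != plen) {         /* reply size sanity check */
                kfree_skb(skb);
                return ERR_PTR(-EIO);
        }
        return skb;                     /* caller consumes and kfree_skb()s */
}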
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index fce1548..d32e109 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -575,3 +575,5 @@
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("intel/ibt-11-5.sfi");
MODULE_FIRMWARE("intel/ibt-11-5.ddc");
+MODULE_FIRMWARE("intel/ibt-12-16.sfi");
+MODULE_FIRMWARE("intel/ibt-12-16.ddc");
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index c38cb5b..8d3d917 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -189,12 +189,12 @@
return -ENOMEM;
}
- hdr = (struct hci_command_hdr *)skb_put(skb, HCI_COMMAND_HDR_SIZE);
+ hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
hdr->opcode = cpu_to_le16(opcode);
hdr->plen = len;
if (len)
- memcpy(skb_put(skb, len), param, len);
+ skb_put_data(skb, param, len);
hci_skb_pkt_type(skb) = MRVL_VENDOR_PKT;
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index ef730c1..d00c4fd 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -43,7 +43,7 @@
}
hci_skb_pkt_type(skb) = type;
- memcpy(skb_put(skb, count), data, count);
+ skb_put_data(skb, data, count);
return hci_recv_frame(hdev, skb);
}
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 9624b29..7df79bb 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -233,7 +233,7 @@
} else {
- *skb_put(info->rx_skb, 1) = inb(iobase + UART_RX);
+ skb_put_u8(info->rx_skb, inb(iobase + UART_RX));
info->rx_count--;
if (info->rx_count == 0) {
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 7fa373b..fa24d69 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -266,6 +266,7 @@
{ USB_DEVICE(0x0cf3, 0xe301), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME },
/* Broadcom BCM2035 */
@@ -336,6 +337,7 @@
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
{ USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL },
+ { USB_DEVICE(0x8087, 0x0aaa), .driver_info = BTUSB_INTEL_NEW },
/* Other Intel Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
@@ -476,7 +478,7 @@
}
len = min_t(uint, hci_skb_expect(skb), count);
- memcpy(skb_put(skb, len), buffer, len);
+ skb_put_data(skb, buffer, len);
count -= len;
buffer += len;
@@ -531,7 +533,7 @@
}
len = min_t(uint, hci_skb_expect(skb), count);
- memcpy(skb_put(skb, len), buffer, len);
+ skb_put_data(skb, buffer, len);
count -= len;
buffer += len;
@@ -588,7 +590,7 @@
}
len = min_t(uint, hci_skb_expect(skb), count);
- memcpy(skb_put(skb, len), buffer, len);
+ skb_put_data(skb, buffer, len);
count -= len;
buffer += len;
@@ -932,8 +934,8 @@
skb = bt_skb_alloc(urb->actual_length, GFP_ATOMIC);
if (skb) {
- memcpy(skb_put(skb, urb->actual_length),
- urb->transfer_buffer, urb->actual_length);
+ skb_put_data(skb, urb->transfer_buffer,
+ urb->actual_length);
hci_recv_diag(hdev, skb);
}
} else if (urb->status == -ENOENT) {
@@ -1834,15 +1836,15 @@
if (!skb)
return -ENOMEM;
- hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
+ hdr = skb_put(skb, sizeof(*hdr));
hdr->evt = HCI_EV_CMD_COMPLETE;
hdr->plen = sizeof(*evt) + 1;
- evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
+ evt = skb_put(skb, sizeof(*evt));
evt->ncmd = 0x01;
evt->opcode = cpu_to_le16(opcode);
- *skb_put(skb, 1) = 0x00;
+ skb_put_u8(skb, 0x00);
hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
@@ -2036,6 +2038,7 @@
switch (ver.hw_variant) {
case 0x0b: /* SfP */
case 0x0c: /* WsP */
+ case 0x11: /* JfP */
case 0x12: /* ThP */
break;
default:
@@ -2138,6 +2141,8 @@
* Currently the supported hardware variants are:
* 11 (0x0b) for iBT3.0 (LnP/SfP)
* 12 (0x0c) for iBT3.5 (WsP)
+ * 17 (0x11) for iBT3.5 (JfP)
+ * 18 (0x12) for iBT3.5 (ThP)
*/
snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
le16_to_cpu(ver.hw_variant),
@@ -2390,7 +2395,7 @@
return -ENOMEM;
}
- memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
+ skb_put_data(skb, cmd, sizeof(cmd));
hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
ret = btusb_send_frame(hdev, skb);
@@ -2762,8 +2767,8 @@
return ERR_PTR(-ENOMEM);
}
- *skb_put(skb, 1) = 0xf0;
- *skb_put(skb, 1) = enable;
+ skb_put_u8(skb, 0xf0);
+ skb_put_u8(skb, enable);
pipe = usb_sndbulkpipe(data->udev, data->diag_tx_ep->bEndpointAddress);
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index b6bb58c..85a3978 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -262,7 +262,6 @@
pkt_type = hci_skb_pkt_type(skb);
len = hst->st_write(skb);
if (len < 0) {
- kfree_skb(skb);
BT_ERR("ST write failed (%ld)", len);
/* Try Again, would only fail if UART has gone bad */
return -EAGAIN;
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 6317c6f..2adfe4f 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -226,7 +226,7 @@
}
}
- *skb_put(info->rx_skb, 1) = inb(iobase + UART_RX);
+ skb_put_u8(info->rx_skb, inb(iobase + UART_RX));
nsh = (struct nsh *)info->rx_skb->data;
info->rx_count--;
@@ -414,7 +414,7 @@
skb_reserve(s, NSHL);
skb_copy_from_linear_data(skb, skb_put(s, skb->len), skb->len);
if (skb->len & 0x0001)
- *skb_put(s, 1) = 0; /* PAD */
+ skb_put_u8(s, 0); /* PAD */
/* Prepend skb with Nokia frame header and queue */
memcpy(skb_push(s, NSHL), &nsh, NSHL);
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index f87bfdf..d2e9e2d 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -262,9 +262,9 @@
if (!skb)
return -ENOMEM;
- *skb_put(skb, 1) = BCM_LM_DIAG_PKT;
- *skb_put(skb, 1) = 0xf0;
- *skb_put(skb, 1) = enable;
+ skb_put_u8(skb, BCM_LM_DIAG_PKT);
+ skb_put_u8(skb, 0xf0);
+ skb_put_u8(skb, enable);
skb_queue_tail(&bcm->txq, skb);
hci_uart_tx_wakeup(hu);
@@ -762,8 +762,7 @@
if (id)
gpio_mapping = (const struct acpi_gpio_mapping *) id->driver_data;
- ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
- gpio_mapping);
+ ret = devm_acpi_dev_add_driver_gpios(&pdev->dev, gpio_mapping);
if (ret)
return ret;
@@ -834,8 +833,6 @@
list_del(&dev->list);
mutex_unlock(&bcm_device_lock);
- acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
-
dev_info(&pdev->dev, "%s device unregistered.\n", dev->name);
return 0;
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 910ec96..d880f4e 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -125,7 +125,7 @@
{
const char pkt_delim = 0xc0;
- memcpy(skb_put(skb, 1), &pkt_delim, 1);
+ skb_put_data(skb, &pkt_delim, 1);
}
static void bcsp_slip_one_byte(struct sk_buff *skb, u8 c)
@@ -135,13 +135,13 @@
switch (c) {
case 0xc0:
- memcpy(skb_put(skb, 2), &esc_c0, 2);
+ skb_put_data(skb, &esc_c0, 2);
break;
case 0xdb:
- memcpy(skb_put(skb, 2), &esc_db, 2);
+ skb_put_data(skb, &esc_db, 2);
break;
default:
- memcpy(skb_put(skb, 1), &c, 1);
+ skb_put_data(skb, &c, 1);
}
}
@@ -423,7 +423,7 @@
BT_DBG("Found a LE conf pkt");
if (!nskb)
return;
- memcpy(skb_put(nskb, 4), conf_rsp_pkt, 4);
+ skb_put_data(nskb, conf_rsp_pkt, 4);
hci_skb_pkt_type(nskb) = BCSP_LE_PKT;
skb_queue_head(&bcsp->unrel, nskb);
@@ -447,7 +447,7 @@
bcsp->rx_esc_state = BCSP_ESCSTATE_ESC;
break;
default:
- memcpy(skb_put(bcsp->rx_skb, 1), &byte, 1);
+ skb_put_data(bcsp->rx_skb, &byte, 1);
if ((bcsp->rx_skb->data[0] & 0x40) != 0 &&
bcsp->rx_state != BCSP_W4_CRC)
bcsp_crc_update(&bcsp->message_crc, byte);
@@ -458,7 +458,7 @@
case BCSP_ESCSTATE_ESC:
switch (byte) {
case 0xdc:
- memcpy(skb_put(bcsp->rx_skb, 1), &c0, 1);
+ skb_put_data(bcsp->rx_skb, &c0, 1);
if ((bcsp->rx_skb->data[0] & 0x40) != 0 &&
bcsp->rx_state != BCSP_W4_CRC)
bcsp_crc_update(&bcsp->message_crc, 0xc0);
@@ -467,7 +467,7 @@
break;
case 0xdd:
- memcpy(skb_put(bcsp->rx_skb, 1), &db, 1);
+ skb_put_data(bcsp->rx_skb, &db, 1);
if ((bcsp->rx_skb->data[0] & 0x40) != 0 &&
bcsp->rx_state != BCSP_W4_CRC)
bcsp_crc_update(&bcsp->message_crc, 0xdb);
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 82e5a32..4e328d7 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -209,7 +209,7 @@
}
len = min_t(uint, hci_skb_expect(skb) - skb->len, count);
- memcpy(skb_put(skb, len), buffer, len);
+ skb_put_data(skb, buffer, len);
count -= len;
buffer += len;
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 90d0456..c0e4e26 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -109,7 +109,7 @@
hci_skb_pkt_type(nskb) = HCI_3WIRE_LINK_PKT;
- memcpy(skb_put(nskb, len), data, len);
+ skb_put_data(nskb, data, len);
skb_queue_tail(&h5->unrel, nskb);
}
@@ -487,7 +487,7 @@
}
}
- memcpy(skb_put(h5->rx_skb, 1), byte, 1);
+ skb_put_data(h5->rx_skb, byte, 1);
h5->rx_pending--;
BT_DBG("unsliped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
@@ -579,7 +579,7 @@
{
const char delim = SLIP_DELIMITER;
- memcpy(skb_put(skb, 1), &delim, 1);
+ skb_put_data(skb, &delim, 1);
}
static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
@@ -589,13 +589,13 @@
switch (c) {
case SLIP_DELIMITER:
- memcpy(skb_put(skb, 2), &esc_delim, 2);
+ skb_put_data(skb, &esc_delim, 2);
break;
case SLIP_ESC:
- memcpy(skb_put(skb, 2), &esc_esc, 2);
+ skb_put_data(skb, &esc_esc, 2);
break;
default:
- memcpy(skb_put(skb, 1), &c, 1);
+ skb_put_data(skb, &c, 1);
}
}
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index fa50999..aad07e4 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -185,7 +185,7 @@
return -ENOMEM;
}
- memcpy(skb_put(skb, sizeof(suspend)), suspend, sizeof(suspend));
+ skb_put_data(skb, suspend, sizeof(suspend));
hci_skb_pkt_type(skb) = HCI_LPM_PKT;
set_bit(STATE_LPM_TRANSACTION, &intel->flags);
@@ -270,8 +270,7 @@
return -ENOMEM;
}
- memcpy(skb_put(skb, sizeof(lpm_resume_ack)), lpm_resume_ack,
- sizeof(lpm_resume_ack));
+ skb_put_data(skb, lpm_resume_ack, sizeof(lpm_resume_ack));
hci_skb_pkt_type(skb) = HCI_LPM_PKT;
/* LPM flow is a priority, enqueue packet at list head */
@@ -463,15 +462,15 @@
if (!skb)
return -ENOMEM;
- hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
+ hdr = skb_put(skb, sizeof(*hdr));
hdr->evt = HCI_EV_CMD_COMPLETE;
hdr->plen = sizeof(*evt) + 1;
- evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
+ evt = skb_put(skb, sizeof(*evt));
evt->ncmd = 0x01;
evt->opcode = cpu_to_le16(opcode);
- *skb_put(skb, 1) = 0x00;
+ skb_put_u8(skb, 0x00);
hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
@@ -522,7 +521,7 @@
return -ENOMEM;
}
- memcpy(skb_put(skb, sizeof(speed_cmd)), speed_cmd, sizeof(speed_cmd));
+ skb_put_data(skb, speed_cmd, sizeof(speed_cmd));
hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
hci_uart_set_flow_control(hu, true);
@@ -1205,9 +1204,19 @@
SET_RUNTIME_PM_OPS(intel_suspend_device, intel_resume_device, NULL)
};
+static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
+static const struct acpi_gpio_params host_wake_gpios = { 1, 0, false };
+
+static const struct acpi_gpio_mapping acpi_hci_intel_gpios[] = {
+ { "reset-gpios", &reset_gpios, 1 },
+ { "host-wake-gpios", &host_wake_gpios, 1 },
+ { },
+};
+
static int intel_probe(struct platform_device *pdev)
{
struct intel_device *idev;
+ int ret;
idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL);
if (!idev)
@@ -1217,6 +1226,10 @@
idev->pdev = pdev;
+ ret = devm_acpi_dev_add_driver_gpios(&pdev->dev, acpi_hci_intel_gpios);
+ if (ret)
+ dev_dbg(&pdev->dev, "Unable to add GPIO mapping table\n");
+
idev->reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(idev->reset)) {
dev_err(&pdev->dev, "Unable to retrieve gpio\n");
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 2edd305..8397b71 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -114,8 +114,12 @@
struct sk_buff *skb = hu->tx_skb;
if (!skb) {
+ read_lock(&hu->proto_lock);
+
if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
skb = hu->proto->dequeue(hu);
+
+ read_unlock(&hu->proto_lock);
} else {
hu->tx_skb = NULL;
}
@@ -125,18 +129,23 @@
int hci_uart_tx_wakeup(struct hci_uart *hu)
{
+ read_lock(&hu->proto_lock);
+
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
- return 0;
+ goto no_schedule;
if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
- return 0;
+ goto no_schedule;
}
BT_DBG("");
schedule_work(&hu->write_work);
+no_schedule:
+ read_unlock(&hu->proto_lock);
+
return 0;
}
EXPORT_SYMBOL_GPL(hci_uart_tx_wakeup);
@@ -237,9 +246,13 @@
tty_ldisc_flush(tty);
tty_driver_flush_buffer(tty);
+ read_lock(&hu->proto_lock);
+
if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
hu->proto->flush(hu);
+ read_unlock(&hu->proto_lock);
+
return 0;
}
@@ -261,10 +274,15 @@
BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb),
skb->len);
- if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ read_lock(&hu->proto_lock);
+
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
+ read_unlock(&hu->proto_lock);
return -EUNATCH;
+ }
hu->proto->enqueue(hu, skb);
+ read_unlock(&hu->proto_lock);
hci_uart_tx_wakeup(hu);
@@ -460,6 +478,8 @@
INIT_WORK(&hu->init_ready, hci_uart_init_work);
INIT_WORK(&hu->write_work, hci_uart_write_work);
+ rwlock_init(&hu->proto_lock);
+
/* Flush any pending characters in the driver */
tty_driver_flush_buffer(tty);
@@ -475,6 +495,7 @@
{
struct hci_uart *hu = tty->disc_data;
struct hci_dev *hdev;
+ unsigned long flags;
BT_DBG("tty %p", tty);
@@ -490,7 +511,11 @@
cancel_work_sync(&hu->write_work);
- if (test_and_clear_bit(HCI_UART_PROTO_READY, &hu->flags)) {
+ if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
+ write_lock_irqsave(&hu->proto_lock, flags);
+ clear_bit(HCI_UART_PROTO_READY, &hu->flags);
+ write_unlock_irqrestore(&hu->proto_lock, flags);
+
if (hdev) {
if (test_bit(HCI_UART_REGISTERED, &hu->flags))
hci_unregister_dev(hdev);
@@ -549,13 +574,18 @@
if (!hu || tty != hu->tty)
return;
- if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ read_lock(&hu->proto_lock);
+
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
+ read_unlock(&hu->proto_lock);
return;
+ }
/* It does not need a lock here as it is already protected by a mutex in
* the tty caller
*/
hu->proto->recv(hu, data, count);
+ read_unlock(&hu->proto_lock);
if (hu->hdev)
hu->hdev->stat.byte_rx += count;
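The new hu->proto_lock closes a use-after-free race between the data paths and hci_uart_tty_close(): every path that dereferences hu->proto now checks HCI_UART_PROTO_READY under the read lock, while teardown clears the bit under the write lock, after which no new caller can reach the proto ops. A minimal sketch of the pattern, with illustrative function names:

static void uart_rx(struct hci_uart *hu, const void *data, int count)
{
        read_lock(&hu->proto_lock);
        if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
                hu->proto->recv(hu, data, count);
        read_unlock(&hu->proto_lock);
}

static void uart_teardown(struct hci_uart *hu)
{
        unsigned long flags;

        write_lock_irqsave(&hu->proto_lock, flags);
        clear_bit(HCI_UART_PROTO_READY, &hu->flags);
        write_unlock_irqrestore(&hu->proto_lock, flags);
        /* safe to tear down proto state: no reader can enter now */
}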
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index adc444f..c982943 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -48,6 +48,7 @@
#include <linux/serdev.h>
#include <linux/skbuff.h>
#include <linux/ti_wilink_st.h>
+#include <linux/clk.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -84,6 +85,7 @@
struct hci_uart hu;
struct serdev_device *serdev;
struct gpio_desc *enable_gpio;
+ struct clk *ext_clk;
};
struct ll_struct {
@@ -118,7 +120,7 @@
}
/* prepare packet */
- hcill_packet = (struct hcill_cmd *) skb_put(skb, 1);
+ hcill_packet = skb_put(skb, 1);
hcill_packet->cmd = cmd;
/* send packet */
@@ -146,8 +148,12 @@
hu->priv = ll;
- if (hu->serdev)
+ if (hu->serdev) {
+ struct ll_device *lldev = serdev_device_get_drvdata(hu->serdev);
serdev_device_open(hu->serdev);
+ if (!IS_ERR(lldev->ext_clk))
+ clk_prepare_enable(lldev->ext_clk);
+ }
return 0;
}
@@ -181,6 +187,8 @@
struct ll_device *lldev = serdev_device_get_drvdata(hu->serdev);
gpiod_set_value_cansleep(lldev->enable_gpio, 0);
+ clk_disable_unprepare(lldev->ext_clk);
+
serdev_device_close(hu->serdev);
}
@@ -405,7 +413,7 @@
while (count) {
if (ll->rx_count) {
len = min_t(unsigned int, ll->rx_count, count);
- memcpy(skb_put(ll->rx_skb, len), ptr, len);
+ skb_put_data(ll->rx_skb, ptr, len);
ll->rx_count -= len; count -= len; ptr += len;
if (ll->rx_count)
@@ -624,6 +632,7 @@
skb = __hci_cmd_sync(lldev->hu.hdev, cmd->opcode, cmd->plen, &cmd->speed, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_err(lldev->hu.hdev, "send command failed\n");
+ err = PTR_ERR(skb);
goto out_rel_fw;
}
kfree_skb(skb);
@@ -720,6 +729,10 @@
if (IS_ERR(lldev->enable_gpio))
return PTR_ERR(lldev->enable_gpio);
+ lldev->ext_clk = devm_clk_get(&serdev->dev, "ext_clock");
+ if (IS_ERR(lldev->ext_clk) && PTR_ERR(lldev->ext_clk) != -ENOENT)
+ return PTR_ERR(lldev->ext_clk);
+
of_property_read_u32(serdev->dev.of_node, "max-speed", &max_speed);
hci_uart_set_speeds(hu, 115200, max_speed);
@@ -740,6 +753,14 @@
}
static const struct of_device_id hci_ti_of_match[] = {
+ { .compatible = "ti,wl1271-st" },
+ { .compatible = "ti,wl1273-st" },
+ { .compatible = "ti,wl1281-st" },
+ { .compatible = "ti,wl1283-st" },
+ { .compatible = "ti,wl1285-st" },
+ { .compatible = "ti,wl1801-st" },
+ { .compatible = "ti,wl1805-st" },
+ { .compatible = "ti,wl1807-st" },
{ .compatible = "ti,wl1831-st" },
{ .compatible = "ti,wl1835-st" },
{ .compatible = "ti,wl1837-st" },
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
index bbc4b39..ffb0066 100644
--- a/drivers/bluetooth/hci_mrvl.c
+++ b/drivers/bluetooth/hci_mrvl.c
@@ -328,7 +328,7 @@
}
bt_cb(skb)->pkt_type = MRVL_RAW_DATA;
- memcpy(skb_put(skb, mrvl->tx_len), fw_ptr, mrvl->tx_len);
+ skb_put_data(skb, fw_ptr, mrvl->tx_len);
fw_ptr += mrvl->tx_len;
set_bit(STATE_FW_REQ_PENDING, &mrvl->flags);
diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
index a7d687d8..181a15b 100644
--- a/drivers/bluetooth/hci_nokia.c
+++ b/drivers/bluetooth/hci_nokia.c
@@ -246,9 +246,9 @@
hci_skb_pkt_type(skb) = HCI_NOKIA_ALIVE_PKT;
memset(skb->data, 0x00, len);
- hdr = (struct hci_nokia_alive_hdr *)skb_put(skb, sizeof(*hdr));
+ hdr = skb_put(skb, sizeof(*hdr));
hdr->dlen = sizeof(*pkt);
- pkt = (struct hci_nokia_alive_pkt *)skb_put(skb, sizeof(*pkt));
+ pkt = skb_put(skb, sizeof(*pkt));
pkt->mid = NOKIA_ALIVE_REQ;
nokia_enqueue(hu, skb);
@@ -285,10 +285,10 @@
hci_skb_pkt_type(skb) = HCI_NOKIA_NEG_PKT;
- neg_hdr = (struct hci_nokia_neg_hdr *)skb_put(skb, sizeof(*neg_hdr));
+ neg_hdr = skb_put(skb, sizeof(*neg_hdr));
neg_hdr->dlen = sizeof(*neg_cmd);
- neg_cmd = (struct hci_nokia_neg_cmd *)skb_put(skb, sizeof(*neg_cmd));
+ neg_cmd = skb_put(skb, sizeof(*neg_cmd));
neg_cmd->ack = NOKIA_NEG_REQ;
neg_cmd->baud = cpu_to_le16(baud);
neg_cmd->unused1 = 0x0000;
@@ -532,7 +532,7 @@
err = skb_pad(skb, 1);
if (err)
return err;
- *skb_put(skb, 1) = 0x00;
+ skb_put_u8(skb, 0x00);
}
skb_queue_tail(&btdev->txq, skb);
@@ -557,7 +557,7 @@
goto finish_neg;
}
- evt = (struct hci_nokia_neg_evt *)skb_pull(skb, sizeof(*hdr));
+ evt = skb_pull(skb, sizeof(*hdr));
if (evt->ack != NOKIA_NEG_ACK) {
dev_err(dev, "Negotiation received: wrong reply");
@@ -595,7 +595,7 @@
goto finish_alive;
}
- pkt = (struct hci_nokia_alive_pkt *)skb_pull(skb, sizeof(*hdr));
+ pkt = skb_pull(skb, sizeof(*hdr));
if (pkt->mid != NOKIA_ALIVE_RESP) {
dev_err(dev, "Alive received: invalid response: 0x%02x!",
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index f242dfd..392f412 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -215,7 +215,7 @@
}
/* Assign HCI_IBS type */
- *skb_put(skb, 1) = cmd;
+ skb_put_u8(skb, cmd);
skb_queue_tail(&qca->txq, skb);
@@ -869,7 +869,7 @@
}
/* Assign commands to change baudrate and packet type. */
- memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
+ skb_put_data(skb, cmd, sizeof(cmd));
hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
skb_queue_tail(&qca->txq, skb);
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 2b05e55..c6e9e1c 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -87,6 +87,7 @@
struct work_struct write_work;
const struct hci_uart_proto *proto;
+ rwlock_t proto_lock; /* Stop work for proto close */
void *priv;
struct sk_buff *tx_skb;
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 233e850..e6f6dbc 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -146,8 +146,8 @@
hci_skb_pkt_type(skb) = HCI_VENDOR_PKT;
- *skb_put(skb, 1) = 0xff;
- *skb_put(skb, 1) = opcode;
+ skb_put_u8(skb, 0xff);
+ skb_put_u8(skb, opcode);
put_unaligned_le16(hdev->id, skb_put(skb, 2));
skb_queue_tail(&data->readq, skb);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index d136db1..62be953 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -4235,7 +4235,7 @@
return;
}
- memcpy(skb_put(skb, size), buf, size);
+ skb_put_data(skb, buf, size);
skb->protocol = hdlc_type_trans(skb, dev);
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index f00e0d8..b75b8be 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -604,8 +604,7 @@
if (!skb)
return ERR_PTR(-ENOMEM);
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
- memset(chcr_req, 0, transhdr_len);
+ chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1);
@@ -881,8 +880,7 @@
return skb;
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
- memset(chcr_req, 0, transhdr_len);
+ chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 0);
@@ -1447,8 +1445,7 @@
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
/* Write WR */
- chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
- memset(chcr_req, 0, transhdr_len);
+ chcr_req = __skb_put_zero(skb, transhdr_len);
stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
@@ -1779,8 +1776,7 @@
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
- memset(chcr_req, 0, transhdr_len);
+ chcr_req = __skb_put_zero(skb, transhdr_len);
fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
@@ -1892,8 +1888,7 @@
/* NIC driver is going to write the sge hdr. */
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
- memset(chcr_req, 0, transhdr_len);
+ chcr_req = __skb_put_zero(skb, transhdr_len);
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
req->assoclen -= 8;
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 5d36402..242359c 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -219,7 +219,7 @@
{
struct fwnet_header *h;
- h = (struct fwnet_header *)skb_push(skb, sizeof(*h));
+ h = skb_push(skb, sizeof(*h));
put_unaligned_be16(type, &h->h_proto);
if (net->flags & (IFF_LOOPBACK | IFF_NOARP)) {
@@ -600,7 +600,7 @@
return -ENOMEM;
}
skb_reserve(skb, LL_RESERVED_SPACE(net));
- memcpy(skb_put(skb, len), buf, len);
+ skb_put_data(skb, buf, len);
return fwnet_finish_incoming_packet(net, skb, source_node_id,
is_broadcast, ether_type);
@@ -961,16 +961,14 @@
tx_len = ptask->max_payload;
switch (fwnet_get_hdr_lf(&ptask->hdr)) {
case RFC2374_HDR_UNFRAG:
- bufhdr = (struct rfc2734_header *)
- skb_push(ptask->skb, RFC2374_UNFRAG_HDR_SIZE);
+ bufhdr = skb_push(ptask->skb, RFC2374_UNFRAG_HDR_SIZE);
put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
break;
case RFC2374_HDR_FIRSTFRAG:
case RFC2374_HDR_INTFRAG:
case RFC2374_HDR_LASTFRAG:
- bufhdr = (struct rfc2734_header *)
- skb_push(ptask->skb, RFC2374_FRAG_HDR_SIZE);
+ bufhdr = skb_push(ptask->skb, RFC2374_FRAG_HDR_SIZE);
put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
put_unaligned_be32(ptask->hdr.w1, &bufhdr->w1);
break;
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index ece6926..a6cb379 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -179,8 +179,7 @@
}
/* Construct the family header first */
- header = (struct rdma_ls_ip_resolve_header *)
- skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
+ header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
header->ifindex = dev_addr->bound_dev_if;
nla_put(skb, attrtype, size, daddr);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index fb7aec4..70fa4ca 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -759,8 +759,7 @@
query->mad_buf->context[1] = NULL;
/* Construct the family header first */
- header = (struct rdma_ls_resolve_header *)
- skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
+ header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
memcpy(header->device_name, query->port->agent->device->name,
LS_DEVICE_NAME_MAX);
header->port_num = query->port->port_num;
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 558d6a0..3eff654 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -142,8 +142,7 @@
pr_debug("%s alloc_skb failed\n", __func__);
return -ENOMEM;
}
- wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
- memset(wqe, 0, sizeof(*wqe));
+ wqe = skb_put_zero(skb, sizeof(*wqe));
build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD,
T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 0, qpid, 7,
T3_SOPEOP);
@@ -561,8 +560,7 @@
ctx1 |= ((u64) (V_EC_BASE_HI((u32) base_addr & 0xf) | V_EC_RESPQ(0) |
V_EC_TYPE(0) | V_EC_GEN(1) |
V_EC_UP_TOKEN(T3_CTL_QP_TID) | F_EC_VALID)) << 32;
- wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
- memset(wqe, 0, sizeof(*wqe));
+ wqe = skb_put_zero(skb, sizeof(*wqe));
build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0,
T3_CTL_QP_TID, 7, T3_SOPEOP);
wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
@@ -837,7 +835,7 @@
if (!skb)
return -ENOMEM;
pr_debug("%s rdev_p %p\n", __func__, rdev_p);
- wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe));
+ wqe = __skb_put(skb, sizeof(*wqe));
wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
V_FW_RIWR_LEN(sizeof(*wqe) >> 3));
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index b61630e..8697537 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -175,7 +175,7 @@
skb = get_skb(skb, sizeof *req, GFP_KERNEL);
if (!skb)
return;
- req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
+ req = skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
skb->priority = CPL_PRIORITY_SETUP;
@@ -190,7 +190,7 @@
if (!skb)
return -ENOMEM;
- req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
+ req = skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
@@ -211,7 +211,7 @@
if (!skb)
return -ENOMEM;
- req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
+ req = skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
@@ -398,7 +398,7 @@
}
skb->priority = CPL_PRIORITY_DATA;
set_arp_failure_handler(skb, arp_failure_discard);
- req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
+ req = skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
@@ -417,8 +417,7 @@
}
skb->priority = CPL_PRIORITY_DATA;
set_arp_failure_handler(skb, abort_arp_failure);
- req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
- memset(req, 0, sizeof(*req));
+ req = skb_put_zero(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
@@ -456,7 +455,7 @@
skb->priority = CPL_PRIORITY_SETUP;
set_arp_failure_handler(skb, act_open_req_arp_failure);
- req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
+ req = skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
req->local_port = ep->com.local_addr.sin_port;
@@ -514,7 +513,7 @@
set_arp_failure_handler(skb, arp_failure_discard);
skb_reset_transport_header(skb);
len = skb->len;
- req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
+ req = skb_push(skb, sizeof(*req));
req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
req->wr_lo = htonl(V_WR_TID(ep->hwtid));
req->len = htonl(len);
@@ -547,7 +546,7 @@
return -ENOMEM;
}
skb_reserve(skb, sizeof(*req));
- mpa = (struct mpa_message *) skb_put(skb, mpalen);
+ mpa = skb_put(skb, mpalen);
memset(mpa, 0, sizeof(*mpa));
memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
mpa->flags = MPA_REJECT;
@@ -565,7 +564,7 @@
skb->priority = CPL_PRIORITY_DATA;
set_arp_failure_handler(skb, arp_failure_discard);
skb_reset_transport_header(skb);
- req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
+ req = skb_push(skb, sizeof(*req));
req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
req->wr_lo = htonl(V_WR_TID(ep->hwtid));
req->len = htonl(mpalen);
@@ -597,7 +596,7 @@
}
skb->priority = CPL_PRIORITY_DATA;
skb_reserve(skb, sizeof(*req));
- mpa = (struct mpa_message *) skb_put(skb, mpalen);
+ mpa = skb_put(skb, mpalen);
memset(mpa, 0, sizeof(*mpa));
memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
@@ -616,7 +615,7 @@
set_arp_failure_handler(skb, arp_failure_discard);
skb_reset_transport_header(skb);
len = skb->len;
- req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
+ req = skb_push(skb, sizeof(*req));
req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
req->wr_lo = htonl(V_WR_TID(ep->hwtid));
req->len = htonl(len);
@@ -801,7 +800,7 @@
return 0;
}
- req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));
+ req = skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
@@ -1206,7 +1205,7 @@
return -ENOMEM;
}
- req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
+ req = skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
req->local_port = ep->com.local_addr.sin_port;
@@ -1247,7 +1246,7 @@
pr_err("%s - failed to alloc skb\n", __func__);
return -ENOMEM;
}
- req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
+ req = skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
req->cpu_idx = 0;
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
@@ -1615,7 +1614,7 @@
goto out;
}
rpl_skb->priority = CPL_PRIORITY_DATA;
- rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
+ rpl = skb_put(rpl_skb, sizeof(*rpl));
rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index ba6d5d2..7f633da 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -670,8 +670,7 @@
pr_err("%s cannot send zb_read!!\n", __func__);
return -ENOMEM;
}
- wqe = (union t3_wr *)skb_put(skb, sizeof(struct t3_rdma_read_wr));
- memset(wqe, 0, sizeof(struct t3_rdma_read_wr));
+ wqe = skb_put_zero(skb, sizeof(struct t3_rdma_read_wr));
wqe->read.rdmaop = T3_READ_REQ;
wqe->read.reserved[0] = 0;
wqe->read.reserved[1] = 0;
@@ -702,8 +701,7 @@
pr_err("%s cannot send TERMINATE!\n", __func__);
return -ENOMEM;
}
- wqe = (union t3_wr *)skb_put(skb, 40);
- memset(wqe, 0, 40);
+ wqe = skb_put_zero(skb, 40);
wqe->send.rdmaop = T3_TERMINATE;
/* immediate data length */
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 0910faf..e49b34c 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -398,7 +398,8 @@
(const u32 *)&sin6->sin6_addr.s6_addr,
1);
}
- cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
+ cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
+ ep->com.local_addr.ss_family);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
if (ep->mpa_skb)
@@ -596,7 +597,7 @@
else
nparams = 9;
- flowc = (struct fw_flowc_wr *)__skb_put(skb, FLOWC_LEN);
+ flowc = __skb_put(skb, FLOWC_LEN);
flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
FW_FLOWC_WR_NPARAMS_V(nparams));
@@ -786,18 +787,16 @@
if (ep->com.remote_addr.ss_family == AF_INET) {
switch (CHELSIO_CHIP_VERSION(adapter_type)) {
case CHELSIO_T4:
- req = (struct cpl_act_open_req *)skb_put(skb, wrlen);
+ req = skb_put(skb, wrlen);
INIT_TP_WR(req, 0);
break;
case CHELSIO_T5:
- t5req = (struct cpl_t5_act_open_req *)skb_put(skb,
- wrlen);
+ t5req = skb_put(skb, wrlen);
INIT_TP_WR(t5req, 0);
req = (struct cpl_act_open_req *)t5req;
break;
case CHELSIO_T6:
- t6req = (struct cpl_t6_act_open_req *)skb_put(skb,
- wrlen);
+ t6req = skb_put(skb, wrlen);
INIT_TP_WR(t6req, 0);
req = (struct cpl_act_open_req *)t6req;
t5req = (struct cpl_t5_act_open_req *)t6req;
@@ -838,18 +837,16 @@
} else {
switch (CHELSIO_CHIP_VERSION(adapter_type)) {
case CHELSIO_T4:
- req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
+ req6 = skb_put(skb, wrlen);
INIT_TP_WR(req6, 0);
break;
case CHELSIO_T5:
- t5req6 = (struct cpl_t5_act_open_req6 *)skb_put(skb,
- wrlen);
+ t5req6 = skb_put(skb, wrlen);
INIT_TP_WR(t5req6, 0);
req6 = (struct cpl_act_open_req6 *)t5req6;
break;
case CHELSIO_T6:
- t6req6 = (struct cpl_t6_act_open_req6 *)skb_put(skb,
- wrlen);
+ t6req6 = skb_put(skb, wrlen);
INIT_TP_WR(t6req6, 0);
req6 = (struct cpl_act_open_req6 *)t6req6;
t5req6 = (struct cpl_t5_act_open_req6 *)t6req6;
@@ -926,8 +923,7 @@
}
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
- memset(req, 0, wrlen);
+ req = skb_put_zero(skb, wrlen);
req->op_to_immdlen = cpu_to_be32(
FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
FW_WR_COMPL_F |
@@ -1033,8 +1029,7 @@
}
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
- memset(req, 0, wrlen);
+ req = skb_put_zero(skb, wrlen);
req->op_to_immdlen = cpu_to_be32(
FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
FW_WR_COMPL_F |
@@ -1114,8 +1109,7 @@
}
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
- memset(req, 0, wrlen);
+ req = skb_put_zero(skb, wrlen);
req->op_to_immdlen = cpu_to_be32(
FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
FW_WR_COMPL_F |
@@ -1199,7 +1193,7 @@
/* setup the hwtid for this connection */
ep->hwtid = tid;
- cxgb4_insert_tid(t, ep, tid);
+ cxgb4_insert_tid(t, ep, tid, ep->com.local_addr.ss_family);
insert_ep_tid(ep);
ep->snd_seq = be32_to_cpu(req->snd_isn);
@@ -1906,8 +1900,7 @@
int win;
skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
- memset(req, 0, sizeof(*req));
+ req = __skb_put_zero(skb, sizeof(*req));
req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
@@ -2304,7 +2297,8 @@
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
if (status && act_open_has_tid(status))
- cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
+ cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl),
+ ep->com.local_addr.ss_family);
remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
cxgb4_free_atid(t, atid);
@@ -2581,7 +2575,8 @@
child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
init_timer(&child_ep->timer);
- cxgb4_insert_tid(t, child_ep, hwtid);
+ cxgb4_insert_tid(t, child_ep, hwtid,
+ child_ep->com.local_addr.ss_family);
insert_ep_tid(child_ep);
if (accept_cr(child_ep, skb, req)) {
c4iw_put_ep(&parent_ep->com);
@@ -2849,7 +2844,8 @@
1);
}
remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
- cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
+ cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
+ ep->com.local_addr.ss_family);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
c4iw_reconnect(ep);
@@ -3752,9 +3748,9 @@
*/
memset(&tmp_opt, 0, sizeof(tmp_opt));
tcp_clear_options(&tmp_opt);
- tcp_parse_options(skb, &tmp_opt, 0, NULL);
+ tcp_parse_options(&init_net, skb, &tmp_opt, 0, NULL);
- req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
+ req = __skb_push(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
SYN_MAC_IDX_V(RX_MACIDX_G(
@@ -3806,8 +3802,7 @@
req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
if (!req_skb)
return;
- req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
- memset(req, 0, sizeof(*req));
+ req = __skb_put_zero(req_skb, sizeof(*req));
req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 14de5bd..e16fcaf 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -44,8 +44,7 @@
wr_len = sizeof *res_wr + sizeof *res;
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
- res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
- memset(res_wr, 0, wr_len);
+ res_wr = __skb_put_zero(skb, wr_len);
res_wr->op_nres = cpu_to_be32(
FW_WR_OP_V(FW_RI_RES_WR) |
FW_RI_RES_WR_NRES_V(1) |
@@ -114,8 +113,7 @@
}
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
- res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
- memset(res_wr, 0, wr_len);
+ res_wr = __skb_put_zero(skb, wr_len);
res_wr->op_nres = cpu_to_be32(
FW_WR_OP_V(FW_RI_RES_WR) |
FW_RI_RES_WR_NRES_V(1) |
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 3ee7f43..5332f06 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -81,8 +81,7 @@
}
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
- req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
- memset(req, 0, wr_len);
+ req = __skb_put_zero(skb, wr_len);
INIT_ULPTX_WR(req, wr_len, 0, 0);
req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
(wait ? FW_WR_COMPL_F : 0));
@@ -142,8 +141,7 @@
}
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
- req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
- memset(req, 0, wr_len);
+ req = __skb_put_zero(skb, wr_len);
INIT_ULPTX_WR(req, wr_len, 0, 0);
if (i == (num_wqe-1)) {
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 8e4154b..bfc7759 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -293,8 +293,7 @@
}
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
- res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
- memset(res_wr, 0, wr_len);
+ res_wr = __skb_put_zero(skb, wr_len);
res_wr->op_nres = cpu_to_be32(
FW_WR_OP_V(FW_RI_RES_WR) |
FW_RI_RES_WR_NRES_V(2) |
@@ -1228,7 +1227,7 @@
set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
- wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
+ wqe = __skb_put(skb, sizeof(*wqe));
memset(wqe, 0, sizeof *wqe);
wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
wqe->flowid_len16 = cpu_to_be32(
@@ -1350,7 +1349,7 @@
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
+ wqe = __skb_put(skb, sizeof(*wqe));
memset(wqe, 0, sizeof *wqe);
wqe->op_compl = cpu_to_be32(
FW_WR_OP_V(FW_RI_INIT_WR) |
@@ -1419,7 +1418,7 @@
}
set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
- wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
+ wqe = __skb_put(skb, sizeof(*wqe));
memset(wqe, 0, sizeof *wqe);
wqe->op_compl = cpu_to_be32(
FW_WR_OP_V(FW_RI_INIT_WR) |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 521d0de..75b2f7d 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -61,8 +61,7 @@
#include <rdma/mlx4-abi.h>
#define DRV_NAME MLX4_IB_DRV_NAME
-#define DRV_VERSION "2.2-1"
-#define DRV_RELDATE "Feb 2014"
+#define DRV_VERSION "4.0-0"
#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
@@ -79,7 +78,7 @@
static const char mlx4_ib_version[] =
DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
- DRV_VERSION " (" DRV_RELDATE ")\n";
+ DRV_VERSION "\n";
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 94c049b..a384d72 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -788,7 +788,7 @@
*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
- *cqb = mlx5_vzalloc(*inlen);
+ *cqb = kvzalloc(*inlen, GFP_KERNEL);
if (!*cqb) {
err = -ENOMEM;
goto err_db;
@@ -884,7 +884,7 @@
*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
- *cqb = mlx5_vzalloc(*inlen);
+ *cqb = kvzalloc(*inlen, GFP_KERNEL);
if (!*cqb) {
err = -ENOMEM;
goto err_buf;
@@ -1314,7 +1314,7 @@
inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto ex_resize;
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index f1b56de6..95db929 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -218,7 +218,7 @@
(struct ib_pma_portcounters_ext *)(out_mad->data + 40);
int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
- out_cnt = mlx5_vzalloc(sz);
+ out_cnt = kvzalloc(sz, GFP_KERNEL);
if (!out_cnt)
return IB_MAD_RESULT_FAILURE;
@@ -231,7 +231,7 @@
(struct ib_pma_portcounters *)(out_mad->data + 40);
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
- out_cnt = mlx5_vzalloc(sz);
+ out_cnt = kvzalloc(sz, GFP_KERNEL);
if (!out_cnt)
return IB_MAD_RESULT_FAILURE;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 9ecc089..9f7e186 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -60,8 +60,7 @@
#include "cmd.h"
#define DRIVER_NAME "mlx5_ib"
-#define DRIVER_VERSION "2.2-1"
-#define DRIVER_RELDATE "Feb 2014"
+#define DRIVER_VERSION "5.0-0"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
@@ -70,7 +69,7 @@
static char mlx5_version[] =
DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
- DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
+ DRIVER_VERSION "\n";
enum {
MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
@@ -440,7 +439,7 @@
u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
u8 atomic_req_8B_endianness_mode =
- MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianess_mode);
+ MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
/* Check if HW supports 8 bytes standard atomic operations and capable
* of host endianness respond
@@ -2263,7 +2262,7 @@
if (!is_valid_attr(dev->mdev, flow_attr))
return ERR_PTR(-EINVAL);
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
handler = kzalloc(sizeof(*handler), GFP_KERNEL);
if (!handler || !spec) {
err = -ENOMEM;
@@ -3468,7 +3467,7 @@
__be32 val;
int ret, i;
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -3497,7 +3496,7 @@
int ret, i;
int offset = port->cnts.num_q_counters;
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 366433f..763bb5b 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1110,7 +1110,7 @@
inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
sizeof(*pas) * ((npages + 1) / 2) * 2;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_1;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index ebb6768..0889ff3 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -823,7 +823,7 @@
*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
- *in = mlx5_vzalloc(*inlen);
+ *in = kvzalloc(*inlen, GFP_KERNEL);
if (!*in) {
err = -ENOMEM;
goto err_umem;
@@ -931,7 +931,7 @@
qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
- *in = mlx5_vzalloc(*inlen);
+ *in = kvzalloc(*inlen, GFP_KERNEL);
if (!*in) {
err = -ENOMEM;
goto err_buf;
@@ -1060,7 +1060,7 @@
return err;
inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * ncont;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_umem;
@@ -1140,7 +1140,7 @@
u32 rq_pas_size = get_rq_pas_size(qpc);
inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1193,7 +1193,7 @@
int err;
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1372,7 +1372,7 @@
}
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1633,7 +1633,7 @@
if (err)
return err;
} else {
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2164,7 +2164,7 @@
int err;
inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2189,7 +2189,7 @@
int err;
inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2434,7 +2434,7 @@
int err;
inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2479,7 +2479,7 @@
int err;
inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -4281,7 +4281,7 @@
int err;
inlen = MLX5_ST_SZ_BYTES(query_sq_out);
- out = mlx5_vzalloc(inlen);
+ out = kvzalloc(inlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -4308,7 +4308,7 @@
int err;
inlen = MLX5_ST_SZ_BYTES(query_rq_out);
- out = mlx5_vzalloc(inlen);
+ out = kvzalloc(inlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -4612,7 +4612,7 @@
dev = to_mdev(pd->device);
inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -4842,7 +4842,7 @@
return ERR_PTR(-ENOMEM);
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err;
@@ -4921,7 +4921,7 @@
return -EOPNOTSUPP;
inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 7cb145f..43707b1 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -127,7 +127,7 @@
goto err_umem;
}
- in->pas = mlx5_vzalloc(sizeof(*in->pas) * ncont);
+ in->pas = kvzalloc(sizeof(*in->pas) * ncont, GFP_KERNEL);
if (!in->pas) {
err = -ENOMEM;
goto err_umem;
@@ -189,7 +189,7 @@
}
mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift);
- in->pas = mlx5_vzalloc(sizeof(*in->pas) * srq->buf.npages);
+ in->pas = kvzalloc(sizeof(*in->pas) * srq->buf.npages, GFP_KERNEL);
if (!in->pas) {
err = -ENOMEM;
goto err_buf;
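
The mlx5_vzalloc() conversions above are mechanical: judging by the old
driver code, mlx5_vzalloc(size) was a driver-local helper that tried a
kmalloc-based zeroing allocation and fell back to vzalloc(), which is what
the generic kvzalloc(size, GFP_KERNEL) now provides; the buffer is released
with kvfree() in either case. A minimal sketch, assuming a hypothetical
command-buffer size computed by the caller:

	#include <linux/errno.h>
	#include <linux/mm.h>	/* kvzalloc(), kvfree() */

	static int foo_run_cmd(size_t inlen)
	{
		void *in;

		/* kvzalloc() attempts kmalloc() first and transparently
		 * falls back to vmalloc() for large buffers; the memory
		 * is zeroed either way.
		 */
		in = kvzalloc(inlen, GFP_KERNEL);
		if (!in)
			return -ENOMEM;

		/* ... build and execute the firmware command ... */

		kvfree(in);
		return 0;
	}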
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 6a72095..b5851fd 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -37,7 +37,7 @@
#include <linux/iommu.h>
#include <linux/pci.h>
#include <net/addrconf.h>
-#include <linux/qed/qede_roce.h>
+
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
@@ -276,7 +276,7 @@
QED_CHAIN_CNT_TYPE_U16,
n_entries,
sizeof(struct regpair *),
- &cnq->pbl);
+ &cnq->pbl, NULL);
if (rc)
goto err4;
@@ -886,9 +886,9 @@
memcpy(&sgid->raw[8], guid, sizeof(guid));
/* Update LL2 */
- rc = dev->ops->roce_ll2_set_mac_filter(dev->cdev,
- dev->gsi_ll2_mac_address,
- dev->ndev->dev_addr);
+ rc = dev->ops->ll2_set_mac_filter(dev->cdev,
+ dev->gsi_ll2_mac_address,
+ dev->ndev->dev_addr);
ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
@@ -902,7 +902,7 @@
* initialization done before RoCE driver notifies
* event to stack.
*/
-static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
+static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
{
switch (event) {
case QEDE_UP:
@@ -931,12 +931,12 @@
static int __init qedr_init_module(void)
{
- return qede_roce_register_driver(&qedr_drv);
+ return qede_rdma_register_driver(&qedr_drv);
}
static void __exit qedr_exit_module(void)
{
- qede_roce_unregister_driver(&qedr_drv);
+ qede_rdma_unregister_driver(&qedr_drv);
}
module_init(qedr_init_module);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index d961f79..ab7784b 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -36,8 +36,8 @@
#include <rdma/ib_addr.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_chain.h>
-#include <linux/qed/qed_roce_if.h>
-#include <linux/qed/qede_roce.h>
+#include <linux/qed/qed_rdma_if.h>
+#include <linux/qed/qede_rdma.h>
#include <linux/qed/roce_common.h>
#include "qedr_hsi_rdma.h"
@@ -153,6 +153,8 @@
u32 dp_module;
u8 dp_level;
u8 num_hwfns;
+ u8 gsi_ll2_handle;
+
uint wq_multiplier;
u8 gsi_ll2_mac_address[ETH_ALEN];
int gsi_qp_created;
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index d86dbe8..4689e80 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -44,7 +44,7 @@
#include <rdma/ib_cache.h>
#include <linux/qed/qed_if.h>
-#include <linux/qed/qed_roce_if.h>
+#include <linux/qed/qed_rdma_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
@@ -64,9 +64,14 @@
dev->gsi_qp = qp;
}
-void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
+void qedr_ll2_complete_tx_packet(void *cxt,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet)
{
- struct qedr_dev *dev = (struct qedr_dev *)_qdev;
+ struct qedr_dev *dev = (struct qedr_dev *)cxt;
+ struct qed_roce_ll2_packet *pkt = cookie;
struct qedr_cq *cq = dev->gsi_sqcq;
struct qedr_qp *qp = dev->gsi_qp;
unsigned long flags;
@@ -88,20 +93,26 @@
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
}
-void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
- struct qed_roce_ll2_rx_params *params)
+void qedr_ll2_complete_rx_packet(void *cxt,
+ struct qed_ll2_comp_rx_data *data)
{
- struct qedr_dev *dev = (struct qedr_dev *)_dev;
+ struct qedr_dev *dev = (struct qedr_dev *)cxt;
struct qedr_cq *cq = dev->gsi_rqcq;
struct qedr_qp *qp = dev->gsi_qp;
unsigned long flags;
spin_lock_irqsave(&qp->q_lock, flags);
- qp->rqe_wr_id[qp->rq.gsi_cons].rc = params->rc;
- qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = params->vlan_id;
- qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = pkt->payload[0].len;
- ether_addr_copy(qp->rqe_wr_id[qp->rq.gsi_cons].smac, params->smac);
+ qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
+ -EINVAL : 0;
+ qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan;
+	/* note: length is the data length only, i.e. the GRH is excluded */
+ qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
+ data->length.data_length;
+ *((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) =
+ ntohl(data->opaque_data_0);
+ *((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) =
+ ntohs((u16)data->opaque_data_1);
qedr_inc_sw_gsi_cons(&qp->rq);
@@ -111,6 +122,14 @@
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
}
+void qedr_ll2_release_rx_packet(void *cxt,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr, bool b_last_packet)
+{
+ /* Do nothing... */
+}
+
static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
struct ib_qp_init_attr *attrs)
{
@@ -159,27 +178,159 @@
return 0;
}
+static int qedr_ll2_post_tx(struct qedr_dev *dev,
+ struct qed_roce_ll2_packet *pkt)
+{
+ enum qed_ll2_roce_flavor_type roce_flavor;
+ struct qed_ll2_tx_pkt_info ll2_tx_pkt;
+ int rc;
+ int i;
+
+ memset(&ll2_tx_pkt, 0, sizeof(ll2_tx_pkt));
+
+ roce_flavor = (pkt->roce_mode == ROCE_V1) ?
+ QED_LL2_ROCE : QED_LL2_RROCE;
+
+ if (pkt->roce_mode == ROCE_V2_IPV4)
+ ll2_tx_pkt.enable_ip_cksum = 1;
+
+ ll2_tx_pkt.num_of_bds = 1 /* hdr */ + pkt->n_seg;
+ ll2_tx_pkt.vlan = 0;
+ ll2_tx_pkt.tx_dest = pkt->tx_dest;
+ ll2_tx_pkt.qed_roce_flavor = roce_flavor;
+ ll2_tx_pkt.first_frag = pkt->header.baddr;
+ ll2_tx_pkt.first_frag_len = pkt->header.len;
+ ll2_tx_pkt.cookie = pkt;
+
+ /* tx header */
+ rc = dev->ops->ll2_prepare_tx_packet(dev->rdma_ctx,
+ dev->gsi_ll2_handle,
+ &ll2_tx_pkt, 1);
+ if (rc) {
+ /* TX failed while posting header - release resources */
+ dma_free_coherent(&dev->pdev->dev, pkt->header.len,
+ pkt->header.vaddr, pkt->header.baddr);
+ kfree(pkt);
+
+ DP_ERR(dev, "roce ll2 tx: header failed (rc=%d)\n", rc);
+ return rc;
+ }
+
+ /* tx payload */
+ for (i = 0; i < pkt->n_seg; i++) {
+ rc = dev->ops->ll2_set_fragment_of_tx_packet(
+ dev->rdma_ctx,
+ dev->gsi_ll2_handle,
+ pkt->payload[i].baddr,
+ pkt->payload[i].len);
+
+ if (rc) {
+			/* If this fails there is little to do: part of the
+			 * packet has already been posted, so the memory
+			 * cannot be freed until the TX completion arrives.
+ */
+ DP_ERR(dev, "ll2 tx: payload failed (rc=%d)\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int qedr_ll2_stop(struct qedr_dev *dev)
+{
+ int rc;
+
+ if (dev->gsi_ll2_handle == QED_LL2_UNUSED_HANDLE)
+ return 0;
+
+ /* remove LL2 MAC address filter */
+ rc = dev->ops->ll2_set_mac_filter(dev->cdev,
+ dev->gsi_ll2_mac_address, NULL);
+
+ rc = dev->ops->ll2_terminate_connection(dev->rdma_ctx,
+ dev->gsi_ll2_handle);
+ if (rc)
+ DP_ERR(dev, "Failed to terminate LL2 connection (rc=%d)\n", rc);
+
+ dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
+
+ dev->gsi_ll2_handle = QED_LL2_UNUSED_HANDLE;
+
+ return rc;
+}
+
+int qedr_ll2_start(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs, struct qedr_qp *qp)
+{
+ struct qed_ll2_acquire_data data;
+ struct qed_ll2_cbs cbs;
+ int rc;
+
+ /* configure and start LL2 */
+ cbs.rx_comp_cb = qedr_ll2_complete_rx_packet;
+ cbs.tx_comp_cb = qedr_ll2_complete_tx_packet;
+ cbs.rx_release_cb = qedr_ll2_release_rx_packet;
+ cbs.tx_release_cb = qedr_ll2_complete_tx_packet;
+ cbs.cookie = dev;
+
+ memset(&data, 0, sizeof(data));
+ data.input.conn_type = QED_LL2_TYPE_ROCE;
+ data.input.mtu = dev->ndev->mtu;
+ data.input.rx_num_desc = attrs->cap.max_recv_wr;
+ data.input.rx_drop_ttl0_flg = true;
+ data.input.rx_vlan_removal_en = false;
+ data.input.tx_num_desc = attrs->cap.max_send_wr;
+ data.input.tx_tc = 0;
+ data.input.tx_dest = QED_LL2_TX_DEST_NW;
+ data.input.ai_err_packet_too_big = QED_LL2_DROP_PACKET;
+ data.input.ai_err_no_buf = QED_LL2_DROP_PACKET;
+ data.input.gsi_enable = 1;
+ data.p_connection_handle = &dev->gsi_ll2_handle;
+ data.cbs = &cbs;
+
+ rc = dev->ops->ll2_acquire_connection(dev->rdma_ctx, &data);
+ if (rc) {
+ DP_ERR(dev,
+ "ll2 start: failed to acquire LL2 connection (rc=%d)\n",
+ rc);
+ return rc;
+ }
+
+ rc = dev->ops->ll2_establish_connection(dev->rdma_ctx,
+ dev->gsi_ll2_handle);
+ if (rc) {
+ DP_ERR(dev,
+ "ll2 start: failed to establish LL2 connection (rc=%d)\n",
+ rc);
+ goto err1;
+ }
+
+ rc = dev->ops->ll2_set_mac_filter(dev->cdev, NULL, dev->ndev->dev_addr);
+ if (rc)
+ goto err2;
+
+ return 0;
+
+err2:
+ dev->ops->ll2_terminate_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
+err1:
+ dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
+
+ return rc;
+}
+
struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
struct ib_qp_init_attr *attrs,
struct qedr_qp *qp)
{
- struct qed_roce_ll2_params ll2_params;
int rc;
rc = qedr_check_gsi_qp_attrs(dev, attrs);
if (rc)
return ERR_PTR(rc);
- /* configure and start LL2 */
- memset(&ll2_params, 0, sizeof(ll2_params));
- ll2_params.max_tx_buffers = attrs->cap.max_send_wr;
- ll2_params.max_rx_buffers = attrs->cap.max_recv_wr;
- ll2_params.cbs.tx_cb = qedr_ll2_tx_cb;
- ll2_params.cbs.rx_cb = qedr_ll2_rx_cb;
- ll2_params.cb_cookie = (void *)dev;
- ll2_params.mtu = dev->ndev->mtu;
- ether_addr_copy(ll2_params.mac_address, dev->ndev->dev_addr);
- rc = dev->ops->roce_ll2_start(dev->cdev, &ll2_params);
+ rc = qedr_ll2_start(dev, attrs, qp);
if (rc) {
DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
return ERR_PTR(rc);
@@ -214,7 +365,7 @@
err:
kfree(qp->rqe_wr_id);
- rc = dev->ops->roce_ll2_stop(dev->cdev);
+ rc = qedr_ll2_stop(dev);
if (rc)
DP_ERR(dev, "create gsi qp: failed destroy on create\n");
@@ -223,15 +374,7 @@
int qedr_destroy_gsi_qp(struct qedr_dev *dev)
{
- int rc;
-
- rc = dev->ops->roce_ll2_stop(dev->cdev);
- if (rc)
- DP_ERR(dev, "destroy gsi qp: failed (rc=%d)\n", rc);
- else
- DP_DEBUG(dev, QEDR_MSG_GSI, "destroy gsi qp: success\n");
-
- return rc;
+ return qedr_ll2_stop(dev);
}
#define QEDR_MAX_UD_HEADER_SIZE (100)
@@ -421,7 +564,6 @@
{
struct qed_roce_ll2_packet *pkt = NULL;
struct qedr_qp *qp = get_qedr_qp(ibqp);
- struct qed_roce_ll2_tx_params params;
struct qedr_dev *dev = qp->dev;
unsigned long flags;
int rc;
@@ -449,8 +591,6 @@
goto err;
}
-	memset(&params, 0, sizeof(params));
-
spin_lock_irqsave(&qp->q_lock, flags);
rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
@@ -459,7 +599,8 @@
goto err;
}
-	rc = dev->ops->roce_ll2_tx(dev->cdev, pkt, &params);
+ rc = qedr_ll2_post_tx(dev, pkt);
+
if (!rc) {
qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
qedr_inc_sw_prod(&qp->sq);
@@ -467,17 +608,6 @@
"gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
} else {
- if (rc == QED_ROCE_TX_HEAD_FAILURE) {
- /* TX failed while posting header - release resources */
- dma_free_coherent(&dev->pdev->dev, pkt->header.len,
- pkt->header.vaddr, pkt->header.baddr);
- kfree(pkt);
- } else if (rc == QED_ROCE_TX_FRAG_FAILURE) {
- /* NTD since TX failed while posting a fragment. We will
- * release the resources on TX callback
- */
- }
-
DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
rc = -EAGAIN;
*bad_wr = wr;
@@ -504,10 +634,8 @@
{
struct qedr_dev *dev = get_qedr_dev(ibqp->device);
struct qedr_qp *qp = get_qedr_qp(ibqp);
- struct qed_roce_ll2_buffer buf;
unsigned long flags;
- int status = 0;
- int rc;
+ int rc = 0;
if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
(qp->state != QED_ROCE_QP_STATE_RTS)) {
@@ -518,8 +646,6 @@
return -EINVAL;
}
- memset(&buf, 0, sizeof(buf));
-
spin_lock_irqsave(&qp->q_lock, flags);
while (wr) {
@@ -530,10 +656,12 @@
goto err;
}
- buf.baddr = wr->sg_list[0].addr;
- buf.len = wr->sg_list[0].length;
-
- rc = dev->ops->roce_ll2_post_rx_buffer(dev->cdev, &buf, 0, 1);
+ rc = dev->ops->ll2_post_rx_buffer(dev->rdma_ctx,
+ dev->gsi_ll2_handle,
+ wr->sg_list[0].addr,
+ wr->sg_list[0].length,
+ 0 /* cookie */,
+ 1 /* notify_fw */);
if (rc) {
DP_ERR(dev,
"gsi post recv: failed to post rx buffer (rc=%d)\n",
@@ -553,7 +681,7 @@
spin_unlock_irqrestore(&qp->q_lock, flags);
- return status;
+ return rc;
err:
spin_unlock_irqrestore(&qp->q_lock, flags);
*bad_wr = wr;
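
Taken together, the qedr_cm.c hunks above move the GSI queue pair from the
dedicated roce_ll2_* ops onto the generic LL2 interface: completion and
release callbacks are registered through struct qed_ll2_cbs, the driver
acquires and establishes the connection itself, and the handle returned in
dev->gsi_ll2_handle is used for every later post and for teardown. A
condensed restatement of that lifecycle, with field setup, MAC filtering
and error unwinding elided (all names are taken from the diff):

	struct qed_ll2_acquire_data data = {};
	struct qed_ll2_cbs cbs = {
		.rx_comp_cb	= qedr_ll2_complete_rx_packet,
		.tx_comp_cb	= qedr_ll2_complete_tx_packet,
		.rx_release_cb	= qedr_ll2_release_rx_packet,
		.tx_release_cb	= qedr_ll2_complete_tx_packet,
		.cookie		= dev,
	};
	int rc;

	data.input.conn_type = QED_LL2_TYPE_ROCE;
	data.p_connection_handle = &dev->gsi_ll2_handle;
	data.cbs = &cbs;

	rc = dev->ops->ll2_acquire_connection(dev->rdma_ctx, &data);
	if (!rc)
		rc = dev->ops->ll2_establish_connection(dev->rdma_ctx,
							dev->gsi_ll2_handle);
	/* Teardown mirrors this: ll2_terminate_connection(), then
	 * ll2_release_connection(), after which the handle is reset
	 * to QED_LL2_UNUSED_HANDLE.
	 */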
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index d6723c3..548e4d1 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -935,7 +935,7 @@
QED_CHAIN_CNT_TYPE_U32,
chain_entries,
sizeof(union rdma_cqe),
- &cq->pbl);
+ &cq->pbl, NULL);
if (rc)
goto err1;
@@ -1423,7 +1423,7 @@
QED_CHAIN_CNT_TYPE_U32,
n_sq_elems,
QEDR_SQE_ELEMENT_SIZE,
- &qp->sq.pbl);
+ &qp->sq.pbl, NULL);
if (rc)
return rc;
@@ -1437,7 +1437,7 @@
QED_CHAIN_CNT_TYPE_U32,
n_rq_elems,
QEDR_RQE_ELEMENT_SIZE,
- &qp->rq.pbl);
+ &qp->rq.pbl, NULL);
if (rc)
return rc;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 1015a63..d129625 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -681,7 +681,7 @@
{
struct ipoib_pseudo_header *phdr;
- phdr = (struct ipoib_pseudo_header *)skb_push(skb, sizeof(*phdr));
+ phdr = skb_push(skb, sizeof(*phdr));
memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
}
@@ -1129,7 +1129,7 @@
{
struct ipoib_header *header;
- header = (struct ipoib_header *) skb_push(skb, sizeof *header);
+ header = skb_push(skb, sizeof *header);
header->proto = htons(type);
header->reserved = 0;
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
index 2e8fee9..afa938b 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
@@ -460,7 +460,7 @@
sc = opa_vnic_get_sc(info, skb);
l4_hdr = info->vesw.vesw_id;
- mdata = (struct opa_vnic_skb_mdata *)skb_push(skb, sizeof(*mdata));
+ mdata = skb_push(skb, sizeof(*mdata));
mdata->vl = opa_vnic_get_vl(adapter, skb);
mdata->entropy = entropy;
mdata->flags = 0;
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
index 905f39d..fcf7532 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -103,7 +103,7 @@
int rc;
/* pass entropy and vl as metadata in skb */
- mdata = (struct opa_vnic_skb_mdata *)skb_push(skb, sizeof(*mdata));
+ mdata = skb_push(skb, sizeof(*mdata));
mdata->entropy = opa_vnic_calc_entropy(adapter, skb);
mdata->vl = opa_vnic_get_vl(adapter, skb);
rc = adapter->rn_ops->ndo_select_queue(netdev, skb,
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 6a2df32..dde8f46 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -1058,7 +1058,7 @@
}
skb_reserve(skb, CAPI_DATA_B3_REQ_LEN);
- memcpy(skb_put(skb, count), buf, count);
+ skb_put_data(skb, buf, count);
__skb_queue_tail(&mp->outqueue, skb);
mp->outbytes += skb->len;
@@ -1082,7 +1082,7 @@
skb = mp->outskb;
if (skb) {
if (skb_tailroom(skb) > 0) {
- *(skb_put(skb, 1)) = ch;
+ skb_put_u8(skb, ch);
goto unlock_out;
}
mp->outskb = NULL;
@@ -1094,7 +1094,7 @@
skb = alloc_skb(CAPI_DATA_B3_REQ_LEN + CAPI_MAX_BLKSIZE, GFP_ATOMIC);
if (skb) {
skb_reserve(skb, CAPI_DATA_B3_REQ_LEN);
- *(skb_put(skb, 1)) = ch;
+ skb_put_u8(skb, ch);
mp->outskb = skb;
} else {
printk(KERN_ERR "capinc_put_char: char %u lost\n", ch);
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 85cfa4f..89dd130 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -516,7 +516,7 @@
printk(KERN_ERR "capidrv::send_message: can't allocate mem\n");
return;
}
- memcpy(skb_put(skb, len), cmsg->buf, len);
+ skb_put_data(skb, cmsg->buf, len);
if (capi20_put_message(&global.ap, skb) != CAPI_NOERROR)
kfree_skb(skb);
}
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
index c90dca5..bc20855 100644
--- a/drivers/isdn/gigaset/asyncdata.c
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -264,7 +264,7 @@
/* skip remainder of packet */
bcs->rx_skb = skb = NULL;
} else {
- *__skb_put(skb, 1) = c;
+ __skb_put_u8(skb, c);
fcs = crc_ccitt_byte(fcs, c);
}
}
@@ -315,7 +315,7 @@
/* regular data byte: append to current skb */
inputstate |= INS_have_data;
- *__skb_put(skb, 1) = bitrev8(c);
+ __skb_put_u8(skb, bitrev8(c));
}
/* pass data up */
@@ -492,33 +492,33 @@
hdlc_skb->mac_len = skb->mac_len;
/* Add flag sequence in front of everything.. */
- *(skb_put(hdlc_skb, 1)) = PPP_FLAG;
+ skb_put_u8(hdlc_skb, PPP_FLAG);
/* Perform byte stuffing while copying data. */
while (skb->len--) {
if (muststuff(*skb->data)) {
- *(skb_put(hdlc_skb, 1)) = PPP_ESCAPE;
- *(skb_put(hdlc_skb, 1)) = (*skb->data++) ^ PPP_TRANS;
+ skb_put_u8(hdlc_skb, PPP_ESCAPE);
+ skb_put_u8(hdlc_skb, (*skb->data++) ^ PPP_TRANS);
} else
- *(skb_put(hdlc_skb, 1)) = *skb->data++;
+ skb_put_u8(hdlc_skb, *skb->data++);
}
/* Finally add FCS (byte stuffed) and flag sequence */
c = (fcs & 0x00ff); /* least significant byte first */
if (muststuff(c)) {
- *(skb_put(hdlc_skb, 1)) = PPP_ESCAPE;
+ skb_put_u8(hdlc_skb, PPP_ESCAPE);
c ^= PPP_TRANS;
}
- *(skb_put(hdlc_skb, 1)) = c;
+ skb_put_u8(hdlc_skb, c);
c = ((fcs >> 8) & 0x00ff);
if (muststuff(c)) {
- *(skb_put(hdlc_skb, 1)) = PPP_ESCAPE;
+ skb_put_u8(hdlc_skb, PPP_ESCAPE);
c ^= PPP_TRANS;
}
- *(skb_put(hdlc_skb, 1)) = c;
+ skb_put_u8(hdlc_skb, c);
- *(skb_put(hdlc_skb, 1)) = PPP_FLAG;
+ skb_put_u8(hdlc_skb, PPP_FLAG);
dev_kfree_skb_any(skb);
return hdlc_skb;
@@ -561,8 +561,8 @@
while (len--) {
c = bitrev8(*cp++);
if (c == DLE_FLAG)
- *(skb_put(iraw_skb, 1)) = c;
- *(skb_put(iraw_skb, 1)) = c;
+ skb_put_u8(iraw_skb, c);
+ skb_put_u8(iraw_skb, c);
}
dev_kfree_skb_any(skb);
return iraw_skb;
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index bc29f1d..97e00118 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -511,7 +511,7 @@
bcs->rx_skb = NULL;
return;
}
- *__skb_put(bcs->rx_skb, 1) = c;
+ __skb_put_u8(bcs->rx_skb, c);
}
/* hdlc_flush
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
index 9fdbd99..b1833d0 100644
--- a/drivers/isdn/hardware/avm/b1.c
+++ b/drivers/isdn/hardware/avm/b1.c
@@ -529,8 +529,8 @@
printk(KERN_ERR "%s: incoming packet dropped\n",
card->name);
} else {
- memcpy(skb_put(skb, MsgLen), card->msgbuf, MsgLen);
- memcpy(skb_put(skb, DataB3Len), card->databuf, DataB3Len);
+ skb_put_data(skb, card->msgbuf, MsgLen);
+ skb_put_data(skb, card->databuf, DataB3Len);
capi_ctr_handle_message(ctrl, ApplId, skb);
}
break;
@@ -544,7 +544,7 @@
card->name);
spin_unlock_irqrestore(&card->lock, flags);
} else {
- memcpy(skb_put(skb, MsgLen), card->msgbuf, MsgLen);
+ skb_put_data(skb, card->msgbuf, MsgLen);
if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF)
capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
CAPIMSG_NCCI(skb->data),
diff --git a/drivers/isdn/hardware/avm/b1dma.c b/drivers/isdn/hardware/avm/b1dma.c
index 818bd8f..9538a9e 100644
--- a/drivers/isdn/hardware/avm/b1dma.c
+++ b/drivers/isdn/hardware/avm/b1dma.c
@@ -474,8 +474,8 @@
printk(KERN_ERR "%s: incoming packet dropped\n",
card->name);
} else {
- memcpy(skb_put(skb, MsgLen), card->msgbuf, MsgLen);
- memcpy(skb_put(skb, DataB3Len), card->databuf, DataB3Len);
+ skb_put_data(skb, card->msgbuf, MsgLen);
+ skb_put_data(skb, card->databuf, DataB3Len);
capi_ctr_handle_message(ctrl, ApplId, skb);
}
break;
@@ -488,7 +488,7 @@
printk(KERN_ERR "%s: incoming packet dropped\n",
card->name);
} else {
- memcpy(skb_put(skb, MsgLen), card->msgbuf, MsgLen);
+ skb_put_data(skb, card->msgbuf, MsgLen);
if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF) {
spin_lock(&card->lock);
capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index 17beb28..40c7e2c 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -536,8 +536,8 @@
printk(KERN_ERR "%s: incoming packet dropped\n",
card->name);
} else {
- memcpy(skb_put(skb, MsgLen), card->msgbuf, MsgLen);
- memcpy(skb_put(skb, DataB3Len), card->databuf, DataB3Len);
+ skb_put_data(skb, card->msgbuf, MsgLen);
+ skb_put_data(skb, card->databuf, DataB3Len);
capi_ctr_handle_message(ctrl, ApplId, skb);
}
break;
@@ -555,7 +555,7 @@
printk(KERN_ERR "%s: incoming packet dropped\n",
card->name);
} else {
- memcpy(skb_put(skb, MsgLen), card->msgbuf, MsgLen);
+ skb_put_data(skb, card->msgbuf, MsgLen);
if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF)
capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
CAPIMSG_NCCI(skb->data),
diff --git a/drivers/isdn/hardware/avm/t1isa.c b/drivers/isdn/hardware/avm/t1isa.c
index 9516203..9f80d20 100644
--- a/drivers/isdn/hardware/avm/t1isa.c
+++ b/drivers/isdn/hardware/avm/t1isa.c
@@ -171,8 +171,8 @@
printk(KERN_ERR "%s: incoming packet dropped\n",
card->name);
} else {
- memcpy(skb_put(skb, MsgLen), card->msgbuf, MsgLen);
- memcpy(skb_put(skb, DataB3Len), card->databuf, DataB3Len);
+ skb_put_data(skb, card->msgbuf, MsgLen);
+ skb_put_data(skb, card->databuf, DataB3Len);
capi_ctr_handle_message(ctrl, ApplId, skb);
}
break;
@@ -186,7 +186,7 @@
printk(KERN_ERR "%s: incoming packet dropped\n",
card->name);
} else {
- memcpy(skb_put(skb, MsgLen), card->msgbuf, MsgLen);
+ skb_put_data(skb, card->msgbuf, MsgLen);
if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3)
capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
CAPIMSG_NCCI(skb->data),
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 961c07e..aea0c96 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -1926,7 +1926,7 @@
hh = mISDN_HEAD_P(skb);
hh->prim = PH_CONTROL_IND;
hh->id = DTMF_HFC_COEF;
- memcpy(skb_put(skb, 512), hc->chan[ch].coeff, 512);
+ skb_put_data(skb, hc->chan[ch].coeff, 512);
recv_Bchannel_skb(bch, skb);
}
}
@@ -2332,8 +2332,7 @@
skb = *sp;
*sp = mI_alloc_skb(skb->len, GFP_ATOMIC);
if (*sp) {
- memcpy(skb_put(*sp, skb->len),
- skb->data, skb->len);
+ skb_put_data(*sp, skb->data, skb->len);
skb_trim(skb, 0);
} else {
printk(KERN_DEBUG "%s: No mem\n",
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 114f3bc..17cc879 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -893,7 +893,7 @@
}
}
- memcpy(skb_put(rx_skb, len), data, len);
+ skb_put_data(rx_skb, data, len);
if (hdlc) {
/* we have a complete hdlc packet */
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index 6742b0d..e240010 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -364,8 +364,8 @@
WriteISAC(isac, ISAC_MOCR, isac->mocr);
if (isac->mon_txc && (isac->mon_txp >= isac->mon_txc)) {
if (isac->monitor)
- ret = isac->monitor(isac->dch.hw,
- MONITOR_TX_0, NULL, 0);
+ isac->monitor(isac->dch.hw,
+ MONITOR_TX_0, NULL, 0);
}
kfree(isac->mon_tx);
isac->mon_tx = NULL;
@@ -375,8 +375,8 @@
}
if (isac->mon_txc && (isac->mon_txp >= isac->mon_txc)) {
if (isac->monitor)
- ret = isac->monitor(isac->dch.hw,
- MONITOR_TX_0, NULL, 0);
+ isac->monitor(isac->dch.hw,
+ MONITOR_TX_0, NULL, 0);
kfree(isac->mon_tx);
isac->mon_tx = NULL;
isac->mon_txc = 0;
@@ -397,8 +397,8 @@
WriteISAC(isac, ISAC_MOCR, isac->mocr);
if (isac->mon_txc && (isac->mon_txp >= isac->mon_txc)) {
if (isac->monitor)
- ret = isac->monitor(isac->dch.hw,
- MONITOR_TX_1, NULL, 0);
+ isac->monitor(isac->dch.hw,
+ MONITOR_TX_1, NULL, 0);
}
kfree(isac->mon_tx);
isac->mon_tx = NULL;
@@ -408,8 +408,8 @@
}
if (isac->mon_txc && (isac->mon_txp >= isac->mon_txc)) {
if (isac->monitor)
- ret = isac->monitor(isac->dch.hw,
- MONITOR_TX_1, NULL, 0);
+ isac->monitor(isac->dch.hw,
+ MONITOR_TX_1, NULL, 0);
kfree(isac->mon_tx);
isac->mon_tx = NULL;
isac->mon_txc = 0;
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c
index 3a4c2f9..dcf4c2a 100644
--- a/drivers/isdn/hisax/amd7930_fn.c
+++ b/drivers/isdn/hisax/amd7930_fn.c
@@ -317,7 +317,8 @@
debugl1(cs, "%s", cs->dlog);
}
/* moves received data in sk-buffer */
- memcpy(skb_put(skb, cs->rcvidx), cs->rcvbuf, cs->rcvidx);
+ skb_put_data(skb, cs->rcvbuf,
+ cs->rcvidx);
skb_queue_tail(&cs->rq, skb);
}
}
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
index d1427bd..daf3742 100644
--- a/drivers/isdn/hisax/avm_pci.c
+++ b/drivers/isdn/hisax/avm_pci.c
@@ -378,8 +378,9 @@
if (!(skb = dev_alloc_skb(bcs->hw.hdlc.rcvidx)))
printk(KERN_WARNING "HDLC: receive out of memory\n");
else {
- memcpy(skb_put(skb, bcs->hw.hdlc.rcvidx),
- bcs->hw.hdlc.rcvbuf, bcs->hw.hdlc.rcvidx);
+ skb_put_data(skb,
+ bcs->hw.hdlc.rcvbuf,
+ bcs->hw.hdlc.rcvidx);
skb_queue_tail(&bcs->rqueue, skb);
}
bcs->hw.hdlc.rcvidx = 0;
diff --git a/drivers/isdn/hisax/diva.c b/drivers/isdn/hisax/diva.c
index 079336e..3fc94e7 100644
--- a/drivers/isdn/hisax/diva.c
+++ b/drivers/isdn/hisax/diva.c
@@ -511,7 +511,8 @@
if (!(skb = dev_alloc_skb(count)))
printk(KERN_WARNING "HSCX: receive out of memory\n");
else {
- memcpy(skb_put(skb, count), bcs->hw.hscx.rcvbuf, count);
+ skb_put_data(skb, bcs->hw.hscx.rcvbuf,
+ count);
skb_queue_tail(&bcs->rqueue, skb);
}
}
@@ -526,7 +527,8 @@
if (!(skb = dev_alloc_skb(fifo_size)))
printk(KERN_WARNING "HiSax: receive out of memory\n");
else {
- memcpy(skb_put(skb, fifo_size), bcs->hw.hscx.rcvbuf, fifo_size);
+ skb_put_data(skb, bcs->hw.hscx.rcvbuf,
+ fifo_size);
skb_queue_tail(&bcs->rqueue, skb);
}
bcs->hw.hscx.rcvidx = 0;
diff --git a/drivers/isdn/hisax/elsa_ser.c b/drivers/isdn/hisax/elsa_ser.c
index a2a358c..999effd 100644
--- a/drivers/isdn/hisax/elsa_ser.c
+++ b/drivers/isdn/hisax/elsa_ser.c
@@ -333,8 +333,8 @@
if (!(skb = dev_alloc_skb(cs->hw.elsa.rcvcnt)))
printk(KERN_WARNING "ElsaSER: receive out of memory\n");
else {
- memcpy(skb_put(skb, cs->hw.elsa.rcvcnt), cs->hw.elsa.rcvbuf,
- cs->hw.elsa.rcvcnt);
+ skb_put_data(skb, cs->hw.elsa.rcvbuf,
+ cs->hw.elsa.rcvcnt);
skb_queue_tail(&cs->hw.elsa.bcs->rqueue, skb);
}
schedule_event(cs->hw.elsa.bcs, B_RCVBUFREADY);
diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
index 6dbd1f1..ef47480 100644
--- a/drivers/isdn/hisax/hfc_usb.c
+++ b/drivers/isdn/hisax/hfc_usb.c
@@ -799,7 +799,7 @@
}
if (len) {
if (fifo->skbuff->len + len < fifo->max_size) {
- memcpy(skb_put(fifo->skbuff, len), data, len);
+ skb_put_data(fifo->skbuff, data, len);
} else {
DBG(HFCUSB_DBG_FIFO_ERR,
"HCF-USB: got frame exceeded fifo->max_size(%d) fifo(%d)",
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index 5e8a5d9..5a9f39e 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -495,8 +495,7 @@
if (!skb) {
printk(KERN_WARNING "HDLC: receive out of memory\n");
} else {
- memcpy(skb_put(skb, bcs->rcvidx), bcs->rcvbuf,
- bcs->rcvidx);
+ skb_put_data(skb, bcs->rcvbuf, bcs->rcvidx);
DBG_SKB(1, skb);
B_L1L2(bcs, PH_DATA | INDICATION, skb);
}
diff --git a/drivers/isdn/hisax/hisax_isac.c b/drivers/isdn/hisax/hisax_isac.c
index 5154c252..0f36375 100644
--- a/drivers/isdn/hisax/hisax_isac.c
+++ b/drivers/isdn/hisax/hisax_isac.c
@@ -557,7 +557,7 @@
DBG(DBG_WARN, "no memory, dropping\n");
goto out;
}
- memcpy(skb_put(skb, count), isac->rcvbuf, count);
+ skb_put_data(skb, isac->rcvbuf, count);
DBG_SKB(DBG_RPACKET, skb);
D_L1L2(isac, PH_DATA | INDICATION, skb);
out:
@@ -687,7 +687,7 @@
DBG(DBG_WARN, "no memory, dropping");
goto out;
}
- memcpy(skb_put(skb, count), isac->rcvbuf, count);
+ skb_put_data(skb, isac->rcvbuf, count);
DBG_SKB(DBG_RPACKET, skb);
D_L1L2(isac, PH_DATA | INDICATION, skb);
out:
diff --git a/drivers/isdn/hisax/hscx_irq.c b/drivers/isdn/hisax/hscx_irq.c
index a8d6188..0d7e783 100644
--- a/drivers/isdn/hisax/hscx_irq.c
+++ b/drivers/isdn/hisax/hscx_irq.c
@@ -169,7 +169,8 @@
if (!(skb = dev_alloc_skb(count)))
printk(KERN_WARNING "HSCX: receive out of memory\n");
else {
- memcpy(skb_put(skb, count), bcs->hw.hscx.rcvbuf, count);
+ skb_put_data(skb, bcs->hw.hscx.rcvbuf,
+ count);
skb_queue_tail(&bcs->rqueue, skb);
}
}
@@ -184,7 +185,8 @@
if (!(skb = dev_alloc_skb(fifo_size)))
printk(KERN_WARNING "HiSax: receive out of memory\n");
else {
- memcpy(skb_put(skb, fifo_size), bcs->hw.hscx.rcvbuf, fifo_size);
+ skb_put_data(skb, bcs->hw.hscx.rcvbuf,
+ fifo_size);
skb_queue_tail(&bcs->rqueue, skb);
}
bcs->hw.hscx.rcvidx = 0;
diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c
index c7c3797..8d18045 100644
--- a/drivers/isdn/hisax/icc.c
+++ b/drivers/isdn/hisax/icc.c
@@ -217,7 +217,7 @@
if (!(skb = alloc_skb(count, GFP_ATOMIC)))
printk(KERN_WARNING "HiSax: D receive out of memory\n");
else {
- memcpy(skb_put(skb, count), cs->rcvbuf, count);
+ skb_put_data(skb, cs->rcvbuf, count);
skb_queue_tail(&cs->rq, skb);
}
}
diff --git a/drivers/isdn/hisax/ipacx.c b/drivers/isdn/hisax/ipacx.c
index 43effe7..c426b4f 100644
--- a/drivers/isdn/hisax/ipacx.c
+++ b/drivers/isdn/hisax/ipacx.c
@@ -350,7 +350,7 @@
if (!(skb = dev_alloc_skb(count)))
printk(KERN_WARNING "HiSax dch_int(): receive out of memory\n");
else {
- memcpy(skb_put(skb, count), cs->rcvbuf, count);
+ skb_put_data(skb, cs->rcvbuf, count);
skb_queue_tail(&cs->rq, skb);
}
}
@@ -627,7 +627,8 @@
if (!(skb = dev_alloc_skb(count)))
printk(KERN_WARNING "HiSax bch_int(): receive frame out of memory\n");
else {
- memcpy(skb_put(skb, count), bcs->hw.hscx.rcvbuf, count);
+ skb_put_data(skb, bcs->hw.hscx.rcvbuf,
+ count);
skb_queue_tail(&bcs->rqueue, skb);
}
}
@@ -644,7 +645,8 @@
if (!(skb = dev_alloc_skb(B_FIFO_SIZE)))
printk(KERN_WARNING "HiSax bch_int(): receive transparent out of memory\n");
else {
- memcpy(skb_put(skb, B_FIFO_SIZE), bcs->hw.hscx.rcvbuf, B_FIFO_SIZE);
+ skb_put_data(skb, bcs->hw.hscx.rcvbuf,
+ B_FIFO_SIZE);
skb_queue_tail(&bcs->rqueue, skb);
}
bcs->hw.hscx.rcvidx = 0;
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
index 4273b45..ea965f2 100644
--- a/drivers/isdn/hisax/isac.c
+++ b/drivers/isdn/hisax/isac.c
@@ -222,7 +222,7 @@
if (!skb)
printk(KERN_WARNING "HiSax: D receive out of memory\n");
else {
- memcpy(skb_put(skb, count), cs->rcvbuf, count);
+ skb_put_data(skb, cs->rcvbuf, count);
skb_queue_tail(&cs->rq, skb);
}
}
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index 0dc60b2..98b4b67 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -458,7 +458,7 @@
struct sk_buff *skb;
if ((skb = dev_alloc_skb(2))) {
- memcpy(skb_put(skb, 2), dleetx, 2);
+ skb_put_data(skb, dleetx, 2);
skb_queue_tail(&bcs->rqueue, skb);
schedule_event(bcs, B_RCVBUFREADY);
} else {
@@ -550,8 +550,8 @@
} else if (!(skb = dev_alloc_skb(bcs->hw.isar.rcvidx - 2))) {
printk(KERN_WARNING "ISAR: receive out of memory\n");
} else {
- memcpy(skb_put(skb, bcs->hw.isar.rcvidx - 2),
- bcs->hw.isar.rcvbuf, bcs->hw.isar.rcvidx - 2);
+ skb_put_data(skb, bcs->hw.isar.rcvbuf,
+ bcs->hw.isar.rcvidx - 2);
skb_queue_tail(&bcs->rqueue, skb);
schedule_event(bcs, B_RCVBUFREADY);
}
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c
index c53a53f..1a40ed0 100644
--- a/drivers/isdn/hisax/isdnl2.c
+++ b/drivers/isdn/hisax/isdnl2.c
@@ -433,7 +433,7 @@
printk(KERN_WARNING "isdl2 can't alloc sbbuff for send_uframe\n");
return;
}
- memcpy(skb_put(skb, i), tmp, i);
+ skb_put_data(skb, tmp, i);
enqueue_super(st, skb);
}
@@ -894,7 +894,7 @@
printk(KERN_WARNING "isdl2 can't alloc sbbuff for enquiry_cr\n");
return;
}
- memcpy(skb_put(skb, i), tmp, i);
+ skb_put_data(skb, tmp, i);
enqueue_super(st, skb);
}
diff --git a/drivers/isdn/hisax/jade_irq.c b/drivers/isdn/hisax/jade_irq.c
index b930da9..a89e2df 100644
--- a/drivers/isdn/hisax/jade_irq.c
+++ b/drivers/isdn/hisax/jade_irq.c
@@ -147,7 +147,8 @@
if (!(skb = dev_alloc_skb(count)))
printk(KERN_WARNING "JADE %s receive out of memory\n", (jade ? "B" : "A"));
else {
- memcpy(skb_put(skb, count), bcs->hw.hscx.rcvbuf, count);
+ skb_put_data(skb, bcs->hw.hscx.rcvbuf,
+ count);
skb_queue_tail(&bcs->rqueue, skb);
}
}
@@ -162,7 +163,8 @@
if (!(skb = dev_alloc_skb(fifo_size)))
printk(KERN_WARNING "HiSax: receive out of memory\n");
else {
- memcpy(skb_put(skb, fifo_size), bcs->hw.hscx.rcvbuf, fifo_size);
+ skb_put_data(skb, bcs->hw.hscx.rcvbuf,
+ fifo_size);
skb_queue_tail(&bcs->rqueue, skb);
}
bcs->hw.hscx.rcvidx = 0;
diff --git a/drivers/isdn/hisax/l3_1tr6.c b/drivers/isdn/hisax/l3_1tr6.c
index 875402e..da0a1c6 100644
--- a/drivers/isdn/hisax/l3_1tr6.c
+++ b/drivers/isdn/hisax/l3_1tr6.c
@@ -149,7 +149,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
L3DelTimer(&pc->timer);
L3AddTimer(&pc->timer, T303, CC_T303);
newl3state(pc, 1);
@@ -497,7 +497,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
L3DelTimer(&pc->timer);
L3AddTimer(&pc->timer, T313, CC_T313);
@@ -543,7 +543,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
L3AddTimer(&pc->timer, T305, CC_T305);
}
@@ -602,7 +602,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
L3AddTimer(&pc->timer, T308, CC_T308_1);
}
diff --git a/drivers/isdn/hisax/l3dss1.c b/drivers/isdn/hisax/l3dss1.c
index cda7006..18a3484 100644
--- a/drivers/isdn/hisax/l3dss1.c
+++ b/drivers/isdn/hisax/l3dss1.c
@@ -525,7 +525,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
}
@@ -551,7 +551,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
}
@@ -587,7 +587,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
dss1_release_l3_process(pc);
}
@@ -944,7 +944,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
} /* l3dss1_msg_with_uus */
@@ -1420,7 +1420,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
L3DelTimer(&pc->timer);
L3AddTimer(&pc->timer, T303, CC_T303);
newl3state(pc, 1);
@@ -1786,7 +1786,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
newl3state(pc, 11);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
L3AddTimer(&pc->timer, T305, CC_T305);
@@ -1848,7 +1848,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
newl3state(pc, 0);
@@ -2145,7 +2145,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l))) return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
} /* l3dss1_redir_req */
@@ -2216,7 +2216,7 @@
if (pc) dss1_release_l3_process(pc);
return (-2);
}
- memcpy(skb_put(skb, l), temp, l);
+ skb_put_data(skb, temp, l);
if (pc)
{ pc->prot.dss1.invoke_id = id; /* remember id */
@@ -2359,7 +2359,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
newl3state(pc, 19);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
L3AddTimer(&pc->timer, T308, CC_T308_1);
@@ -2528,7 +2528,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
newl3state(pc, 15);
L3AddTimer(&pc->timer, T319, CC_T319);
@@ -2603,7 +2603,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
newl3state(pc, 17);
L3AddTimer(&pc->timer, T318, CC_T318);
@@ -2721,7 +2721,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
newl3state(pc, 0);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
}
@@ -2929,7 +2929,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
l3_msg(proc->st, DL_DATA | REQUEST, skb);
} else {
if (st->l3.debug & L3_DEB_STATE) {
diff --git a/drivers/isdn/hisax/l3ni1.c b/drivers/isdn/hisax/l3ni1.c
index 8dc791b..ea311e7 100644
--- a/drivers/isdn/hisax/l3ni1.c
+++ b/drivers/isdn/hisax/l3ni1.c
@@ -454,7 +454,7 @@
if (!(skb = l3_alloc_skb(7)))
return;
- memcpy(skb_put(skb, 7), tmp, 7);
+ skb_put_data(skb, tmp, 7);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
}
@@ -475,7 +475,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
}
@@ -501,7 +501,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
}
@@ -537,7 +537,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
ni1_release_l3_process(pc);
}
@@ -894,7 +894,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
} /* l3ni1_msg_with_uus */
@@ -1274,7 +1274,7 @@
{
return;
}
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
L3DelTimer(&pc->timer);
L3AddTimer(&pc->timer, T303, CC_T303);
newl3state(pc, 1);
@@ -1640,7 +1640,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
newl3state(pc, 11);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
L3AddTimer(&pc->timer, T305, CC_T305);
@@ -1704,7 +1704,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
newl3state(pc, 0);
@@ -2001,7 +2001,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l))) return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
} /* l3ni1_redir_req */
@@ -2076,7 +2076,7 @@
if (pc) ni1_release_l3_process(pc);
return (-2);
}
- memcpy(skb_put(skb, l), temp, l);
+ skb_put_data(skb, temp, l);
if (pc)
{ pc->prot.ni1.invoke_id = id; /* remember id */
@@ -2219,7 +2219,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
newl3state(pc, 19);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
L3AddTimer(&pc->timer, T308, CC_T308_1);
@@ -2388,7 +2388,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
newl3state(pc, 15);
L3AddTimer(&pc->timer, T319, CC_T319);
@@ -2463,7 +2463,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
newl3state(pc, 17);
L3AddTimer(&pc->timer, T318, CC_T318);
@@ -2582,7 +2582,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
newl3state(pc, 0);
l3_msg(pc->st, DL_DATA | REQUEST, skb);
}
@@ -2655,7 +2655,7 @@
*p++ = IE_SPID;
*p++ = l;
- memcpy(skb_put(skb, l), pSPID, l);
+ skb_put_data(skb, pSPID, l);
newl3state(pc, iNewState);
@@ -2873,7 +2873,7 @@
l = p - tmp;
if (!(skb = l3_alloc_skb(l)))
return;
- memcpy(skb_put(skb, l), tmp, l);
+ skb_put_data(skb, tmp, l);
l3_msg(proc->st, DL_DATA | REQUEST, skb);
} else {
if (st->l3.debug & L3_DEB_STATE) {
diff --git a/drivers/isdn/hisax/netjet.c b/drivers/isdn/hisax/netjet.c
index 233e432..b7f54fa 100644
--- a/drivers/isdn/hisax/netjet.c
+++ b/drivers/isdn/hisax/netjet.c
@@ -383,7 +383,7 @@
if (!(skb = dev_alloc_skb(count)))
printk(KERN_WARNING "TIGER: receive out of memory\n");
else {
- memcpy(skb_put(skb, count), bcs->hw.tiger.rcvbuf, count);
+ skb_put_data(skb, bcs->hw.tiger.rcvbuf, count);
skb_queue_tail(&bcs->rqueue, skb);
}
test_and_set_bit(B_RCVBUFREADY, &bcs->event);
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
index a0fdbc0..1cb9930 100644
--- a/drivers/isdn/hisax/st5481_usb.c
+++ b/drivers/isdn/hisax/st5481_usb.c
@@ -527,7 +527,7 @@
WARNING("receive out of memory\n");
break;
}
- memcpy(skb_put(skb, status), in->rcvbuf, status);
+ skb_put_data(skb, in->rcvbuf, status);
in->hisax_if->l1l2(in->hisax_if, PH_DATA | INDICATION, skb);
} else if (status == -HDLC_CRC_ERROR) {
INFO("CRC error");
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c
index c99f0ec..6f6733b 100644
--- a/drivers/isdn/hisax/w6692.c
+++ b/drivers/isdn/hisax/w6692.c
@@ -309,7 +309,9 @@
if (!(skb = dev_alloc_skb(count)))
printk(KERN_WARNING "W6692: Bchan receive out of memory\n");
else {
- memcpy(skb_put(skb, count), bcs->hw.w6692.rcvbuf, count);
+ skb_put_data(skb,
+ bcs->hw.w6692.rcvbuf,
+ count);
skb_queue_tail(&bcs->rqueue, skb);
}
}
@@ -332,7 +334,8 @@
if (!(skb = dev_alloc_skb(W_B_FIFO_THRESH)))
printk(KERN_WARNING "HiSax: receive out of memory\n");
else {
- memcpy(skb_put(skb, W_B_FIFO_THRESH), bcs->hw.w6692.rcvbuf, W_B_FIFO_THRESH);
+ skb_put_data(skb, bcs->hw.w6692.rcvbuf,
+ W_B_FIFO_THRESH);
skb_queue_tail(&bcs->rqueue, skb);
}
bcs->hw.w6692.rcvidx = 0;
@@ -441,7 +444,7 @@
if (!(skb = alloc_skb(count, GFP_ATOMIC)))
printk(KERN_WARNING "HiSax: D receive out of memory\n");
else {
- memcpy(skb_put(skb, count), cs->rcvbuf, count);
+ skb_put_data(skb, cs->rcvbuf, count);
skb_queue_tail(&cs->rq, skb);
}
}
diff --git a/drivers/isdn/hysdn/hycapi.c b/drivers/isdn/hysdn/hycapi.c
index 93bae94..eac0f51 100644
--- a/drivers/isdn/hysdn/hycapi.c
+++ b/drivers/isdn/hysdn/hycapi.c
@@ -171,16 +171,16 @@
card->myid);
return;
}
- memcpy(skb_put(skb, sizeof(__u16)), &len, sizeof(__u16));
- memcpy(skb_put(skb, sizeof(__u16)), &appl, sizeof(__u16));
- memcpy(skb_put(skb, sizeof(__u8)), &_command, sizeof(_command));
- memcpy(skb_put(skb, sizeof(__u8)), &_subcommand, sizeof(_subcommand));
- memcpy(skb_put(skb, sizeof(__u16)), &MessageNumber, sizeof(__u16));
- memcpy(skb_put(skb, sizeof(__u16)), &MessageBufferSize, sizeof(__u16));
- memcpy(skb_put(skb, sizeof(__u16)), &(rp->level3cnt), sizeof(__u16));
- memcpy(skb_put(skb, sizeof(__u16)), &(rp->datablkcnt), sizeof(__u16));
- memcpy(skb_put(skb, sizeof(__u16)), &(rp->datablklen), sizeof(__u16));
- memcpy(skb_put(skb, slen), ExtFeatureDefaults, slen);
+ skb_put_data(skb, &len, sizeof(__u16));
+ skb_put_data(skb, &appl, sizeof(__u16));
+ skb_put_data(skb, &_command, sizeof(__u8));
+ skb_put_data(skb, &_subcommand, sizeof(__u8));
+ skb_put_data(skb, &MessageNumber, sizeof(__u16));
+ skb_put_data(skb, &MessageBufferSize, sizeof(__u16));
+ skb_put_data(skb, &(rp->level3cnt), sizeof(__u16));
+ skb_put_data(skb, &(rp->datablkcnt), sizeof(__u16));
+ skb_put_data(skb, &(rp->datablklen), sizeof(__u16));
+ skb_put_data(skb, ExtFeatureDefaults, slen);
hycapi_applications[appl - 1].ctrl_mask |= (1 << (ctrl->cnr - 1));
hycapi_send_message(ctrl, skb);
}
@@ -279,11 +279,11 @@
card->myid);
return;
}
- memcpy(skb_put(skb, sizeof(__u16)), &len, sizeof(__u16));
- memcpy(skb_put(skb, sizeof(__u16)), &appl, sizeof(__u16));
- memcpy(skb_put(skb, sizeof(__u8)), &_command, sizeof(_command));
- memcpy(skb_put(skb, sizeof(__u8)), &_subcommand, sizeof(_subcommand));
- memcpy(skb_put(skb, sizeof(__u16)), &MessageNumber, sizeof(__u16));
+ skb_put_data(skb, &len, sizeof(__u16));
+ skb_put_data(skb, &appl, sizeof(__u16));
+ skb_put_data(skb, &_command, sizeof(__u8));
+ skb_put_data(skb, &_subcommand, sizeof(__u8));
+ skb_put_data(skb, &MessageNumber, sizeof(__u16));
hycapi_send_message(ctrl, skb);
hycapi_applications[appl - 1].ctrl_mask &= ~(1 << (ctrl->cnr - 1));
}
@@ -557,10 +557,9 @@
card->myid);
return;
}
- memcpy(skb_put(skb, MsgLen), buf, MsgLen);
- memcpy(skb_put(skb, 2 * sizeof(__u32)), CP64, 2 * sizeof(__u32));
- memcpy(skb_put(skb, len - MsgLen), buf + MsgLen,
- len - MsgLen);
+ skb_put_data(skb, buf, MsgLen);
+ skb_put_data(skb, CP64, 2 * sizeof(__u32));
+ skb_put_data(skb, buf + MsgLen, len - MsgLen);
CAPIMSG_SETLEN(skb->data, 30);
} else {
if (!(skb = alloc_skb(len, GFP_ATOMIC))) {
@@ -568,7 +567,7 @@
card->myid);
return;
}
- memcpy(skb_put(skb, len), buf, len);
+ skb_put_data(skb, buf, len);
}
switch (CAPIMSG_CMD(skb->data))
{
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c
index b93a4e9a..8e9c34f 100644
--- a/drivers/isdn/hysdn/hysdn_net.c
+++ b/drivers/isdn/hysdn/hysdn_net.c
@@ -201,7 +201,7 @@
return;
}
/* copy the data */
- memcpy(skb_put(skb, len), buf, len);
+ skb_put_data(skb, buf, len);
/* determine the used protocol */
skb->protocol = eth_type_trans(skb, dev);
diff --git a/drivers/isdn/i4l/isdn_audio.c b/drivers/isdn/i4l/isdn_audio.c
index 78ce422..b6bcd1e 100644
--- a/drivers/isdn/i4l/isdn_audio.c
+++ b/drivers/isdn/i4l/isdn_audio.c
@@ -462,7 +462,7 @@
info->line);
return;
}
- result = (int *) skb_put(skb, sizeof(int) * NCOEFF);
+ result = skb_put(skb, sizeof(int) * NCOEFF);
for (k = 0; k < NCOEFF; k++) {
sk = sk1 = sk2 = 0;
for (n = 0; n < DTMF_NPOINTS; n++) {
@@ -672,7 +672,7 @@
info->line);
return;
}
- p = (char *) skb_put(skb, 2);
+ p = skb_put(skb, 2);
p[0] = 0x10;
p[1] = code;
ISDN_AUDIO_SKB_DLECOUNT(skb) = 0;
diff --git a/drivers/isdn/i4l/isdn_bsdcomp.c b/drivers/isdn/i4l/isdn_bsdcomp.c
index 8837ac5..99012c0 100644
--- a/drivers/isdn/i4l/isdn_bsdcomp.c
+++ b/drivers/isdn/i4l/isdn_bsdcomp.c
@@ -472,7 +472,7 @@
accm |= ((ent) << bitno); \
do { \
if (skb_out && skb_tailroom(skb_out) > 0) \
- *(skb_put(skb_out, 1)) = (unsigned char)(accm >> 24); \
+ skb_put_u8(skb_out, (u8)(accm >> 24)); \
accm <<= 8; \
bitno += 8; \
} while (bitno <= 24); \
@@ -602,7 +602,8 @@
* Do not emit a completely useless byte of ones.
*/
if (bitno < 32 && skb_out && skb_tailroom(skb_out) > 0)
- *(skb_put(skb_out, 1)) = (unsigned char)((accm | (0xff << (bitno - 8))) >> 24);
+ skb_put_u8(skb_out,
+ (unsigned char)((accm | (0xff << (bitno - 8))) >> 24));
/*
* Increase code size if we would have without the packet
@@ -698,7 +699,7 @@
db->bytes_out += ilen;
if (skb_tailroom(skb_out) > 0)
- *(skb_put(skb_out, 1)) = 0;
+ skb_put_u8(skb_out, 0);
else
return DECOMP_ERR_NOMEM;
@@ -816,7 +817,7 @@
#endif
if (extra) /* the KwKwK case again */
- *(skb_put(skb_out, 1)) = finchar;
+ skb_put_u8(skb_out, finchar);
/*
* If not first code in a packet, and
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 8aa158a..88e5a02 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -1312,7 +1312,7 @@
/* check if we should pass this packet
* the filter instructions are constructed assuming
* a four-byte PPP header on each packet */
- *skb_push(skb, 4) = 1; /* indicate outbound */
+ *(u8 *)skb_push(skb, 4) = 1; /* indicate outbound */
{
__be16 *p = (__be16 *)skb->data;
@@ -1509,7 +1509,7 @@
* temporarily remove part of the fake header stuck on
* earlier.
*/
- *skb_pull(skb, IPPP_MAX_HEADER - 4) = 1; /* indicate outbound */
+ *(u8 *)skb_pull(skb, IPPP_MAX_HEADER - 4) = 1; /* indicate outbound */
{
__be16 *p = (__be16 *)skb->data;
@@ -2258,8 +2258,7 @@
/* Now stuff remaining bytes */
if (len) {
- p = skb_put(skb, len);
- memcpy(p, data, len);
+ skb_put_data(skb, data, len);
}
/* skb is now ready for xmit */
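[Editor's note: the first two isdn_ppp hunks add explicit (u8 *) casts because this series
also converts skb_push()/skb_pull() to return void *, so a byte store through the returned
pointer now needs a typed cast. A tiny illustrative sketch of the before/after, with
hypothetical names:]

	/* Model of a push helper that, like the new skb_push(), returns void *. */
	static void *buf_push(unsigned char **datap, unsigned int len)
	{
		*datap -= len;
		return *datap;
	}

	static void mark_outbound(unsigned char **datap)
	{
		/* was legal when push() returned unsigned char *:
		 *	*buf_push(datap, 4) = 1;
		 * with a void * result the store needs an explicit type:
		 */
		*(unsigned char *)buf_push(datap, 4) = 1;
	}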
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index ddd8207..d30130c 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -474,7 +474,7 @@
return;
}
skb_reserve(skb, skb_res);
- memcpy(skb_put(skb, buflen), info->port.xmit_buf, buflen);
+ skb_put_data(skb, info->port.xmit_buf, buflen);
info->xmit_count = 0;
#ifdef CONFIG_ISDN_AUDIO
if (info->vonline & 2) {
diff --git a/drivers/isdn/i4l/isdn_v110.c b/drivers/isdn/i4l/isdn_v110.c
index 52827a8..8b74ce4 100644
--- a/drivers/isdn/i4l/isdn_v110.c
+++ b/drivers/isdn/i4l/isdn_v110.c
@@ -421,7 +421,7 @@
}
if ((skb = dev_alloc_skb(v->framelen + v->skbres))) {
skb_reserve(skb, v->skbres);
- memcpy(skb_put(skb, v->framelen), v->OfflineFrame, v->framelen);
+ skb_put_data(skb, v->OfflineFrame, v->framelen);
}
return skb;
}
@@ -441,7 +441,7 @@
}
if ((skb = dev_alloc_skb(v->framelen + v->skbres))) {
skb_reserve(skb, v->skbres);
- memcpy(skb_put(skb, v->framelen), v->OnlineFrame, v->framelen);
+ skb_put_data(skb, v->OnlineFrame, v->framelen);
}
return skb;
}
@@ -486,7 +486,7 @@
}
skb_reserve(nskb, v->skbres + sizeof(int));
if (skb->len == 0) {
- memcpy(skb_put(nskb, v->framelen), v->OnlineFrame, v->framelen);
+ skb_put_data(nskb, v->OnlineFrame, v->framelen);
*((int *)skb_push(nskb, sizeof(int))) = 0;
return nskb;
}
diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
index ba60076..48bfbcb 100644
--- a/drivers/isdn/i4l/isdn_x25iface.c
+++ b/drivers/isdn/i4l/isdn_x25iface.c
@@ -224,7 +224,7 @@
skb = dev_alloc_skb(1);
if (skb) {
- *(skb_put(skb, 1)) = X25_IFACE_CONNECT;
+ skb_put_u8(skb, X25_IFACE_CONNECT);
skb->protocol = x25_type_trans(skb, cprot->net_dev);
netif_rx(skb);
return 0;
@@ -253,7 +253,7 @@
*state_p = WAN_DISCONNECTED;
skb = dev_alloc_skb(1);
if (skb) {
- *(skb_put(skb, 1)) = X25_IFACE_DISCONNECT;
+ skb_put_u8(skb, X25_IFACE_DISCONNECT);
skb->protocol = x25_type_trans(skb, cprot->net_dev);
netif_rx(skb);
return 0;
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index ef9c8e4..7ac7bad 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -479,7 +479,7 @@
}
if (ch >= 0)
sprintf(skb_put(skb, 3), "%02d;", ch);
- memcpy(skb_put(skb, strlen(s)), s, strlen(s));
+ skb_put_data(skb, s, strlen(s));
skb_queue_tail(&card->dqueue, skb);
return 0;
}
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index 8e3aa00..d4b6f01 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -1595,8 +1595,7 @@
thh = mISDN_HEAD_P(txskb);
thh->prim = DL_DATA_REQ;
thh->id = 0;
- memcpy(skb_put(txskb, len), nskb->data + preload,
- len);
+ skb_put_data(txskb, nskb->data + preload, len);
/* queue (trigger later) */
skb_queue_tail(&dsp->sendq, txskb);
}
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index 5eb380a..7243a67 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -176,7 +176,7 @@
hh->prim = prim;
hh->id = (l2->ch.nr << 16) | l2->ch.addr;
if (len)
- memcpy(skb_put(skb, len), arg, len);
+ skb_put_data(skb, arg, len);
err = l2->up->send(l2->up, skb);
if (err) {
printk(KERN_WARNING "%s: dev %s err=%d\n", __func__,
@@ -235,7 +235,7 @@
hh->prim = prim;
hh->id = id;
if (len)
- memcpy(skb_put(skb, len), arg, len);
+ skb_put_data(skb, arg, len);
err = l2down_raw(l2, skb);
if (err)
dev_kfree_skb(skb);
@@ -640,7 +640,7 @@
return;
}
}
- memcpy(skb_put(skb, i), tmp, i);
+ skb_put_data(skb, tmp, i);
enqueue_super(l2, skb);
}
@@ -1125,7 +1125,7 @@
mISDNDevName4ch(&l2->ch), __func__);
return;
}
- memcpy(skb_put(skb, i), tmp, i);
+ skb_put_data(skb, tmp, i);
enqueue_super(l2, skb);
}
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
index 592f597..908127e 100644
--- a/drivers/isdn/mISDN/tei.c
+++ b/drivers/isdn/mISDN/tei.c
@@ -312,7 +312,7 @@
hh->prim = prim;
hh->id = (mgr->ch.nr << 16) | mgr->ch.addr;
if (len)
- memcpy(skb_put(skb, len), arg, len);
+ skb_put_data(skb, arg, len);
err = mgr->up->send(mgr->up, skb);
if (err) {
printk(KERN_WARNING "%s: err=%d\n", __func__, err);
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 9947b34..06b0dcc 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -828,8 +828,7 @@
/* Copy data into our current skb. */
h.how_much = min(h.priv->ule_sndu_remain, (int)h.ts_remain);
- memcpy(skb_put(h.priv->ule_skb, h.how_much),
- h.from_where, h.how_much);
+ skb_put_data(h.priv->ule_skb, h.from_where, h.how_much);
h.priv->ule_sndu_remain -= h.how_much;
h.ts_remain -= h.how_much;
h.from_where += h.how_much;
@@ -964,7 +963,7 @@
skb->dev = dev;
/* copy L3 payload */
- eth = (u8 *) skb_put(skb, pkt_len - 12 - 4 + 14 - snap);
+ eth = skb_put(skb, pkt_len - 12 - 4 + 14 - snap);
memcpy(eth + 14, pkt + 12 + snap, pkt_len - 12 - 4 - snap);
/* create ethernet header: */
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index 588e2d6..ab3428b 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -416,7 +416,7 @@
if (!test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag) ||
test_bit(FM_INTTASK_RUNNING, &fmdev->flag)) {
/* Fill command header info */
- hdr = (struct fm_cmd_msg_hdr *)skb_put(skb, FM_CMD_MSG_HDR_SIZE);
+ hdr = skb_put(skb, FM_CMD_MSG_HDR_SIZE);
hdr->hdr = FM_PKT_LOGICAL_CHAN_NUMBER; /* 0x08 */
/* 3 (fm_opcode,rd_wr,dlen) + payload len) */
@@ -442,7 +442,7 @@
fm_cb(skb)->fm_op = *((u8 *)payload + 2);
}
if (payload != NULL)
- memcpy(skb_put(skb, payload_len), payload, payload_len);
+ skb_put_data(skb, payload, payload_len);
fm_cb(skb)->completion = wait_completion;
skb_queue_tail(&fmdev->tx_q, skb);
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 0005159..eda8d40 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -262,7 +262,7 @@
while (count) {
if (st_gdata->rx_count) {
len = min_t(unsigned int, st_gdata->rx_count, count);
- memcpy(skb_put(st_gdata->rx_skb, len), ptr, len);
+ skb_put_data(st_gdata->rx_skb, ptr, len);
st_gdata->rx_count -= len;
count -= len;
ptr += len;
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index bf0d770..e74413f 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -152,7 +152,7 @@
while (count) {
if (kim_gdata->rx_count) {
len = min_t(unsigned int, kim_gdata->rx_count, count);
- memcpy(skb_put(kim_gdata->rx_skb, len), ptr, len);
+ skb_put_data(kim_gdata->rx_skb, ptr, len);
kim_gdata->rx_count -= len;
count -= len;
ptr += len;
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c
index d78f301..8c651fd 100644
--- a/drivers/net/arcnet/arc-rawmode.c
+++ b/drivers/net/arcnet/arc-rawmode.c
@@ -85,7 +85,7 @@
unsigned short type, uint8_t daddr)
{
int hdr_size = ARC_HDR_SIZE;
- struct archdr *pkt = (struct archdr *)skb_push(skb, hdr_size);
+ struct archdr *pkt = skb_push(skb, hdr_size);
/* Set the source hardware address.
*
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index 2056878..a80f4eb 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -101,7 +101,7 @@
uint8_t daddr)
{
int hdr_size = ARC_HDR_SIZE;
- struct archdr *pkt = (struct archdr *)skb_push(skb, hdr_size);
+ struct archdr *pkt = skb_push(skb, hdr_size);
arc_printk(D_PROTO, dev, "Preparing header for cap packet %x.\n",
*((int *)&pkt->soft.cap.cookie[0]));
diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c
index 4b1a754..a7752a5 100644
--- a/drivers/net/arcnet/rfc1051.c
+++ b/drivers/net/arcnet/rfc1051.c
@@ -162,7 +162,7 @@
unsigned short type, uint8_t daddr)
{
int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE;
- struct archdr *pkt = (struct archdr *)skb_push(skb, hdr_size);
+ struct archdr *pkt = skb_push(skb, hdr_size);
struct arc_rfc1051 *soft = &pkt->soft.rfc1051;
/* set the protocol ID according to RFC1051 */
diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c
index 566da5e..a4c8562 100644
--- a/drivers/net/arcnet/rfc1201.c
+++ b/drivers/net/arcnet/rfc1201.c
@@ -379,7 +379,7 @@
{
struct arcnet_local *lp = netdev_priv(dev);
int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE;
- struct archdr *pkt = (struct archdr *)skb_push(skb, hdr_size);
+ struct archdr *pkt = skb_push(skb, hdr_size);
struct arc_rfc1201 *soft = &pkt->soft.rfc1201;
/* set the protocol ID according to RFC1201 */
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index e5386ab..f43fb2f 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -340,6 +340,11 @@
default:
/* unknown speed value from ethtool. shouldn't happen */
+ if (slave->speed != SPEED_UNKNOWN)
+ pr_warn_once("%s: unknown ethtool speed (%d) for port %d (set it to 0)\n",
+ slave->bond->dev->name,
+ slave->speed,
+ port->actor_port_number);
speed = 0;
break;
}
@@ -852,7 +857,7 @@
skb->protocol = PKT_TYPE_LACPDU;
skb->priority = TC_PRIO_CONTROL;
- lacpdu_header = (struct lacpdu_header *)skb_put(skb, length);
+ lacpdu_header = skb_put(skb, length);
ether_addr_copy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr);
/* Note: source address is set to be the member's PERMANENT address,
@@ -894,7 +899,7 @@
skb->network_header = skb->mac_header + ETH_HLEN;
skb->protocol = PKT_TYPE_LACPDU;
- marker_header = (struct bond_marker_header *)skb_put(skb, length);
+ marker_header = skb_put(skb, length);
ether_addr_copy(marker_header->hdr.h_dest, lacpdu_mcast_addr);
/* Note: source address is set to be the member's PERMANENT address,
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 7d7a3ce..c02cc81 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -925,7 +925,6 @@
struct learning_pkt pkt;
struct sk_buff *skb;
int size = sizeof(struct learning_pkt);
- char *data;
memset(&pkt, 0, size);
ether_addr_copy(pkt.mac_dst, mac_addr);
@@ -936,8 +935,7 @@
if (!skb)
return;
- data = skb_put(skb, size);
- memcpy(data, &pkt, size);
+ skb_put_data(skb, &pkt, size);
skb_reset_mac_header(skb);
skb->network_header = skb->mac_header + ETH_HLEN;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 8ab6bdb..2865f31 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3488,7 +3488,8 @@
case BOND_CHANGE_ACTIVE_OLD:
case SIOCBONDCHANGEACTIVE:
bond_opt_initstr(&newval, slave_dev->name);
- res = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval);
+ res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE,
+ &newval);
break;
default:
res = -EOPNOTSUPP;
@@ -4174,12 +4175,6 @@
.ndo_add_slave = bond_enslave,
.ndo_del_slave = bond_release,
.ndo_fix_features = bond_fix_features,
- .ndo_bridge_setlink = switchdev_port_bridge_setlink,
- .ndo_bridge_getlink = switchdev_port_bridge_getlink,
- .ndo_bridge_dellink = switchdev_port_bridge_dellink,
- .ndo_fdb_add = switchdev_port_fdb_add,
- .ndo_fdb_del = switchdev_port_fdb_del,
- .ndo_fdb_dump = switchdev_port_fdb_dump,
.ndo_features_check = passthru_features_check,
};
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 1bcbb89..8ca6833 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -673,7 +673,30 @@
out:
if (ret)
bond_opt_error_interpret(bond, opt, ret, val);
- else if (bond->dev->reg_state == NETREG_REGISTERED)
+
+ return ret;
+}
+/**
+ * __bond_opt_set_notify - set a bonding option
+ * @bond: target bond device
+ * @option: option to set
+ * @val: value to set it to
+ *
+ * This function is used to change the bond's option value and trigger
+ * a notification to user space. It can be used for both enabling/changing
+ * an option and for disabling it. RTNL lock must be obtained before calling
+ * this function.
+ */
+int __bond_opt_set_notify(struct bonding *bond,
+ unsigned int option, struct bond_opt_value *val)
+{
+ int ret = -ENOENT;
+
+ ASSERT_RTNL();
+
+ ret = __bond_opt_set(bond, option, val);
+
+ if (!ret && (bond->dev->reg_state == NETREG_REGISTERED))
call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev);
return ret;
@@ -696,7 +719,7 @@
if (!rtnl_trylock())
return restart_syscall();
bond_opt_initstr(&optval, buf);
- ret = __bond_opt_set(bond, option, &optval);
+ ret = __bond_opt_set_notify(bond, option, &optval);
rtnl_unlock();
return ret;
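[Editor's note: the bond_options change splits the raw setter (__bond_opt_set) from a
notifying wrapper (__bond_opt_set_notify), so the sysfs and ioctl paths fire
NETDEV_CHANGEINFODATA while callers that notify on their own, or run before the device is
registered, can keep using the raw setter. A generic model of that do/notify split, with
hypothetical names rather than the bonding API:]

	struct dev { int registered; };

	static int set_option_raw(struct dev *d, int opt, int val)
	{
		/* ... validate and apply the option ... */
		return 0;
	}

	static void notify_change(struct dev *d)
	{
		/* stands in for call_netdevice_notifiers(NETDEV_CHANGEINFODATA, ...) */
	}

	static int set_option_notify(struct dev *d, int opt, int val)
	{
		int ret = set_option_raw(d, opt, val);

		if (!ret && d->registered)
			notify_change(d);
		return ret;
	}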
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 71a7c3b..11ba6e3 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -426,7 +426,6 @@
/* Check for embedded CAIF frame. */
if (desc->offset) {
struct sk_buff *skb;
- u8 *dst = NULL;
int len = 0;
pfrm = ((u8 *)desc) + desc->offset;
@@ -454,8 +453,7 @@
}
caif_assert(skb != NULL);
- dst = skb_put(skb, len);
- memcpy(dst, pfrm, len);
+ skb_put_data(skb, pfrm, len);
skb->protocol = htons(ETH_P_CAIF);
skb_reset_mac_header(skb);
@@ -556,7 +554,6 @@
/* Parse payload. */
while (nfrms < CFHSI_MAX_PKTS && *plen) {
struct sk_buff *skb;
- u8 *dst = NULL;
u8 *pcffrm = NULL;
int len;
@@ -585,8 +582,7 @@
}
caif_assert(skb != NULL);
- dst = skb_put(skb, len);
- memcpy(dst, pcffrm, len);
+ skb_put_data(skb, pcffrm, len);
skb->protocol = htons(ETH_P_CAIF);
skb_reset_mac_header(skb);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 76e1d35..709838e 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -171,7 +171,6 @@
struct sk_buff *skb = NULL;
struct ser_device *ser;
int ret;
- u8 *p;
ser = tty->disc_data;
@@ -198,8 +197,7 @@
skb = netdev_alloc_skb(ser->dev, count+1);
if (skb == NULL)
return;
- p = skb_put(skb, count);
- memcpy(p, data, count);
+ skb_put_data(skb, data, count);
skb->protocol = htons(ETH_P_CAIF);
skb_reset_mac_header(skb);
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index fc21afe..207cb84 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -526,7 +526,6 @@
struct sk_buff *skb = NULL;
int spad = 0;
int epad = 0;
- u8 *dst = NULL;
int pkt_len = 0;
/*
@@ -548,8 +547,7 @@
skb = netdev_alloc_skb(cfspi->ndev, pkt_len + 1);
caif_assert(skb != NULL);
- dst = skb_put(skb, pkt_len);
- memcpy(dst, src, pkt_len);
+ skb_put_data(skb, src, pkt_len);
src += pkt_len;
skb->protocol = htons(ETH_P_CAIF);
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 1794ea0..c3d104f 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -242,7 +242,7 @@
skb_reserve(skb, cfv->rx_hr + pad_len);
- memcpy(skb_put(skb, cfpkt_len), frm + cfv->rx_hr, cfpkt_len);
+ skb_put_data(skb, frm + cfv->rx_hr, cfpkt_len);
return skb;
}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index ae4ed03..a3011c0 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -648,7 +648,7 @@
can_skb_prv(skb)->ifindex = dev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
- *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
+ *cf = skb_put(skb, sizeof(struct can_frame));
memset(*cf, 0, sizeof(struct can_frame));
return skb;
@@ -677,7 +677,7 @@
can_skb_prv(skb)->ifindex = dev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
- *cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame));
+ *cfd = skb_put(skb, sizeof(struct canfd_frame));
memset(*cfd, 0, sizeof(struct canfd_frame));
return skb;
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index bf8fdae..f4947a7 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -621,10 +621,8 @@
return 0;
}
-static int m_can_get_berr_counter(const struct net_device *dev,
- struct can_berr_counter *bec)
+static int m_can_clk_start(struct m_can_priv *priv)
{
- struct m_can_priv *priv = netdev_priv(dev);
int err;
err = clk_prepare_enable(priv->hclk);
@@ -632,15 +630,31 @@
return err;
err = clk_prepare_enable(priv->cclk);
- if (err) {
+ if (err)
clk_disable_unprepare(priv->hclk);
+
+ return err;
+}
+
+static void m_can_clk_stop(struct m_can_priv *priv)
+{
+ clk_disable_unprepare(priv->cclk);
+ clk_disable_unprepare(priv->hclk);
+}
+
+static int m_can_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct m_can_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = m_can_clk_start(priv);
+ if (err)
return err;
- }
__m_can_get_berr_counter(dev, bec);
- clk_disable_unprepare(priv->cclk);
- clk_disable_unprepare(priv->hclk);
+ m_can_clk_stop(priv);
return 0;
}
@@ -1276,19 +1290,15 @@
struct m_can_priv *priv = netdev_priv(dev);
int err;
- err = clk_prepare_enable(priv->hclk);
+ err = m_can_clk_start(priv);
if (err)
return err;
- err = clk_prepare_enable(priv->cclk);
- if (err)
- goto exit_disable_hclk;
-
/* open the can device */
err = open_candev(dev);
if (err) {
netdev_err(dev, "failed to open can device\n");
- goto exit_disable_cclk;
+ goto exit_disable_clks;
}
/* register interrupt handler */
@@ -1310,10 +1320,8 @@
exit_irq_fail:
close_candev(dev);
-exit_disable_cclk:
- clk_disable_unprepare(priv->cclk);
-exit_disable_hclk:
- clk_disable_unprepare(priv->hclk);
+exit_disable_clks:
+ m_can_clk_stop(priv);
return err;
}
@@ -1324,9 +1332,6 @@
/* disable all interrupts */
m_can_disable_all_interrupts(priv);
- clk_disable_unprepare(priv->hclk);
- clk_disable_unprepare(priv->cclk);
-
/* set the state as STOPPED */
priv->can.state = CAN_STATE_STOPPED;
}
@@ -1338,6 +1343,7 @@
netif_stop_queue(dev);
napi_disable(&priv->napi);
m_can_stop(dev);
+ m_can_clk_stop(priv);
free_irq(dev->irq, dev);
close_candev(dev);
can_led_event(dev, CAN_LED_EVENT_STOP);
@@ -1489,11 +1495,23 @@
return register_candev(dev);
}
+static void m_can_init_ram(struct m_can_priv *priv)
+{
+ int end, i, start;
+
+ /* initialize the entire Message RAM in use to avoid possible
+ * ECC/parity checksum errors when reading an uninitialized buffer
+ */
+ start = priv->mcfg[MRAM_SIDF].off;
+ end = priv->mcfg[MRAM_TXB].off +
+ priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
+ for (i = start; i < end; i += 4)
+ writel(0x0, priv->mram_base + i);
+}
+
static void m_can_of_parse_mram(struct m_can_priv *priv,
const u32 *mram_config_vals)
{
- int i, start, end;
-
priv->mcfg[MRAM_SIDF].off = mram_config_vals[0];
priv->mcfg[MRAM_SIDF].num = mram_config_vals[1];
priv->mcfg[MRAM_XIDF].off = priv->mcfg[MRAM_SIDF].off +
@@ -1529,15 +1547,7 @@
priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num,
priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num);
- /* initialize the entire Message RAM in use to avoid possible
- * ECC/parity checksum errors when reading an uninitialized buffer
- */
- start = priv->mcfg[MRAM_SIDF].off;
- end = priv->mcfg[MRAM_TXB].off +
- priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
- for (i = start; i < end; i += 4)
- writel(0x0, priv->mram_base + i);
-
+ m_can_init_ram(priv);
}
static int m_can_plat_probe(struct platform_device *pdev)
@@ -1658,6 +1668,8 @@
return ret;
}
+/* TODO: runtime PM with power down or sleep mode */
+
static __maybe_unused int m_can_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
@@ -1666,10 +1678,10 @@
if (netif_running(ndev)) {
netif_stop_queue(ndev);
netif_device_detach(ndev);
+ m_can_stop(ndev);
+ m_can_clk_stop(priv);
}
- /* TODO: enter low power */
-
priv->can.state = CAN_STATE_SLEEPING;
return 0;
@@ -1680,11 +1692,18 @@
struct net_device *ndev = dev_get_drvdata(dev);
struct m_can_priv *priv = netdev_priv(ndev);
- /* TODO: exit low power */
+ m_can_init_ram(priv);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(ndev)) {
+ int ret;
+
+ ret = m_can_clk_start(priv);
+ if (ret)
+ return ret;
+
+ m_can_start(ndev);
netif_device_attach(ndev);
netif_start_queue(ndev);
}
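[Editor's note: the m_can changes fold the duplicated clk_prepare_enable()/
clk_disable_unprepare() sequences into paired m_can_clk_start()/m_can_clk_stop() helpers and
reuse them from open/close and the new suspend/resume paths; resume also re-runs
m_can_init_ram(), presumably because the Message RAM contents cannot be trusted after a
suspend cycle. A sketch of the paired-helper shape with rollback on partial failure; the
names are illustrative:]

	struct clocks { int h_on, c_on; };

	static int enable(int *flag)  { *flag = 1; return 0; }
	static void disable(int *flag) { *flag = 0; }

	static int clk_start(struct clocks *c)
	{
		int err = enable(&c->h_on);

		if (err)
			return err;
		err = enable(&c->c_on);
		if (err)
			disable(&c->h_on);	/* roll back the first clock */
		return err;
	}

	static void clk_stop(struct clocks *c)
	{
		disable(&c->c_on);		/* strict reverse of clk_start() */
		disable(&c->h_on);
	}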
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 6a6e896..5d067c1 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -216,8 +216,7 @@
can_skb_prv(skb)->ifindex = sl->dev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
- memcpy(skb_put(skb, sizeof(struct can_frame)),
- &cf, sizeof(struct can_frame));
+ skb_put_data(skb, &cf, sizeof(struct can_frame));
sl->dev->stats.rx_packets++;
sl->dev->stats.rx_bytes += cf.can_dlc;
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index da02041..017f48c 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1417,10 +1417,9 @@
{
struct net_local *np = netdev_priv(dev);
u32 supported;
- int err;
spin_lock_irq(&np->lock);
- err = mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
+ mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
spin_unlock_irq(&np->lock);
/* The PHY may support 1000baseT, but the Etrax100 does not. */
@@ -1432,7 +1431,7 @@
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
supported);
- return err;
+ return 0;
}
static int e100_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 862ee22..83a9bc8 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -1,13 +1,7 @@
menu "Distributed Switch Architecture drivers"
depends on HAVE_NET_DSA
-config NET_DSA_MV88E6060
- tristate "Marvell 88E6060 ethernet switch chip support"
- depends on NET_DSA
- select NET_DSA_TAG_TRAILER
- ---help---
- This enables support for the Marvell 88E6060 ethernet switch
- chip.
+source "drivers/net/dsa/b53/Kconfig"
config NET_DSA_BCM_SF2
tristate "Broadcom Starfighter 2 Ethernet switch support"
@@ -21,19 +15,6 @@
This enables support for the Broadcom Starfighter 2 Ethernet
switch chips.
-source "drivers/net/dsa/b53/Kconfig"
-
-source "drivers/net/dsa/mv88e6xxx/Kconfig"
-
-config NET_DSA_QCA8K
- tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
- depends on NET_DSA
- select NET_DSA_TAG_QCA
- select REGMAP
- ---help---
- This enables support for the Qualcomm Atheros QCA8K Ethernet
- switch chips.
-
config NET_DSA_LOOP
tristate "DSA mock-up Ethernet switch chip support"
depends on NET_DSA
@@ -50,6 +31,27 @@
This enables support for the Mediatek MT7530 Ethernet switch
chip.
+config NET_DSA_MV88E6060
+ tristate "Marvell 88E6060 ethernet switch chip support"
+ depends on NET_DSA
+ select NET_DSA_TAG_TRAILER
+ ---help---
+ This enables support for the Marvell 88E6060 ethernet switch
+ chip.
+
+source "drivers/net/dsa/microchip/Kconfig"
+
+source "drivers/net/dsa/mv88e6xxx/Kconfig"
+
+config NET_DSA_QCA8K
+ tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
+ depends on NET_DSA
+ select NET_DSA_TAG_QCA
+ select REGMAP
+ ---help---
+ This enables support for the Qualcomm Atheros QCA8K Ethernet
+ switch chips.
+
config NET_DSA_SMSC_LAN9303
tristate
select NET_DSA_TAG_LAN9303
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index edd6303..4a5b5bd 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -1,11 +1,12 @@
-obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o
bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o
-obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
+obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o dsa_loop_bdinfo.o
obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
+obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
+obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
obj-y += b53/
+obj-y += microchip/
obj-y += mv88e6xxx/
-obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o dsa_loop_bdinfo.o
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index fa0eece2..e68d368 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -29,7 +29,6 @@
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <net/dsa.h>
-#include <net/switchdev.h>
#include "b53_regs.h"
#include "b53_priv.h"
@@ -1056,7 +1055,7 @@
int b53_vlan_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_vlan *vlan,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
struct b53_device *dev = ds->priv;
u16 vid, vid_start = 0, pvid;
@@ -1282,10 +1281,9 @@
b53_arl_to_entry(ent, mac_vid, fwd_entry);
}
-static int b53_fdb_copy(struct net_device *dev, int port,
- const struct b53_arl_entry *ent,
+static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
if (!ent->is_valid)
return 0;
@@ -1302,10 +1300,9 @@
int b53_fdb_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
struct b53_device *priv = ds->priv;
- struct net_device *dev = ds->ports[port].netdev;
struct b53_arl_entry results[2];
unsigned int count = 0;
int ret;
@@ -1321,13 +1318,13 @@
return ret;
b53_arl_search_rd(priv, 0, &results[0]);
- ret = b53_fdb_copy(dev, port, &results[0], fdb, cb);
+ ret = b53_fdb_copy(port, &results[0], fdb, cb);
if (ret)
return ret;
if (priv->num_arl_entries > 2) {
b53_arl_search_rd(priv, 1, &results[1]);
- ret = b53_fdb_copy(dev, port, &results[1], fdb, cb);
+ ret = b53_fdb_copy(port, &results[1], fdb, cb);
if (ret)
return ret;
@@ -1344,7 +1341,7 @@
int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
{
struct b53_device *dev = ds->priv;
- s8 cpu_port = ds->dst->cpu_port;
+ s8 cpu_port = ds->dst->cpu_dp->index;
u16 pvlan, reg;
unsigned int i;
@@ -1390,7 +1387,7 @@
{
struct b53_device *dev = ds->priv;
struct b53_vlan *vl = &dev->vlans[0];
- s8 cpu_port = ds->dst->cpu_port;
+ s8 cpu_port = ds->dst->cpu_dp->index;
unsigned int i;
u16 pvlan, reg, pvid;
@@ -1992,7 +1989,7 @@
pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev);
- return dsa_register_switch(dev->ds, dev->ds->dev);
+ return dsa_register_switch(dev->ds);
}
EXPORT_SYMBOL(b53_switch_register);
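[Editor's note: the b53 hunks replace the spelled-out int (*cb)(struct switchdev_obj *obj)
parameter with the switchdev_obj_dump_cb_t typedef; note the parameters are declared as
switchdev_obj_dump_cb_t *cb, i.e. the typedef names the function type itself. A
self-contained model of that C idiom, with hypothetical names:]

	struct obj { int id; };

	/* Typedef of a function type; parameters are then declared as cb_t *cb. */
	typedef int obj_dump_cb_t(struct obj *obj);

	static int dump_all(struct obj *objs, int n, obj_dump_cb_t *cb)
	{
		int i, err;

		for (i = 0; i < n; i++) {
			err = cb(&objs[i]);
			if (err)
				return err;
		}
		return 0;
	}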
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index a9dc90a..155a9c4 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -395,7 +395,7 @@
const struct switchdev_obj_port_vlan *vlan);
int b53_vlan_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_vlan *vlan,
- int (*cb)(struct switchdev_obj *obj));
+ switchdev_obj_dump_cb_t *cb);
int b53_fdb_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans);
@@ -406,7 +406,7 @@
const struct switchdev_obj_port_fdb *fdb);
int b53_fdb_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj));
+ switchdev_obj_dump_cb_t *cb);
int b53_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
void b53_mirror_del(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 8a62b6a..c37ffd1 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -364,6 +364,7 @@
{ .compatible = "brcm,bcm53018-srab" },
{ .compatible = "brcm,bcm53019-srab" },
{ .compatible = "brcm,bcm5301x-srab" },
+ { .compatible = "brcm,bcm11360-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,bcm58522-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,bcm58525-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,bcm58535-srab", .data = (void *)BCM58XX_DEVICE_ID },
@@ -371,6 +372,7 @@
{ .compatible = "brcm,bcm58623-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,bcm58625-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,bcm88312-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,cygnus-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,nsp-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ /* sentinel */ },
};
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 2be9632..76e98e8 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -28,7 +28,6 @@
#include <linux/if_bridge.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
-#include <net/switchdev.h>
#include <linux/platform_data/b53.h>
#include "bcm_sf2.h"
@@ -228,7 +227,7 @@
struct phy_device *phy)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- s8 cpu_port = ds->dst[ds->index].cpu_port;
+ s8 cpu_port = ds->dst->cpu_dp->index;
unsigned int i;
u32 reg;
@@ -807,7 +806,7 @@
static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = ds->dst[ds->index].master_netdev;
+ struct net_device *p = ds->dst[ds->index].cpu_dp->netdev;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_wolinfo pwol;
@@ -830,9 +829,9 @@
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = ds->dst[ds->index].master_netdev;
+ struct net_device *p = ds->dst[ds->index].cpu_dp->netdev;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- s8 cpu_port = ds->dst[ds->index].cpu_port;
+ s8 cpu_port = ds->dst->cpu_dp->index;
struct ethtool_wolinfo pwol;
p->ethtool_ops->get_wol(p, &pwol);
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index a19e178..fdd8f38 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -14,10 +14,10 @@
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/export.h>
+#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/if_bridge.h>
-#include <net/switchdev.h>
#include <net/dsa.h>
#include "dsa_loop.h"
@@ -27,6 +27,30 @@
u16 untagged;
};
+struct dsa_loop_mib_entry {
+ char name[ETH_GSTRING_LEN];
+ unsigned long val;
+};
+
+enum dsa_loop_mib_counters {
+ DSA_LOOP_PHY_READ_OK,
+ DSA_LOOP_PHY_READ_ERR,
+ DSA_LOOP_PHY_WRITE_OK,
+ DSA_LOOP_PHY_WRITE_ERR,
+ __DSA_LOOP_CNT_MAX,
+};
+
+static struct dsa_loop_mib_entry dsa_loop_mibs[] = {
+ [DSA_LOOP_PHY_READ_OK] = { "phy_read_ok", },
+ [DSA_LOOP_PHY_READ_ERR] = { "phy_read_err", },
+ [DSA_LOOP_PHY_WRITE_OK] = { "phy_write_ok", },
+ [DSA_LOOP_PHY_WRITE_ERR] = { "phy_write_err", },
+};
+
+struct dsa_loop_port {
+ struct dsa_loop_mib_entry mib[__DSA_LOOP_CNT_MAX];
+};
+
#define DSA_LOOP_VLANS 5
struct dsa_loop_priv {
@@ -34,6 +58,7 @@
unsigned int port_base;
struct dsa_loop_vlan vlans[DSA_LOOP_VLANS];
struct net_device *netdev;
+ struct dsa_loop_port ports[DSA_MAX_PORTS];
u16 pvid;
};
@@ -48,11 +73,43 @@
static int dsa_loop_setup(struct dsa_switch *ds)
{
+ struct dsa_loop_priv *ps = ds->priv;
+ unsigned int i;
+
+ for (i = 0; i < ds->num_ports; i++)
+ memcpy(ps->ports[i].mib, dsa_loop_mibs,
+ sizeof(dsa_loop_mibs));
+
dev_dbg(ds->dev, "%s\n", __func__);
return 0;
}
+static int dsa_loop_get_sset_count(struct dsa_switch *ds)
+{
+ return __DSA_LOOP_CNT_MAX;
+}
+
+static void dsa_loop_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+ struct dsa_loop_priv *ps = ds->priv;
+ unsigned int i;
+
+ for (i = 0; i < __DSA_LOOP_CNT_MAX; i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ ps->ports[port].mib[i].name, ETH_GSTRING_LEN);
+}
+
+static void dsa_loop_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct dsa_loop_priv *ps = ds->priv;
+ unsigned int i;
+
+ for (i = 0; i < __DSA_LOOP_CNT_MAX; i++)
+ data[i] = ps->ports[port].mib[i].val;
+}
+
static int dsa_loop_set_addr(struct dsa_switch *ds, u8 *addr)
{
dev_dbg(ds->dev, "%s\n", __func__);
@@ -64,10 +121,17 @@
{
struct dsa_loop_priv *ps = ds->priv;
struct mii_bus *bus = ps->bus;
+ int ret;
dev_dbg(ds->dev, "%s\n", __func__);
- return mdiobus_read_nested(bus, ps->port_base + port, regnum);
+ ret = mdiobus_read_nested(bus, ps->port_base + port, regnum);
+ if (ret < 0)
+ ps->ports[port].mib[DSA_LOOP_PHY_READ_ERR].val++;
+ else
+ ps->ports[port].mib[DSA_LOOP_PHY_READ_OK].val++;
+
+ return ret;
}
static int dsa_loop_phy_write(struct dsa_switch *ds, int port,
@@ -75,10 +139,17 @@
{
struct dsa_loop_priv *ps = ds->priv;
struct mii_bus *bus = ps->bus;
+ int ret;
dev_dbg(ds->dev, "%s\n", __func__);
- return mdiobus_write_nested(bus, ps->port_base + port, regnum, value);
+ ret = mdiobus_write_nested(bus, ps->port_base + port, regnum, value);
+ if (ret < 0)
+ ps->ports[port].mib[DSA_LOOP_PHY_WRITE_ERR].val++;
+ else
+ ps->ports[port].mib[DSA_LOOP_PHY_WRITE_OK].val++;
+
+ return ret;
}
static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port,
@@ -188,7 +259,7 @@
static int dsa_loop_port_vlan_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_vlan *vlan,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
struct dsa_loop_priv *ps = ds->priv;
struct mii_bus *bus = ps->bus;
@@ -226,6 +297,9 @@
static struct dsa_switch_ops dsa_loop_driver = {
.get_tag_protocol = dsa_loop_get_protocol,
.setup = dsa_loop_setup,
+ .get_strings = dsa_loop_get_strings,
+ .get_ethtool_stats = dsa_loop_get_ethtool_stats,
+ .get_sset_count = dsa_loop_get_sset_count,
.set_addr = dsa_loop_set_addr,
.phy_read = dsa_loop_phy_read,
.phy_write = dsa_loop_phy_write,
@@ -272,7 +346,7 @@
dev_set_drvdata(&mdiodev->dev, ds);
- return dsa_register_switch(ds, ds->dev);
+ return dsa_register_switch(ds);
}
static void dsa_loop_drv_remove(struct mdio_device *mdiodev)
@@ -294,15 +368,6 @@
#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
-static void unregister_fixed_phys(void)
-{
- unsigned int i;
-
- for (i = 0; i < NUM_FIXED_PHYS; i++)
- if (phydevs[i])
- fixed_phy_unregister(phydevs[i]);
-}
-
static int __init dsa_loop_init(void)
{
struct fixed_phy_status status = {
@@ -321,8 +386,12 @@
static void __exit dsa_loop_exit(void)
{
+ unsigned int i;
+
mdio_driver_unregister(&dsa_loop_drv);
- unregister_fixed_phys();
+ for (i = 0; i < NUM_FIXED_PHYS; i++)
+ if (phydevs[i])
+ fixed_phy_unregister(phydevs[i]);
}
module_exit(dsa_loop_exit);
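[Editor's note: dsa_loop now keeps a per-port counter array seeded from a shared template,
bumps the counters in its phy_read/phy_write hooks, and exports them through the
get_sset_count/get_strings/get_ethtool_stats trio. A minimal model of the
template-plus-instances layout, with illustrative names:]

	#include <string.h>

	#define NAME_LEN 32

	struct mib_entry {
		char name[NAME_LEN];
		unsigned long val;
	};

	enum { READ_OK, READ_ERR, CNT_MAX };

	static const struct mib_entry mib_template[CNT_MAX] = {
		[READ_OK]  = { "phy_read_ok",  },
		[READ_ERR] = { "phy_read_err", },
	};

	struct port {
		struct mib_entry mib[CNT_MAX];
	};

	static void port_init(struct port *p)
	{
		/* each port starts from the shared name/zero-count template */
		memcpy(p->mib, mib_template, sizeof(mib_template));
	}

	static void count_read(struct port *p, int ret)
	{
		p->mib[ret < 0 ? READ_ERR : READ_OK].val++;
	}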
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index c8b2423..cd76e61 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -802,7 +802,7 @@
chip->ds->ops = &lan9303_switch_ops;
chip->ds->phys_mii_mask = chip->phy_addr_sel_strap ? 0xe : 0x7;
- return dsa_register_switch(chip->ds, chip->dev);
+ return dsa_register_switch(chip->ds);
}
static void lan9303_probe_reset_gpio(struct lan9303 *chip,
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
new file mode 100644
index 0000000..a8b8f59
--- /dev/null
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -0,0 +1,12 @@
+menuconfig MICROCHIP_KSZ
+ tristate "Microchip KSZ series switch support"
+ depends on NET_DSA
+ select NET_DSA_TAG_KSZ
+ help
+ This driver adds support for Microchip KSZ switch chips.
+
+config MICROCHIP_KSZ_SPI_DRIVER
+ tristate "KSZ series SPI connected switch driver"
+ depends on MICROCHIP_KSZ && SPI
+ help
+ Select to enable support for registering switches configured through SPI.
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile
new file mode 100644
index 0000000..ed335e2
--- /dev/null
+++ b/drivers/net/dsa/microchip/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MICROCHIP_KSZ) += ksz_common.o
+obj-$(CONFIG_MICROCHIP_KSZ_SPI_DRIVER) += ksz_spi.o
diff --git a/drivers/net/dsa/microchip/ksz_9477_reg.h b/drivers/net/dsa/microchip/ksz_9477_reg.h
new file mode 100644
index 0000000..6aa6752
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_9477_reg.h
@@ -0,0 +1,1676 @@
+/*
+ * Microchip KSZ9477 register definitions
+ *
+ * Copyright (C) 2017 Microchip Technology Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __KSZ9477_REGS_H
+#define __KSZ9477_REGS_H
+
+#define KS_PRIO_M 0x7
+#define KS_PRIO_S 4
+
+/* 0 - Operation */
+#define REG_CHIP_ID0__1 0x0000
+
+#define REG_CHIP_ID1__1 0x0001
+
+#define FAMILY_ID 0x95
+#define FAMILY_ID_94 0x94
+#define FAMILY_ID_95 0x95
+#define FAMILY_ID_85 0x85
+#define FAMILY_ID_98 0x98
+#define FAMILY_ID_88 0x88
+
+#define REG_CHIP_ID2__1 0x0002
+
+#define CHIP_ID_63 0x63
+#define CHIP_ID_66 0x66
+#define CHIP_ID_67 0x67
+#define CHIP_ID_77 0x77
+#define CHIP_ID_93 0x93
+#define CHIP_ID_96 0x96
+#define CHIP_ID_97 0x97
+
+#define REG_CHIP_ID3__1 0x0003
+
+#define SWITCH_REVISION_M 0x0F
+#define SWITCH_REVISION_S 4
+#define SWITCH_RESET 0x01
+
+#define REG_SW_PME_CTRL 0x0006
+
+#define PME_ENABLE BIT(1)
+#define PME_POLARITY BIT(0)
+
+#define REG_GLOBAL_OPTIONS 0x000F
+
+#define SW_GIGABIT_ABLE BIT(6)
+#define SW_REDUNDANCY_ABLE BIT(5)
+#define SW_AVB_ABLE BIT(4)
+#define SW_9567_RL_5_2 0xC
+#define SW_9477_SL_5_2 0xD
+
+#define SW_9896_GL_5_1 0xB
+#define SW_9896_RL_5_1 0x8
+#define SW_9896_SL_5_1 0x9
+
+#define SW_9895_GL_4_1 0x7
+#define SW_9895_RL_4_1 0x4
+#define SW_9895_SL_4_1 0x5
+
+#define SW_9896_RL_4_2 0x6
+
+#define SW_9893_RL_2_1 0x0
+#define SW_9893_SL_2_1 0x1
+#define SW_9893_GL_2_1 0x3
+
+#define SW_QW_ABLE BIT(5)
+#define SW_9893_RN_2_1 0xC
+
+#define REG_SW_INT_STATUS__4 0x0010
+#define REG_SW_INT_MASK__4 0x0014
+
+#define LUE_INT BIT(31)
+#define TRIG_TS_INT BIT(30)
+#define APB_TIMEOUT_INT BIT(29)
+
+#define SWITCH_INT_MASK (TRIG_TS_INT | APB_TIMEOUT_INT)
+
+#define REG_SW_PORT_INT_STATUS__4 0x0018
+#define REG_SW_PORT_INT_MASK__4 0x001C
+#define REG_SW_PHY_INT_STATUS 0x0020
+#define REG_SW_PHY_INT_ENABLE 0x0024
+
+/* 1 - Global */
+#define REG_SW_GLOBAL_SERIAL_CTRL_0 0x0100
+#define SW_SPARE_REG_2 BIT(7)
+#define SW_SPARE_REG_1 BIT(6)
+#define SW_SPARE_REG_0 BIT(5)
+#define SW_BIG_ENDIAN BIT(4)
+#define SPI_AUTO_EDGE_DETECTION BIT(1)
+#define SPI_CLOCK_OUT_RISING_EDGE BIT(0)
+
+#define REG_SW_GLOBAL_OUTPUT_CTRL__1 0x0103
+#define SW_ENABLE_REFCLKO BIT(1)
+#define SW_REFCLKO_IS_125MHZ BIT(0)
+
+#define REG_SW_IBA__4 0x0104
+
+#define SW_IBA_ENABLE BIT(31)
+#define SW_IBA_DA_MATCH BIT(30)
+#define SW_IBA_INIT BIT(29)
+#define SW_IBA_QID_M 0xF
+#define SW_IBA_QID_S 22
+#define SW_IBA_PORT_M 0x2F
+#define SW_IBA_PORT_S 16
+#define SW_IBA_FRAME_TPID_M 0xFFFF
+
+#define REG_SW_APB_TIMEOUT_ADDR__4 0x0108
+
+#define APB_TIMEOUT_ACKNOWLEDGE BIT(31)
+
+#define REG_SW_IBA_SYNC__1 0x010C
+
+#define REG_SW_IO_STRENGTH__1 0x010D
+#define SW_DRIVE_STRENGTH_M 0x7
+#define SW_DRIVE_STRENGTH_2MA 0
+#define SW_DRIVE_STRENGTH_4MA 1
+#define SW_DRIVE_STRENGTH_8MA 2
+#define SW_DRIVE_STRENGTH_12MA 3
+#define SW_DRIVE_STRENGTH_16MA 4
+#define SW_DRIVE_STRENGTH_20MA 5
+#define SW_DRIVE_STRENGTH_24MA 6
+#define SW_DRIVE_STRENGTH_28MA 7
+#define SW_HI_SPEED_DRIVE_STRENGTH_S 4
+#define SW_LO_SPEED_DRIVE_STRENGTH_S 0
+
+#define REG_SW_IBA_STATUS__4 0x0110
+
+#define SW_IBA_REQ BIT(31)
+#define SW_IBA_RESP BIT(30)
+#define SW_IBA_DA_MISMATCH BIT(14)
+#define SW_IBA_FMT_MISMATCH BIT(13)
+#define SW_IBA_CODE_ERROR BIT(12)
+#define SW_IBA_CMD_ERROR BIT(11)
+#define SW_IBA_CMD_LOC_M (BIT(6) - 1)
+
+#define REG_SW_IBA_STATES__4 0x0114
+
+#define SW_IBA_BUF_STATE_S 30
+#define SW_IBA_CMD_STATE_S 28
+#define SW_IBA_RESP_STATE_S 26
+#define SW_IBA_STATE_M 0x3
+#define SW_IBA_PACKET_SIZE_M 0x7F
+#define SW_IBA_PACKET_SIZE_S 16
+#define SW_IBA_FMT_ID_M 0xFFFF
+
+#define REG_SW_IBA_RESULT__4 0x0118
+
+#define SW_IBA_SIZE_S 24
+
+#define SW_IBA_RETRY_CNT_M (BIT(5) - 1)
+
+/* 2 - PHY */
+#define REG_SW_POWER_MANAGEMENT_CTRL 0x0201
+
+#define SW_PLL_POWER_DOWN BIT(5)
+#define SW_POWER_DOWN_MODE 0x3
+#define SW_ENERGY_DETECTION 1
+#define SW_SOFT_POWER_DOWN 2
+#define SW_POWER_SAVING 3
+
+/* 3 - Operation Control */
+#define REG_SW_OPERATION 0x0300
+
+#define SW_DOUBLE_TAG BIT(7)
+#define SW_RESET BIT(1)
+#define SW_START BIT(0)
+
+#define REG_SW_MAC_ADDR_0 0x0302
+#define REG_SW_MAC_ADDR_1 0x0303
+#define REG_SW_MAC_ADDR_2 0x0304
+#define REG_SW_MAC_ADDR_3 0x0305
+#define REG_SW_MAC_ADDR_4 0x0306
+#define REG_SW_MAC_ADDR_5 0x0307
+
+#define REG_SW_MTU__2 0x0308
+
+#define REG_SW_ISP_TPID__2 0x030A
+
+#define REG_SW_HSR_TPID__2 0x030C
+
+#define REG_AVB_STRATEGY__2 0x030E
+
+#define SW_SHAPING_CREDIT_ACCT BIT(1)
+#define SW_POLICING_CREDIT_ACCT BIT(0)
+
+#define REG_SW_LUE_CTRL_0 0x0310
+
+#define SW_VLAN_ENABLE BIT(7)
+#define SW_DROP_INVALID_VID BIT(6)
+#define SW_AGE_CNT_M 0x7
+#define SW_AGE_CNT_S 3
+#define SW_RESV_MCAST_ENABLE BIT(2)
+#define SW_HASH_OPTION_M 0x03
+#define SW_HASH_OPTION_CRC 1
+#define SW_HASH_OPTION_XOR 2
+#define SW_HASH_OPTION_DIRECT 3
+
+#define REG_SW_LUE_CTRL_1 0x0311
+
+#define UNICAST_LEARN_DISABLE BIT(7)
+#define SW_SRC_ADDR_FILTER BIT(6)
+#define SW_FLUSH_STP_TABLE BIT(5)
+#define SW_FLUSH_MSTP_TABLE BIT(4)
+#define SW_FWD_MCAST_SRC_ADDR BIT(3)
+#define SW_AGING_ENABLE BIT(2)
+#define SW_FAST_AGING BIT(1)
+#define SW_LINK_AUTO_AGING BIT(0)
+
+#define REG_SW_LUE_CTRL_2 0x0312
+
+#define SW_TRAP_DOUBLE_TAG BIT(6)
+#define SW_EGRESS_VLAN_FILTER_DYN BIT(5)
+#define SW_EGRESS_VLAN_FILTER_STA BIT(4)
+#define SW_FLUSH_OPTION_M 0x3
+#define SW_FLUSH_OPTION_S 2
+#define SW_FLUSH_OPTION_DYN_MAC 1
+#define SW_FLUSH_OPTION_STA_MAC 2
+#define SW_FLUSH_OPTION_BOTH 3
+#define SW_PRIO_M 0x3
+#define SW_PRIO_DA 0
+#define SW_PRIO_SA 1
+#define SW_PRIO_HIGHEST_DA_SA 2
+#define SW_PRIO_LOWEST_DA_SA 3
+
+#define REG_SW_LUE_CTRL_3 0x0313
+
+#define REG_SW_LUE_INT_STATUS 0x0314
+#define REG_SW_LUE_INT_ENABLE 0x0315
+
+#define LEARN_FAIL_INT BIT(2)
+#define ALMOST_FULL_INT BIT(1)
+#define WRITE_FAIL_INT BIT(0)
+
+#define REG_SW_LUE_INDEX_0__2 0x0316
+
+#define ENTRY_INDEX_M 0x0FFF
+
+#define REG_SW_LUE_INDEX_1__2 0x0318
+
+#define FAIL_INDEX_M 0x03FF
+
+#define REG_SW_LUE_INDEX_2__2 0x031A
+
+#define REG_SW_LUE_UNK_UCAST_CTRL__4 0x0320
+
+#define SW_UNK_UCAST_ENABLE BIT(31)
+
+#define REG_SW_LUE_UNK_MCAST_CTRL__4 0x0324
+
+#define SW_UNK_MCAST_ENABLE BIT(31)
+
+#define REG_SW_LUE_UNK_VID_CTRL__4 0x0328
+
+#define SW_UNK_VID_ENABLE BIT(31)
+
+#define REG_SW_MAC_CTRL_0 0x0330
+
+#define SW_NEW_BACKOFF BIT(7)
+#define SW_CHECK_LENGTH BIT(3)
+#define SW_PAUSE_UNH_MODE BIT(1)
+#define SW_AGGR_BACKOFF BIT(0)
+
+#define REG_SW_MAC_CTRL_1 0x0331
+
+#define MULTICAST_STORM_DISABLE BIT(6)
+#define SW_BACK_PRESSURE BIT(5)
+#define FAIR_FLOW_CTRL BIT(4)
+#define NO_EXC_COLLISION_DROP BIT(3)
+#define SW_JUMBO_PACKET BIT(2)
+#define SW_LEGAL_PACKET_DISABLE BIT(1)
+#define SW_PASS_SHORT_FRAME BIT(0)
+
+#define REG_SW_MAC_CTRL_2 0x0332
+
+#define SW_REPLACE_VID BIT(3)
+#define BROADCAST_STORM_RATE_HI 0x07
+
+#define REG_SW_MAC_CTRL_3 0x0333
+
+#define BROADCAST_STORM_RATE_LO 0xFF
+#define BROADCAST_STORM_RATE 0x07FF
+
+#define REG_SW_MAC_CTRL_4 0x0334
+
+#define SW_PASS_PAUSE BIT(3)
+
+#define REG_SW_MAC_CTRL_5 0x0335
+
+#define SW_OUT_RATE_LIMIT_QUEUE_BASED BIT(3)
+
+#define REG_SW_MAC_CTRL_6 0x0336
+
+#define SW_MIB_COUNTER_FLUSH BIT(7)
+#define SW_MIB_COUNTER_FREEZE BIT(6)
+
+#define REG_SW_MAC_802_1P_MAP_0 0x0338
+#define REG_SW_MAC_802_1P_MAP_1 0x0339
+#define REG_SW_MAC_802_1P_MAP_2 0x033A
+#define REG_SW_MAC_802_1P_MAP_3 0x033B
+
+#define SW_802_1P_MAP_M KS_PRIO_M
+#define SW_802_1P_MAP_S KS_PRIO_S
+
+#define REG_SW_MAC_ISP_CTRL 0x033C
+
+#define REG_SW_MAC_TOS_CTRL 0x033E
+
+#define SW_TOS_DSCP_REMARK BIT(1)
+#define SW_TOS_DSCP_REMAP BIT(0)
+
+#define REG_SW_MAC_TOS_PRIO_0 0x0340
+#define REG_SW_MAC_TOS_PRIO_1 0x0341
+#define REG_SW_MAC_TOS_PRIO_2 0x0342
+#define REG_SW_MAC_TOS_PRIO_3 0x0343
+#define REG_SW_MAC_TOS_PRIO_4 0x0344
+#define REG_SW_MAC_TOS_PRIO_5 0x0345
+#define REG_SW_MAC_TOS_PRIO_6 0x0346
+#define REG_SW_MAC_TOS_PRIO_7 0x0347
+#define REG_SW_MAC_TOS_PRIO_8 0x0348
+#define REG_SW_MAC_TOS_PRIO_9 0x0349
+#define REG_SW_MAC_TOS_PRIO_10 0x034A
+#define REG_SW_MAC_TOS_PRIO_11 0x034B
+#define REG_SW_MAC_TOS_PRIO_12 0x034C
+#define REG_SW_MAC_TOS_PRIO_13 0x034D
+#define REG_SW_MAC_TOS_PRIO_14 0x034E
+#define REG_SW_MAC_TOS_PRIO_15 0x034F
+#define REG_SW_MAC_TOS_PRIO_16 0x0350
+#define REG_SW_MAC_TOS_PRIO_17 0x0351
+#define REG_SW_MAC_TOS_PRIO_18 0x0352
+#define REG_SW_MAC_TOS_PRIO_19 0x0353
+#define REG_SW_MAC_TOS_PRIO_20 0x0354
+#define REG_SW_MAC_TOS_PRIO_21 0x0355
+#define REG_SW_MAC_TOS_PRIO_22 0x0356
+#define REG_SW_MAC_TOS_PRIO_23 0x0357
+#define REG_SW_MAC_TOS_PRIO_24 0x0358
+#define REG_SW_MAC_TOS_PRIO_25 0x0359
+#define REG_SW_MAC_TOS_PRIO_26 0x035A
+#define REG_SW_MAC_TOS_PRIO_27 0x035B
+#define REG_SW_MAC_TOS_PRIO_28 0x035C
+#define REG_SW_MAC_TOS_PRIO_29 0x035D
+#define REG_SW_MAC_TOS_PRIO_30 0x035E
+#define REG_SW_MAC_TOS_PRIO_31 0x035F
+
+#define REG_SW_MRI_CTRL_0 0x0370
+
+#define SW_IGMP_SNOOP BIT(6)
+#define SW_IPV6_MLD_OPTION BIT(3)
+#define SW_IPV6_MLD_SNOOP BIT(2)
+#define SW_MIRROR_RX_TX BIT(0)
+
+#define REG_SW_CLASS_D_IP_CTRL__4 0x0374
+
+#define SW_CLASS_D_IP_ENABLE BIT(31)
+
+#define REG_SW_MRI_CTRL_8 0x0378
+
+#define SW_NO_COLOR_S 6
+#define SW_RED_COLOR_S 4
+#define SW_YELLOW_COLOR_S 2
+#define SW_GREEN_COLOR_S 0
+#define SW_COLOR_M 0x3
+
+#define REG_SW_QM_CTRL__4 0x0390
+
+#define PRIO_SCHEME_SELECT_M KS_PRIO_M
+#define PRIO_SCHEME_SELECT_S 6
+#define PRIO_MAP_3_HI 0
+#define PRIO_MAP_2_HI 2
+#define PRIO_MAP_0_LO 3
+#define UNICAST_VLAN_BOUNDARY BIT(1)
+
+#define REG_SW_EEE_QM_CTRL__2 0x03C0
+
+#define REG_SW_EEE_TXQ_WAIT_TIME__2 0x03C2
+
+/* 4 - LUE */
+#define REG_SW_VLAN_ENTRY__4 0x0400
+
+#define VLAN_VALID BIT(31)
+#define VLAN_FORWARD_OPTION BIT(27)
+#define VLAN_PRIO_M KS_PRIO_M
+#define VLAN_PRIO_S 24
+#define VLAN_MSTP_M 0x7
+#define VLAN_MSTP_S 12
+#define VLAN_FID_M 0x7F
+
+#define REG_SW_VLAN_ENTRY_UNTAG__4 0x0404
+#define REG_SW_VLAN_ENTRY_PORTS__4 0x0408
+
+#define REG_SW_VLAN_ENTRY_INDEX__2 0x040C
+
+#define VLAN_INDEX_M 0x0FFF
+
+#define REG_SW_VLAN_CTRL 0x040E
+
+#define VLAN_START BIT(7)
+#define VLAN_ACTION 0x3
+#define VLAN_WRITE 1
+#define VLAN_READ 2
+#define VLAN_CLEAR 3
+
+#define REG_SW_ALU_INDEX_0 0x0410
+
+#define ALU_FID_INDEX_S 16
+#define ALU_MAC_ADDR_HI 0xFFFF
+
+#define REG_SW_ALU_INDEX_1 0x0414
+
+#define ALU_DIRECT_INDEX_M (BIT(12) - 1)
+
+#define REG_SW_ALU_CTRL__4 0x0418
+
+#define ALU_VALID_CNT_M (BIT(14) - 1)
+#define ALU_VALID_CNT_S 16
+#define ALU_START BIT(7)
+#define ALU_VALID BIT(6)
+#define ALU_DIRECT BIT(2)
+#define ALU_ACTION 0x3
+#define ALU_WRITE 1
+#define ALU_READ 2
+#define ALU_SEARCH 3
+
+#define REG_SW_ALU_STAT_CTRL__4 0x041C
+
+#define ALU_STAT_INDEX_M (BIT(4) - 1)
+#define ALU_STAT_INDEX_S 16
+#define ALU_RESV_MCAST_INDEX_M (BIT(6) - 1)
+#define ALU_STAT_START BIT(7)
+#define ALU_RESV_MCAST_ADDR BIT(1)
+#define ALU_STAT_READ BIT(0)
+
+#define REG_SW_ALU_VAL_A 0x0420
+
+#define ALU_V_STATIC_VALID BIT(31)
+#define ALU_V_SRC_FILTER BIT(30)
+#define ALU_V_DST_FILTER BIT(29)
+#define ALU_V_PRIO_AGE_CNT_M (BIT(3) - 1)
+#define ALU_V_PRIO_AGE_CNT_S 26
+#define ALU_V_MSTP_M 0x7
+
+#define REG_SW_ALU_VAL_B 0x0424
+
+#define ALU_V_OVERRIDE BIT(31)
+#define ALU_V_USE_FID BIT(30)
+#define ALU_V_PORT_MAP (BIT(24) - 1)
+
+#define REG_SW_ALU_VAL_C 0x0428
+
+#define ALU_V_FID_M (BIT(16) - 1)
+#define ALU_V_FID_S 16
+#define ALU_V_MAC_ADDR_HI 0xFFFF
+
+#define REG_SW_ALU_VAL_D 0x042C
+
+#define REG_HSR_ALU_INDEX_0 0x0440
+
+#define REG_HSR_ALU_INDEX_1 0x0444
+
+#define HSR_DST_MAC_INDEX_LO_S 16
+#define HSR_SRC_MAC_INDEX_HI 0xFFFF
+
+#define REG_HSR_ALU_INDEX_2 0x0448
+
+#define HSR_INDEX_MAX BIT(9)
+#define HSR_DIRECT_INDEX_M (HSR_INDEX_MAX - 1)
+
+#define REG_HSR_ALU_INDEX_3 0x044C
+
+#define HSR_PATH_INDEX_M (BIT(4) - 1)
+
+#define REG_HSR_ALU_CTRL__4 0x0450
+
+#define HSR_VALID_CNT_M (BIT(14) - 1)
+#define HSR_VALID_CNT_S 16
+#define HSR_START BIT(7)
+#define HSR_VALID BIT(6)
+#define HSR_SEARCH_END BIT(5)
+#define HSR_DIRECT BIT(2)
+#define HSR_ACTION 0x3
+#define HSR_WRITE 1
+#define HSR_READ 2
+#define HSR_SEARCH 3
+
+#define REG_HSR_ALU_VAL_A 0x0454
+
+#define HSR_V_STATIC_VALID BIT(31)
+#define HSR_V_AGE_CNT_M (BIT(3) - 1)
+#define HSR_V_AGE_CNT_S 26
+#define HSR_V_PATH_ID_M (BIT(4) - 1)
+
+#define REG_HSR_ALU_VAL_B 0x0458
+
+#define REG_HSR_ALU_VAL_C 0x045C
+
+#define HSR_V_DST_MAC_ADDR_LO_S 16
+#define HSR_V_SRC_MAC_ADDR_HI 0xFFFF
+
+#define REG_HSR_ALU_VAL_D 0x0460
+
+#define REG_HSR_ALU_VAL_E 0x0464
+
+#define HSR_V_START_SEQ_1_S 16
+#define HSR_V_START_SEQ_2_S 0
+
+#define REG_HSR_ALU_VAL_F 0x0468
+
+#define HSR_V_EXP_SEQ_1_S 16
+#define HSR_V_EXP_SEQ_2_S 0
+
+#define REG_HSR_ALU_VAL_G 0x046C
+
+#define HSR_V_SEQ_CNT_1_S 16
+#define HSR_V_SEQ_CNT_2_S 0
+
+#define HSR_V_SEQ_M (BIT(16) - 1)
+
+/* 5 - PTP Clock */
+#define REG_PTP_CLK_CTRL 0x0500
+
+#define PTP_STEP_ADJ BIT(6)
+#define PTP_STEP_DIR BIT(5)
+#define PTP_READ_TIME BIT(4)
+#define PTP_LOAD_TIME BIT(3)
+#define PTP_CLK_ADJ_ENABLE BIT(2)
+#define PTP_CLK_ENABLE BIT(1)
+#define PTP_CLK_RESET BIT(0)
+
+#define REG_PTP_RTC_SUB_NANOSEC__2 0x0502
+
+#define PTP_RTC_SUB_NANOSEC_M 0x0007
+
+#define REG_PTP_RTC_NANOSEC 0x0504
+#define REG_PTP_RTC_NANOSEC_H 0x0504
+#define REG_PTP_RTC_NANOSEC_L 0x0506
+
+#define REG_PTP_RTC_SEC 0x0508
+#define REG_PTP_RTC_SEC_H 0x0508
+#define REG_PTP_RTC_SEC_L 0x050A
+
+#define REG_PTP_SUBNANOSEC_RATE 0x050C
+#define REG_PTP_SUBNANOSEC_RATE_H 0x050C
+
+#define PTP_RATE_DIR BIT(31)
+#define PTP_TMP_RATE_ENABLE BIT(30)
+
+#define REG_PTP_SUBNANOSEC_RATE_L 0x050E
+
+#define REG_PTP_RATE_DURATION 0x0510
+#define REG_PTP_RATE_DURATION_H 0x0510
+#define REG_PTP_RATE_DURATION_L 0x0512
+
+#define REG_PTP_MSG_CONF1 0x0514
+
+#define PTP_802_1AS BIT(7)
+#define PTP_ENABLE BIT(6)
+#define PTP_ETH_ENABLE BIT(5)
+#define PTP_IPV4_UDP_ENABLE BIT(4)
+#define PTP_IPV6_UDP_ENABLE BIT(3)
+#define PTP_TC_P2P BIT(2)
+#define PTP_MASTER BIT(1)
+#define PTP_1STEP BIT(0)
+
+#define REG_PTP_MSG_CONF2 0x0516
+
+#define PTP_UNICAST_ENABLE BIT(12)
+#define PTP_ALTERNATE_MASTER BIT(11)
+#define PTP_ALL_HIGH_PRIO BIT(10)
+#define PTP_SYNC_CHECK BIT(9)
+#define PTP_DELAY_CHECK BIT(8)
+#define PTP_PDELAY_CHECK BIT(7)
+#define PTP_DROP_SYNC_DELAY_REQ BIT(5)
+#define PTP_DOMAIN_CHECK BIT(4)
+#define PTP_UDP_CHECKSUM BIT(2)
+
+#define REG_PTP_DOMAIN_VERSION 0x0518
+#define PTP_VERSION_M 0xFF00
+#define PTP_DOMAIN_M 0x00FF
+
+#define REG_PTP_UNIT_INDEX__4 0x0520
+
+#define PTP_UNIT_M 0xF
+
+#define PTP_GPIO_INDEX_S 16
+#define PTP_TSI_INDEX_S 8
+#define PTP_TOU_INDEX_S 0
+
+#define REG_PTP_TRIG_STATUS__4 0x0524
+
+#define TRIG_ERROR_S 16
+#define TRIG_DONE_S 0
+
+#define REG_PTP_INT_STATUS__4 0x0528
+
+#define TRIG_INT_S 16
+#define TS_INT_S 0
+
+#define TRIG_UNIT_M 0x7
+#define TS_UNIT_M 0x3
+
+#define REG_PTP_CTRL_STAT__4 0x052C
+
+#define GPIO_IN BIT(7)
+#define GPIO_OUT BIT(6)
+#define TS_INT_ENABLE BIT(5)
+#define TRIG_ACTIVE BIT(4)
+#define TRIG_ENABLE BIT(3)
+#define TRIG_RESET BIT(2)
+#define TS_ENABLE BIT(1)
+#define TS_RESET BIT(0)
+
+#define GPIO_CTRL_M (GPIO_IN | GPIO_OUT)
+
+#define TRIG_CTRL_M \
+ (TRIG_ACTIVE | TRIG_ENABLE | TRIG_RESET)
+
+#define TS_CTRL_M \
+ (TS_INT_ENABLE | TS_ENABLE | TS_RESET)
+
+#define REG_TRIG_TARGET_NANOSEC 0x0530
+#define REG_TRIG_TARGET_SEC 0x0534
+
+#define REG_TRIG_CTRL__4 0x0538
+
+#define TRIG_CASCADE_ENABLE BIT(31)
+#define TRIG_CASCADE_TAIL BIT(30)
+#define TRIG_CASCADE_UPS_M 0xF
+#define TRIG_CASCADE_UPS_S 26
+#define TRIG_NOW BIT(25)
+#define TRIG_NOTIFY BIT(24)
+#define TRIG_EDGE BIT(23)
+#define TRIG_PATTERN_S 20
+#define TRIG_PATTERN_M 0x7
+#define TRIG_NEG_EDGE 0
+#define TRIG_POS_EDGE 1
+#define TRIG_NEG_PULSE 2
+#define TRIG_POS_PULSE 3
+#define TRIG_NEG_PERIOD 4
+#define TRIG_POS_PERIOD 5
+#define TRIG_REG_OUTPUT 6
+#define TRIG_GPO_S 16
+#define TRIG_GPO_M 0xF
+#define TRIG_CASCADE_ITERATE_CNT_M 0xFFFF
+
+#define REG_TRIG_CYCLE_WIDTH 0x053C
+
+#define REG_TRIG_CYCLE_CNT 0x0540
+
+#define TRIG_CYCLE_CNT_M 0xFFFF
+#define TRIG_CYCLE_CNT_S 16
+#define TRIG_BIT_PATTERN_M 0xFFFF
+
+#define REG_TRIG_ITERATE_TIME 0x0544
+
+#define REG_TRIG_PULSE_WIDTH__4 0x0548
+
+#define TRIG_PULSE_WIDTH_M 0x00FFFFFF
+
+#define REG_TS_CTRL_STAT__4 0x0550
+
+#define TS_EVENT_DETECT_M 0xF
+#define TS_EVENT_DETECT_S 17
+#define TS_EVENT_OVERFLOW BIT(16)
+#define TS_GPI_M 0xF
+#define TS_GPI_S 8
+#define TS_DETECT_RISE BIT(7)
+#define TS_DETECT_FALL BIT(6)
+#define TS_DETECT_S 6
+#define TS_CASCADE_TAIL BIT(5)
+#define TS_CASCADE_UPS_M 0xF
+#define TS_CASCADE_UPS_S 1
+#define TS_CASCADE_ENABLE BIT(0)
+
+#define DETECT_RISE (TS_DETECT_RISE >> TS_DETECT_S)
+#define DETECT_FALL (TS_DETECT_FALL >> TS_DETECT_S)
+
+#define REG_TS_EVENT_0_NANOSEC 0x0554
+#define REG_TS_EVENT_0_SEC 0x0558
+#define REG_TS_EVENT_0_SUB_NANOSEC 0x055C
+
+#define REG_TS_EVENT_1_NANOSEC 0x0560
+#define REG_TS_EVENT_1_SEC 0x0564
+#define REG_TS_EVENT_1_SUB_NANOSEC 0x0568
+
+#define REG_TS_EVENT_2_NANOSEC 0x056C
+#define REG_TS_EVENT_2_SEC 0x0570
+#define REG_TS_EVENT_2_SUB_NANOSEC 0x0574
+
+#define REG_TS_EVENT_3_NANOSEC 0x0578
+#define REG_TS_EVENT_3_SEC 0x057C
+#define REG_TS_EVENT_3_SUB_NANOSEC 0x0580
+
+#define REG_TS_EVENT_4_NANOSEC 0x0584
+#define REG_TS_EVENT_4_SEC 0x0588
+#define REG_TS_EVENT_4_SUB_NANOSEC 0x058C
+
+#define REG_TS_EVENT_5_NANOSEC 0x0590
+#define REG_TS_EVENT_5_SEC 0x0594
+#define REG_TS_EVENT_5_SUB_NANOSEC 0x0598
+
+#define REG_TS_EVENT_6_NANOSEC 0x059C
+#define REG_TS_EVENT_6_SEC 0x05A0
+#define REG_TS_EVENT_6_SUB_NANOSEC 0x05A4
+
+#define REG_TS_EVENT_7_NANOSEC 0x05A8
+#define REG_TS_EVENT_7_SEC 0x05AC
+#define REG_TS_EVENT_7_SUB_NANOSEC 0x05B0
+
+#define TS_EVENT_EDGE_M 0x1
+#define TS_EVENT_EDGE_S 30
+#define TS_EVENT_NANOSEC_M (BIT(30) - 1)
+
+#define TS_EVENT_SUB_NANOSEC_M 0x7
+
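+/* byte stride (0xC) between consecutive timestamp event register blocks */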
+#define TS_EVENT_SAMPLE \
+ (REG_TS_EVENT_1_NANOSEC - REG_TS_EVENT_0_NANOSEC)
+
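+/* each port occupies a 4 KB register window, port N at (N + 1) << 12;
+ * e.g. PORT_CTRL_ADDR(0, REG_PORT_CTRL_0) = 0x1020
+ */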
+#define PORT_CTRL_ADDR(port, addr) ((addr) | (((port) + 1) << 12))
+
+#define REG_GLOBAL_RR_INDEX__1 0x0600
+
+/* DLR */
+#define REG_DLR_SRC_PORT__4 0x0604
+
+#define DLR_SRC_PORT_UNICAST BIT(31)
+#define DLR_SRC_PORT_M 0x3
+#define DLR_SRC_PORT_BOTH 0
+#define DLR_SRC_PORT_EACH 1
+
+#define REG_DLR_IP_ADDR__4 0x0608
+
+#define REG_DLR_CTRL__1 0x0610
+
+#define DLR_RESET_SEQ_ID BIT(3)
+#define DLR_BACKUP_AUTO_ON BIT(2)
+#define DLR_BEACON_TX_ENABLE BIT(1)
+#define DLR_ASSIST_ENABLE BIT(0)
+
+#define REG_DLR_STATE__1 0x0611
+
+#define DLR_NODE_STATE_M 0x3
+#define DLR_NODE_STATE_S 1
+#define DLR_NODE_STATE_IDLE 0
+#define DLR_NODE_STATE_FAULT 1
+#define DLR_NODE_STATE_NORMAL 2
+#define DLR_RING_STATE_FAULT 0
+#define DLR_RING_STATE_NORMAL 1
+
+#define REG_DLR_PRECEDENCE__1 0x0612
+
+#define REG_DLR_BEACON_INTERVAL__4 0x0614
+
+#define REG_DLR_BEACON_TIMEOUT__4 0x0618
+
+#define REG_DLR_TIMEOUT_WINDOW__4 0x061C
+
+#define DLR_TIMEOUT_WINDOW_M (BIT(22) - 1)
+
+#define REG_DLR_VLAN_ID__2 0x0620
+
+#define DLR_VLAN_ID_M (BIT(12) - 1)
+
+#define REG_DLR_DEST_ADDR_0 0x0622
+#define REG_DLR_DEST_ADDR_1 0x0623
+#define REG_DLR_DEST_ADDR_2 0x0624
+#define REG_DLR_DEST_ADDR_3 0x0625
+#define REG_DLR_DEST_ADDR_4 0x0626
+#define REG_DLR_DEST_ADDR_5 0x0627
+
+#define REG_DLR_PORT_MAP__4 0x0628
+
+#define REG_DLR_CLASS__1 0x062C
+
+#define DLR_FRAME_QID_M 0x3
+
+/* HSR */
+#define REG_HSR_PORT_MAP__4 0x0640
+
+#define REG_HSR_ALU_CTRL_0__1 0x0644
+
+#define HSR_DUPLICATE_DISCARD BIT(7)
+#define HSR_NODE_UNICAST BIT(6)
+#define HSR_AGE_CNT_DEFAULT_M 0x7
+#define HSR_AGE_CNT_DEFAULT_S 3
+#define HSR_LEARN_MCAST_DISABLE BIT(2)
+#define HSR_HASH_OPTION_M 0x3
+#define HSR_HASH_DISABLE 0
+#define HSR_HASH_UPPER_BITS 1
+#define HSR_HASH_LOWER_BITS 2
+#define HSR_HASH_XOR_BOTH_BITS 3
+
+#define REG_HSR_ALU_CTRL_1__1 0x0645
+
+#define HSR_LEARN_UCAST_DISABLE BIT(7)
+#define HSR_FLUSH_TABLE BIT(5)
+#define HSR_PROC_MCAST_SRC BIT(3)
+#define HSR_AGING_ENABLE BIT(2)
+
+#define REG_HSR_ALU_CTRL_2__2 0x0646
+
+#define REG_HSR_ALU_AGE_PERIOD__4 0x0648
+
+#define REG_HSR_ALU_INT_STATUS__1 0x064C
+#define REG_HSR_ALU_INT_MASK__1 0x064D
+
+#define HSR_WINDOW_OVERFLOW_INT BIT(3)
+#define HSR_LEARN_FAIL_INT BIT(2)
+#define HSR_ALMOST_FULL_INT BIT(1)
+#define HSR_WRITE_FAIL_INT BIT(0)
+
+#define REG_HSR_ALU_ENTRY_0__2 0x0650
+
+#define HSR_ENTRY_INDEX_M (BIT(10) - 1)
+#define HSR_FAIL_INDEX_M (BIT(8) - 1)
+
+#define REG_HSR_ALU_ENTRY_1__2 0x0652
+
+#define HSR_FAIL_LEARN_INDEX_M (BIT(8) - 1)
+
+#define REG_HSR_ALU_ENTRY_3__2 0x0654
+
+#define HSR_CPU_ACCESS_ENTRY_INDEX_M (BIT(8) - 1)
+
+/* 0 - Operation */
+#define REG_PORT_DEFAULT_VID 0x0000
+
+#define REG_PORT_CUSTOM_VID 0x0002
+#define REG_PORT_AVB_SR_1_VID 0x0004
+#define REG_PORT_AVB_SR_2_VID 0x0006
+
+#define REG_PORT_AVB_SR_1_TYPE 0x0008
+#define REG_PORT_AVB_SR_2_TYPE 0x000A
+
+#define REG_PORT_PME_STATUS 0x0013
+#define REG_PORT_PME_CTRL 0x0017
+
+#define PME_WOL_MAGICPKT BIT(2)
+#define PME_WOL_LINKUP BIT(1)
+#define PME_WOL_ENERGY BIT(0)
+
+#define REG_PORT_INT_STATUS 0x001B
+#define REG_PORT_INT_MASK 0x001F
+
+#define PORT_SGMII_INT BIT(3)
+#define PORT_PTP_INT BIT(2)
+#define PORT_PHY_INT BIT(1)
+#define PORT_ACL_INT BIT(0)
+
+#define PORT_INT_MASK \
+ (PORT_SGMII_INT | PORT_PTP_INT | PORT_PHY_INT | PORT_ACL_INT)
+
+#define REG_PORT_CTRL_0 0x0020
+
+#define PORT_MAC_LOOPBACK BIT(7)
+#define PORT_FORCE_TX_FLOW_CTRL BIT(4)
+#define PORT_FORCE_RX_FLOW_CTRL BIT(3)
+#define PORT_TAIL_TAG_ENABLE BIT(2)
+#define PORT_QUEUE_SPLIT_ENABLE 0x3
+
+#define REG_PORT_CTRL_1 0x0021
+
+#define PORT_SRP_ENABLE 0x3
+
+#define REG_PORT_STATUS_0 0x0030
+
+#define PORT_INTF_SPEED_M 0x3
+#define PORT_INTF_SPEED_S 3
+#define PORT_INTF_FULL_DUPLEX BIT(2)
+#define PORT_TX_FLOW_CTRL BIT(1)
+#define PORT_RX_FLOW_CTRL BIT(0)
+
+#define REG_PORT_STATUS_1 0x0034
+
+/* 1 - PHY */
+#define REG_PORT_PHY_CTRL 0x0100
+
+#define PORT_PHY_RESET BIT(15)
+#define PORT_PHY_LOOPBACK BIT(14)
+#define PORT_SPEED_100MBIT BIT(13)
+#define PORT_AUTO_NEG_ENABLE BIT(12)
+#define PORT_POWER_DOWN BIT(11)
+#define PORT_ISOLATE BIT(10)
+#define PORT_AUTO_NEG_RESTART BIT(9)
+#define PORT_FULL_DUPLEX BIT(8)
+#define PORT_COLLISION_TEST BIT(7)
+#define PORT_SPEED_1000MBIT BIT(6)
+
+#define REG_PORT_PHY_STATUS 0x0102
+
+#define PORT_100BT4_CAPABLE BIT(15)
+#define PORT_100BTX_FD_CAPABLE BIT(14)
+#define PORT_100BTX_CAPABLE BIT(13)
+#define PORT_10BT_FD_CAPABLE BIT(12)
+#define PORT_10BT_CAPABLE BIT(11)
+#define PORT_EXTENDED_STATUS BIT(8)
+#define PORT_MII_SUPPRESS_CAPABLE BIT(6)
+#define PORT_AUTO_NEG_ACKNOWLEDGE BIT(5)
+#define PORT_REMOTE_FAULT BIT(4)
+#define PORT_AUTO_NEG_CAPABLE BIT(3)
+#define PORT_LINK_STATUS BIT(2)
+#define PORT_JABBER_DETECT BIT(1)
+#define PORT_EXTENDED_CAPABILITY BIT(0)
+
+#define REG_PORT_PHY_ID_HI 0x0104
+#define REG_PORT_PHY_ID_LO 0x0106
+
+#define KSZ9477_ID_HI 0x0022
+#define KSZ9477_ID_LO 0x1622
+
+#define REG_PORT_PHY_AUTO_NEGOTIATION 0x0108
+
+#define PORT_AUTO_NEG_NEXT_PAGE BIT(15)
+#define PORT_AUTO_NEG_REMOTE_FAULT BIT(13)
+#define PORT_AUTO_NEG_ASYM_PAUSE BIT(11)
+#define PORT_AUTO_NEG_SYM_PAUSE BIT(10)
+#define PORT_AUTO_NEG_100BT4 BIT(9)
+#define PORT_AUTO_NEG_100BTX_FD BIT(8)
+#define PORT_AUTO_NEG_100BTX BIT(7)
+#define PORT_AUTO_NEG_10BT_FD BIT(6)
+#define PORT_AUTO_NEG_10BT BIT(5)
+#define PORT_AUTO_NEG_SELECTOR 0x001F
+#define PORT_AUTO_NEG_802_3 0x0001
+
+#define PORT_AUTO_NEG_PAUSE \
+ (PORT_AUTO_NEG_ASYM_PAUSE | PORT_AUTO_NEG_SYM_PAUSE)
+
+#define REG_PORT_PHY_REMOTE_CAPABILITY 0x010A
+
+#define PORT_REMOTE_NEXT_PAGE BIT(15)
+#define PORT_REMOTE_ACKNOWLEDGE BIT(14)
+#define PORT_REMOTE_REMOTE_FAULT BIT(13)
+#define PORT_REMOTE_ASYM_PAUSE BIT(11)
+#define PORT_REMOTE_SYM_PAUSE BIT(10)
+#define PORT_REMOTE_100BTX_FD BIT(8)
+#define PORT_REMOTE_100BTX BIT(7)
+#define PORT_REMOTE_10BT_FD BIT(6)
+#define PORT_REMOTE_10BT BIT(5)
+
+#define REG_PORT_PHY_1000_CTRL 0x0112
+
+#define PORT_AUTO_NEG_MANUAL BIT(12)
+#define PORT_AUTO_NEG_MASTER BIT(11)
+#define PORT_AUTO_NEG_MASTER_PREFERRED BIT(10)
+#define PORT_AUTO_NEG_1000BT_FD BIT(9)
+#define PORT_AUTO_NEG_1000BT BIT(8)
+
+#define REG_PORT_PHY_1000_STATUS 0x0114
+
+#define PORT_MASTER_FAULT BIT(15)
+#define PORT_LOCAL_MASTER BIT(14)
+#define PORT_LOCAL_RX_OK BIT(13)
+#define PORT_REMOTE_RX_OK BIT(12)
+#define PORT_REMOTE_1000BT_FD BIT(11)
+#define PORT_REMOTE_1000BT BIT(10)
+#define PORT_REMOTE_IDLE_CNT_M 0x0F
+
+#define PORT_PHY_1000_STATIC_STATUS \
+ (PORT_LOCAL_RX_OK | \
+ PORT_REMOTE_RX_OK | \
+ PORT_REMOTE_1000BT_FD | \
+ PORT_REMOTE_1000BT)
+
+#define REG_PORT_PHY_MMD_SETUP 0x011A
+
+#define PORT_MMD_OP_MODE_M 0x3
+#define PORT_MMD_OP_MODE_S 14
+#define PORT_MMD_OP_INDEX 0
+#define PORT_MMD_OP_DATA_NO_INCR 1
+#define PORT_MMD_OP_DATA_INCR_RW 2
+#define PORT_MMD_OP_DATA_INCR_W 3
+#define PORT_MMD_DEVICE_ID_M 0x1F
+
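+/* e.g. MMD_SETUP(PORT_MMD_OP_INDEX, MMD_DEVICE_ID_EEE_ADV) = 0x0007 selects
+ * the EEE device for a subsequent index write
+ */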
+#define MMD_SETUP(mode, dev) \
+ (((u16)(mode) << PORT_MMD_OP_MODE_S) | (dev))
+
+#define REG_PORT_PHY_MMD_INDEX_DATA 0x011C
+
+#define MMD_DEVICE_ID_DSP 1
+
+#define MMD_DSP_SQI_CHAN_A 0xAC
+#define MMD_DSP_SQI_CHAN_B 0xAD
+#define MMD_DSP_SQI_CHAN_C 0xAE
+#define MMD_DSP_SQI_CHAN_D 0xAF
+
+#define DSP_SQI_ERR_DETECTED BIT(15)
+#define DSP_SQI_AVG_ERR 0x7FFF
+
+#define MMD_DEVICE_ID_COMMON 2
+
+#define MMD_DEVICE_ID_EEE_ADV 7
+
+#define MMD_EEE_ADV 0x3C
+#define EEE_ADV_100MBIT BIT(1)
+#define EEE_ADV_1GBIT BIT(2)
+
+#define MMD_EEE_LP_ADV 0x3D
+#define MMD_EEE_MSG_CODE 0x3F
+
+#define MMD_DEVICE_ID_AFED 0x1C
+
+#define REG_PORT_PHY_EXTENDED_STATUS 0x011E
+
+#define PORT_100BTX_FD_ABLE BIT(15)
+#define PORT_100BTX_ABLE BIT(14)
+#define PORT_10BT_FD_ABLE BIT(13)
+#define PORT_10BT_ABLE BIT(12)
+
+#define REG_PORT_SGMII_ADDR__4 0x0200
+#define PORT_SGMII_AUTO_INCR BIT(23)
+#define PORT_SGMII_DEVICE_ID_M 0x1F
+#define PORT_SGMII_DEVICE_ID_S 16
+#define PORT_SGMII_ADDR_M (BIT(21) - 1)
+
+#define REG_PORT_SGMII_DATA__4 0x0204
+#define PORT_SGMII_DATA_M (BIT(16) - 1)
+
+#define MMD_DEVICE_ID_PMA 0x01
+#define MMD_DEVICE_ID_PCS 0x03
+#define MMD_DEVICE_ID_PHY_XS 0x04
+#define MMD_DEVICE_ID_DTE_XS 0x05
+#define MMD_DEVICE_ID_AN 0x07
+#define MMD_DEVICE_ID_VENDOR_CTRL 0x1E
+#define MMD_DEVICE_ID_VENDOR_MII 0x1F
+
+#define SR_MII MMD_DEVICE_ID_VENDOR_MII
+
+#define MMD_SR_MII_CTRL 0x0000
+
+#define SR_MII_RESET BIT(15)
+#define SR_MII_LOOPBACK BIT(14)
+#define SR_MII_SPEED_100MBIT BIT(13)
+#define SR_MII_AUTO_NEG_ENABLE BIT(12)
+#define SR_MII_POWER_DOWN BIT(11)
+#define SR_MII_AUTO_NEG_RESTART BIT(9)
+#define SR_MII_FULL_DUPLEX BIT(8)
+#define SR_MII_SPEED_1000MBIT BIT(6)
+
+#define MMD_SR_MII_STATUS 0x0001
+#define MMD_SR_MII_ID_1 0x0002
+#define MMD_SR_MII_ID_2 0x0003
+#define MMD_SR_MII_AUTO_NEGOTIATION 0x0004
+
+#define SR_MII_AUTO_NEG_NEXT_PAGE BIT(15)
+#define SR_MII_AUTO_NEG_REMOTE_FAULT_M 0x3
+#define SR_MII_AUTO_NEG_REMOTE_FAULT_S 12
+#define SR_MII_AUTO_NEG_NO_ERROR 0
+#define SR_MII_AUTO_NEG_OFFLINE 1
+#define SR_MII_AUTO_NEG_LINK_FAILURE 2
+#define SR_MII_AUTO_NEG_ERROR 3
+#define SR_MII_AUTO_NEG_PAUSE_M 0x3
+#define SR_MII_AUTO_NEG_PAUSE_S 7
+#define SR_MII_AUTO_NEG_NO_PAUSE 0
+#define SR_MII_AUTO_NEG_ASYM_PAUSE_TX 1
+#define SR_MII_AUTO_NEG_SYM_PAUSE 2
+#define SR_MII_AUTO_NEG_ASYM_PAUSE_RX 3
+#define SR_MII_AUTO_NEG_HALF_DUPLEX BIT(6)
+#define SR_MII_AUTO_NEG_FULL_DUPLEX BIT(5)
+
+#define MMD_SR_MII_REMOTE_CAPABILITY 0x0005
+#define MMD_SR_MII_AUTO_NEG_EXP 0x0006
+#define MMD_SR_MII_AUTO_NEG_EXT 0x000F
+
+#define MMD_SR_MII_DIGITAL_CTRL_1 0x8000
+
+#define MMD_SR_MII_AUTO_NEG_CTRL 0x8001
+
+#define SR_MII_8_BIT BIT(8)
+#define SR_MII_SGMII_LINK_UP BIT(4)
+#define SR_MII_TX_CFG_PHY_MASTER BIT(3)
+#define SR_MII_PCS_MODE_M 0x3
+#define SR_MII_PCS_MODE_S 1
+#define SR_MII_PCS_SGMII 2
+#define SR_MII_AUTO_NEG_COMPLETE_INTR BIT(0)
+
+#define MMD_SR_MII_AUTO_NEG_STATUS 0x8002
+
+#define SR_MII_STAT_LINK_UP BIT(4)
+#define SR_MII_STAT_M 0x3
+#define SR_MII_STAT_S 2
+#define SR_MII_STAT_10_MBPS 0
+#define SR_MII_STAT_100_MBPS 1
+#define SR_MII_STAT_1000_MBPS 2
+#define SR_MII_STAT_FULL_DUPLEX BIT(1)
+
+#define MMD_SR_MII_PHY_CTRL 0x80A0
+
+#define SR_MII_PHY_LANE_SEL_M 0xF
+#define SR_MII_PHY_LANE_SEL_S 8
+#define SR_MII_PHY_WRITE BIT(1)
+#define SR_MII_PHY_START_BUSY BIT(0)
+
+#define MMD_SR_MII_PHY_ADDR 0x80A1
+
+#define SR_MII_PHY_ADDR_M (BIT(16) - 1)
+
+#define MMD_SR_MII_PHY_DATA 0x80A2
+
+#define SR_MII_PHY_DATA_M (BIT(16) - 1)
+
+#define SR_MII_PHY_JTAG_CHIP_ID_HI 0x000C
+#define SR_MII_PHY_JTAG_CHIP_ID_LO 0x000D
+
+#define REG_PORT_PHY_REMOTE_LB_LED 0x0122
+
+#define PORT_REMOTE_LOOPBACK BIT(8)
+#define PORT_LED_SELECT (3 << 6)
+#define PORT_LED_CTRL (3 << 4)
+#define PORT_LED_CTRL_TEST BIT(3)
+#define PORT_10BT_PREAMBLE BIT(2)
+#define PORT_LINK_MD_10BT_ENABLE BIT(1)
+#define PORT_LINK_MD_PASS BIT(0)
+
+#define REG_PORT_PHY_LINK_MD 0x0124
+
+#define PORT_START_CABLE_DIAG BIT(15)
+#define PORT_TX_DISABLE BIT(14)
+#define PORT_CABLE_DIAG_PAIR_M 0x3
+#define PORT_CABLE_DIAG_PAIR_S 12
+#define PORT_CABLE_DIAG_SELECT_M 0x3
+#define PORT_CABLE_DIAG_SELECT_S 10
+#define PORT_CABLE_DIAG_RESULT_M 0x3
+#define PORT_CABLE_DIAG_RESULT_S 8
+#define PORT_CABLE_STAT_NORMAL 0
+#define PORT_CABLE_STAT_OPEN 1
+#define PORT_CABLE_STAT_SHORT 2
+#define PORT_CABLE_STAT_FAILED 3
+#define PORT_CABLE_FAULT_COUNTER 0x00FF
+
+#define REG_PORT_PHY_PMA_STATUS 0x0126
+
+#define PORT_1000_LINK_GOOD BIT(1)
+#define PORT_100_LINK_GOOD BIT(0)
+
+#define REG_PORT_PHY_DIGITAL_STATUS 0x0128
+
+#define PORT_LINK_DETECT BIT(14)
+#define PORT_SIGNAL_DETECT BIT(13)
+#define PORT_PHY_STAT_MDI BIT(12)
+#define PORT_PHY_STAT_MASTER BIT(11)
+
+#define REG_PORT_PHY_RXER_COUNTER 0x012A
+
+#define REG_PORT_PHY_INT_ENABLE 0x0136
+#define REG_PORT_PHY_INT_STATUS 0x0137
+
+#define JABBER_INT BIT(7)
+#define RX_ERR_INT BIT(6)
+#define PAGE_RX_INT BIT(5)
+#define PARALLEL_DETECT_FAULT_INT BIT(4)
+#define LINK_PARTNER_ACK_INT BIT(3)
+#define LINK_DOWN_INT BIT(2)
+#define REMOTE_FAULT_INT BIT(1)
+#define LINK_UP_INT BIT(0)
+
+#define REG_PORT_PHY_DIGITAL_DEBUG_1 0x0138
+
+#define PORT_REG_CLK_SPEED_25_MHZ BIT(14)
+#define PORT_PHY_FORCE_MDI BIT(7)
+#define PORT_PHY_AUTO_MDIX_DISABLE BIT(6)
+
+/* Same as PORT_PHY_LOOPBACK */
+#define PORT_PHY_PCS_LOOPBACK BIT(0)
+
+#define REG_PORT_PHY_DIGITAL_DEBUG_2 0x013A
+
+#define REG_PORT_PHY_DIGITAL_DEBUG_3 0x013C
+
+#define PORT_100BT_FIXED_LATENCY BIT(15)
+
+#define REG_PORT_PHY_PHY_CTRL 0x013E
+
+#define PORT_INT_PIN_HIGH BIT(14)
+#define PORT_ENABLE_JABBER BIT(9)
+#define PORT_STAT_SPEED_1000MBIT BIT(6)
+#define PORT_STAT_SPEED_100MBIT BIT(5)
+#define PORT_STAT_SPEED_10MBIT BIT(4)
+#define PORT_STAT_FULL_DUPLEX BIT(3)
+
+/* Same as PORT_PHY_STAT_MASTER */
+#define PORT_STAT_MASTER BIT(2)
+#define PORT_RESET BIT(1)
+#define PORT_LINK_STATUS_FAIL BIT(0)
+
+/* 3 - xMII */
+#define REG_PORT_XMII_CTRL_0 0x0300
+
+#define PORT_SGMII_SEL BIT(7)
+#define PORT_MII_FULL_DUPLEX BIT(6)
+#define PORT_MII_100MBIT BIT(4)
+#define PORT_GRXC_ENABLE BIT(0)
+
+#define REG_PORT_XMII_CTRL_1 0x0301
+
+#define PORT_RMII_CLK_SEL BIT(7)
+/* S1 */
+#define PORT_MII_1000MBIT_S1 BIT(6)
+/* S2 */
+#define PORT_MII_NOT_1GBIT BIT(6)
+#define PORT_MII_SEL_EDGE BIT(5)
+#define PORT_RGMII_ID_IG_ENABLE BIT(4)
+#define PORT_RGMII_ID_EG_ENABLE BIT(3)
+#define PORT_MII_MAC_MODE BIT(2)
+#define PORT_MII_SEL_M 0x3
+/* S1 */
+#define PORT_MII_SEL_S1 0x0
+#define PORT_RMII_SEL_S1 0x1
+#define PORT_GMII_SEL_S1 0x2
+#define PORT_RGMII_SEL_S1 0x3
+/* S2 */
+#define PORT_RGMII_SEL 0x0
+#define PORT_RMII_SEL 0x1
+#define PORT_GMII_SEL 0x2
+#define PORT_MII_SEL 0x3
+
+/* 4 - MAC */
+#define REG_PORT_MAC_CTRL_0 0x0400
+
+#define PORT_BROADCAST_STORM BIT(1)
+#define PORT_JUMBO_FRAME BIT(0)
+
+#define REG_PORT_MAC_CTRL_1 0x0401
+
+#define PORT_BACK_PRESSURE BIT(3)
+#define PORT_PASS_ALL BIT(0)
+
+#define REG_PORT_MAC_CTRL_2 0x0402
+
+#define PORT_100BT_EEE_DISABLE BIT(7)
+#define PORT_1000BT_EEE_DISABLE BIT(6)
+
+#define REG_PORT_MAC_IN_RATE_LIMIT 0x0403
+
+#define PORT_IN_PORT_BASED_S 6
+#define PORT_RATE_PACKET_BASED_S 5
+#define PORT_IN_FLOW_CTRL_S 4
+#define PORT_COUNT_IFG_S 1
+#define PORT_COUNT_PREAMBLE_S 0
+#define PORT_IN_PORT_BASED BIT(6)
+#define PORT_IN_PACKET_BASED BIT(5)
+#define PORT_IN_FLOW_CTRL BIT(4)
+#define PORT_IN_LIMIT_MODE_M 0x3
+#define PORT_IN_LIMIT_MODE_S 2
+#define PORT_IN_ALL 0
+#define PORT_IN_UNICAST 1
+#define PORT_IN_MULTICAST 2
+#define PORT_IN_BROADCAST 3
+#define PORT_COUNT_IFG BIT(1)
+#define PORT_COUNT_PREAMBLE BIT(0)
+
+#define REG_PORT_IN_RATE_0 0x0410
+#define REG_PORT_IN_RATE_1 0x0411
+#define REG_PORT_IN_RATE_2 0x0412
+#define REG_PORT_IN_RATE_3 0x0413
+#define REG_PORT_IN_RATE_4 0x0414
+#define REG_PORT_IN_RATE_5 0x0415
+#define REG_PORT_IN_RATE_6 0x0416
+#define REG_PORT_IN_RATE_7 0x0417
+
+#define REG_PORT_OUT_RATE_0 0x0420
+#define REG_PORT_OUT_RATE_1 0x0421
+#define REG_PORT_OUT_RATE_2 0x0422
+#define REG_PORT_OUT_RATE_3 0x0423
+
+#define PORT_RATE_LIMIT_M (BIT(7) - 1)
+
+/* 5 - MIB Counters */
+#define REG_PORT_MIB_CTRL_STAT__4 0x0500
+
+#define MIB_COUNTER_OVERFLOW BIT(31)
+#define MIB_COUNTER_VALID BIT(30)
+#define MIB_COUNTER_READ BIT(25)
+#define MIB_COUNTER_FLUSH_FREEZE BIT(24)
+#define MIB_COUNTER_INDEX_M (BIT(8) - 1)
+#define MIB_COUNTER_INDEX_S 16
+#define MIB_COUNTER_DATA_HI_M 0xF
+
+#define REG_PORT_MIB_DATA 0x0504
+
+/* 6 - ACL */
+#define REG_PORT_ACL_0 0x0600
+
+#define ACL_FIRST_RULE_M 0xF
+
+#define REG_PORT_ACL_1 0x0601
+
+#define ACL_MODE_M 0x3
+#define ACL_MODE_S 4
+#define ACL_MODE_DISABLE 0
+#define ACL_MODE_LAYER_2 1
+#define ACL_MODE_LAYER_3 2
+#define ACL_MODE_LAYER_4 3
+#define ACL_ENABLE_M 0x3
+#define ACL_ENABLE_S 2
+#define ACL_ENABLE_2_COUNT 0
+#define ACL_ENABLE_2_TYPE 1
+#define ACL_ENABLE_2_MAC 2
+#define ACL_ENABLE_2_BOTH 3
+#define ACL_ENABLE_3_IP 1
+#define ACL_ENABLE_3_SRC_DST_COMP 2
+#define ACL_ENABLE_4_PROTOCOL 0
+#define ACL_ENABLE_4_TCP_PORT_COMP 1
+#define ACL_ENABLE_4_UDP_PORT_COMP 2
+#define ACL_ENABLE_4_TCP_SEQN_COMP 3
+#define ACL_SRC BIT(1)
+#define ACL_EQUAL BIT(0)
+
+#define REG_PORT_ACL_2 0x0602
+#define REG_PORT_ACL_3 0x0603
+
+#define ACL_MAX_PORT 0xFFFF
+
+#define REG_PORT_ACL_4 0x0604
+#define REG_PORT_ACL_5 0x0605
+
+#define ACL_MIN_PORT 0xFFFF
+#define ACL_IP_ADDR 0xFFFFFFFF
+#define ACL_TCP_SEQNUM 0xFFFFFFFF
+
+#define REG_PORT_ACL_6 0x0606
+
+#define ACL_RESERVED 0xF8
+#define ACL_PORT_MODE_M 0x3
+#define ACL_PORT_MODE_S 1
+#define ACL_PORT_MODE_DISABLE 0
+#define ACL_PORT_MODE_EITHER 1
+#define ACL_PORT_MODE_IN_RANGE 2
+#define ACL_PORT_MODE_OUT_OF_RANGE 3
+
+#define REG_PORT_ACL_7 0x0607
+
+#define ACL_TCP_FLAG_ENABLE BIT(0)
+
+#define REG_PORT_ACL_8 0x0608
+
+#define ACL_TCP_FLAG_M 0xFF
+
+#define REG_PORT_ACL_9 0x0609
+
+#define ACL_TCP_FLAG 0xFF
+#define ACL_ETH_TYPE 0xFFFF
+#define ACL_IP_M 0xFFFFFFFF
+
+#define REG_PORT_ACL_A 0x060A
+
+#define ACL_PRIO_MODE_M 0x3
+#define ACL_PRIO_MODE_S 6
+#define ACL_PRIO_MODE_DISABLE 0
+#define ACL_PRIO_MODE_HIGHER 1
+#define ACL_PRIO_MODE_LOWER 2
+#define ACL_PRIO_MODE_REPLACE 3
+#define ACL_PRIO_M KS_PRIO_M
+#define ACL_PRIO_S 3
+#define ACL_VLAN_PRIO_REPLACE BIT(2)
+#define ACL_VLAN_PRIO_M KS_PRIO_M
+#define ACL_VLAN_PRIO_HI_M 0x3
+
+#define REG_PORT_ACL_B 0x060B
+
+#define ACL_VLAN_PRIO_LO_M 0x8
+#define ACL_VLAN_PRIO_S 7
+#define ACL_MAP_MODE_M 0x3
+#define ACL_MAP_MODE_S 5
+#define ACL_MAP_MODE_DISABLE 0
+#define ACL_MAP_MODE_OR 1
+#define ACL_MAP_MODE_AND 2
+#define ACL_MAP_MODE_REPLACE 3
+
+#define ACL_CNT_M (BIT(11) - 1)
+#define ACL_CNT_S 5
+
+#define REG_PORT_ACL_C 0x060C
+
+#define REG_PORT_ACL_D 0x060D
+#define ACL_MSEC_UNIT BIT(6)
+#define ACL_INTR_MODE BIT(5)
+#define ACL_PORT_MAP 0x7F
+
+#define REG_PORT_ACL_E 0x060E
+#define REG_PORT_ACL_F 0x060F
+
+#define REG_PORT_ACL_BYTE_EN_MSB 0x0610
+#define REG_PORT_ACL_BYTE_EN_LSB 0x0611
+
+#define ACL_ACTION_START 0xA
+#define ACL_ACTION_LEN 4
+#define ACL_INTR_CNT_START 0xD
+#define ACL_RULESET_START 0xE
+#define ACL_RULESET_LEN 2
+#define ACL_TABLE_LEN 16
+
+#define ACL_ACTION_ENABLE 0x003C
+#define ACL_MATCH_ENABLE 0x7FC3
+#define ACL_RULESET_ENABLE 0x8003
+#define ACL_BYTE_ENABLE 0xFFFF
+
+#define REG_PORT_ACL_CTRL_0 0x0612
+
+#define PORT_ACL_WRITE_DONE BIT(6)
+#define PORT_ACL_READ_DONE BIT(5)
+#define PORT_ACL_WRITE BIT(4)
+#define PORT_ACL_INDEX_M 0xF
+
+#define REG_PORT_ACL_CTRL_1 0x0613
+
+/* 8 - Classification and Policing */
+#define REG_PORT_MRI_MIRROR_CTRL 0x0800
+
+#define PORT_MIRROR_RX BIT(6)
+#define PORT_MIRROR_TX BIT(5)
+#define PORT_MIRROR_SNIFFER BIT(1)
+
+#define REG_PORT_MRI_PRIO_CTRL 0x0801
+
+#define PORT_HIGHEST_PRIO BIT(7)
+#define PORT_OR_PRIO BIT(6)
+#define PORT_MAC_PRIO_ENABLE BIT(4)
+#define PORT_VLAN_PRIO_ENABLE BIT(3)
+#define PORT_802_1P_PRIO_ENABLE BIT(2)
+#define PORT_DIFFSERV_PRIO_ENABLE BIT(1)
+#define PORT_ACL_PRIO_ENABLE BIT(0)
+
+#define REG_PORT_MRI_MAC_CTRL 0x0802
+
+#define PORT_USER_PRIO_CEILING BIT(7)
+#define PORT_DROP_NON_VLAN BIT(4)
+#define PORT_DROP_TAG BIT(3)
+#define PORT_BASED_PRIO_M KS_PRIO_M
+#define PORT_BASED_PRIO_S 0
+
+#define REG_PORT_MRI_AUTHEN_CTRL 0x0803
+
+#define PORT_ACL_ENABLE BIT(2)
+#define PORT_AUTHEN_MODE 0x3
+#define PORT_AUTHEN_PASS 0
+#define PORT_AUTHEN_BLOCK 1
+#define PORT_AUTHEN_TRAP 2
+
+#define REG_PORT_MRI_INDEX__4 0x0804
+
+#define MRI_INDEX_P_M 0x7
+#define MRI_INDEX_P_S 16
+#define MRI_INDEX_Q_M 0x3
+#define MRI_INDEX_Q_S 0
+
+#define REG_PORT_MRI_TC_MAP__4 0x0808
+
+#define PORT_TC_MAP_M 0xf
+#define PORT_TC_MAP_S 4
+
+#define REG_PORT_MRI_POLICE_CTRL__4 0x080C
+
+#define POLICE_DROP_ALL BIT(10)
+#define POLICE_PACKET_TYPE_M 0x3
+#define POLICE_PACKET_TYPE_S 8
+#define POLICE_PACKET_DROPPED 0
+#define POLICE_PACKET_GREEN 1
+#define POLICE_PACKET_YELLOW 2
+#define POLICE_PACKET_RED 3
+#define PORT_BASED_POLICING BIT(7)
+#define NON_DSCP_COLOR_M 0x3
+#define NON_DSCP_COLOR_S 5
+#define COLOR_MARK_ENABLE BIT(4)
+#define COLOR_REMAP_ENABLE BIT(3)
+#define POLICE_DROP_SRP BIT(2)
+#define POLICE_COLOR_NOT_AWARE BIT(1)
+#define POLICE_ENABLE BIT(0)
+
+#define REG_PORT_POLICE_COLOR_0__4 0x0810
+#define REG_PORT_POLICE_COLOR_1__4 0x0814
+#define REG_PORT_POLICE_COLOR_2__4 0x0818
+#define REG_PORT_POLICE_COLOR_3__4 0x081C
+
+#define POLICE_COLOR_MAP_S 2
+#define POLICE_COLOR_MAP_M (BIT(POLICE_COLOR_MAP_S) - 1)
+
+#define REG_PORT_POLICE_RATE__4 0x0820
+
+#define POLICE_CIR_S 16
+#define POLICE_PIR_S 0
+
+#define REG_PORT_POLICE_BURST_SIZE__4 0x0824
+
+#define POLICE_BURST_SIZE_M 0x3FFF
+#define POLICE_CBS_S 16
+#define POLICE_PBS_S 0
+
+#define REG_PORT_WRED_PM_CTRL_0__4 0x0830
+
+#define WRED_PM_CTRL_M (BIT(11) - 1)
+
+#define WRED_PM_MAX_THRESHOLD_S 16
+#define WRED_PM_MIN_THRESHOLD_S 0
+
+#define REG_PORT_WRED_PM_CTRL_1__4 0x0834
+
+#define WRED_PM_MULTIPLIER_S 16
+#define WRED_PM_AVG_QUEUE_SIZE_S 0
+
+#define REG_PORT_WRED_QUEUE_CTRL_0__4 0x0840
+#define REG_PORT_WRED_QUEUE_CTRL_1__4 0x0844
+
+#define REG_PORT_WRED_QUEUE_PMON__4 0x0848
+
+#define WRED_RANDOM_DROP_ENABLE BIT(31)
+#define WRED_PMON_FLUSH BIT(30)
+#define WRED_DROP_GYR_DISABLE BIT(29)
+#define WRED_DROP_YR_DISABLE BIT(28)
+#define WRED_DROP_R_DISABLE BIT(27)
+#define WRED_DROP_ALL BIT(26)
+#define WRED_PMON_M (BIT(24) - 1)
+
+/* 9 - Shaping */
+
+#define REG_PORT_MTI_QUEUE_INDEX__4 0x0900
+
+#define REG_PORT_MTI_QUEUE_CTRL_0__4 0x0904
+
+#define MTI_PVID_REPLACE BIT(0)
+
+#define REG_PORT_MTI_QUEUE_CTRL_0 0x0914
+
+#define MTI_SCHEDULE_MODE_M 0x3
+#define MTI_SCHEDULE_MODE_S 6
+#define MTI_SCHEDULE_STRICT_PRIO 0
+#define MTI_SCHEDULE_WRR 2
+#define MTI_SHAPING_M 0x3
+#define MTI_SHAPING_S 4
+#define MTI_SHAPING_OFF 0
+#define MTI_SHAPING_SRP 1
+#define MTI_SHAPING_TIME_AWARE 2
+
+#define REG_PORT_MTI_QUEUE_CTRL_1 0x0915
+
+#define MTI_TX_RATIO_M (BIT(7) - 1)
+
+#define REG_PORT_MTI_QUEUE_CTRL_2__2 0x0916
+#define REG_PORT_MTI_HI_WATER_MARK 0x0916
+#define REG_PORT_MTI_QUEUE_CTRL_3__2 0x0918
+#define REG_PORT_MTI_LO_WATER_MARK 0x0918
+#define REG_PORT_MTI_QUEUE_CTRL_4__2 0x091A
+#define REG_PORT_MTI_CREDIT_INCREMENT 0x091A
+
+/* A - QM */
+
+#define REG_PORT_QM_CTRL__4 0x0A00
+
+#define PORT_QM_DROP_PRIO_M 0x3
+
+#define REG_PORT_VLAN_MEMBERSHIP__4 0x0A04
+
+#define REG_PORT_QM_QUEUE_INDEX__4 0x0A08
+
+#define PORT_QM_QUEUE_INDEX_S 24
+#define PORT_QM_BURST_SIZE_S 16
+#define PORT_QM_MIN_RESV_SPACE_M (BIT(11) - 1)
+
+#define REG_PORT_QM_WATER_MARK__4 0x0A0C
+
+#define PORT_QM_HI_WATER_MARK_S 16
+#define PORT_QM_LO_WATER_MARK_S 0
+#define PORT_QM_WATER_MARK_M (BIT(11) - 1)
+
+#define REG_PORT_QM_TX_CNT_0__4 0x0A10
+
+#define PORT_QM_TX_CNT_USED_S 0
+#define PORT_QM_TX_CNT_M (BIT(11) - 1)
+
+#define REG_PORT_QM_TX_CNT_1__4 0x0A14
+
+#define PORT_QM_TX_CNT_CALCULATED_S 16
+#define PORT_QM_TX_CNT_AVAIL_S 0
+
+/* B - LUE */
+#define REG_PORT_LUE_CTRL 0x0B00
+
+#define PORT_VLAN_LOOKUP_VID_0 BIT(7)
+#define PORT_INGRESS_FILTER BIT(6)
+#define PORT_DISCARD_NON_VID BIT(5)
+#define PORT_MAC_BASED_802_1X BIT(4)
+#define PORT_SRC_ADDR_FILTER BIT(3)
+
+#define REG_PORT_LUE_MSTP_INDEX 0x0B01
+
+#define REG_PORT_LUE_MSTP_STATE 0x0B04
+
+#define PORT_TX_ENABLE BIT(2)
+#define PORT_RX_ENABLE BIT(1)
+#define PORT_LEARN_DISABLE BIT(0)
+
+/* C - PTP */
+
+#define REG_PTP_PORT_RX_DELAY__2 0x0C00
+#define REG_PTP_PORT_TX_DELAY__2 0x0C02
+#define REG_PTP_PORT_ASYM_DELAY__2 0x0C04
+
+#define REG_PTP_PORT_XDELAY_TS 0x0C08
+#define REG_PTP_PORT_XDELAY_TS_H 0x0C08
+#define REG_PTP_PORT_XDELAY_TS_L 0x0C0A
+
+#define REG_PTP_PORT_SYNC_TS 0x0C0C
+#define REG_PTP_PORT_SYNC_TS_H 0x0C0C
+#define REG_PTP_PORT_SYNC_TS_L 0x0C0E
+
+#define REG_PTP_PORT_PDRESP_TS 0x0C10
+#define REG_PTP_PORT_PDRESP_TS_H 0x0C10
+#define REG_PTP_PORT_PDRESP_TS_L 0x0C12
+
+#define REG_PTP_PORT_TX_INT_STATUS__2 0x0C14
+#define REG_PTP_PORT_TX_INT_ENABLE__2 0x0C16
+
+#define PTP_PORT_SYNC_INT BIT(15)
+#define PTP_PORT_XDELAY_REQ_INT BIT(14)
+#define PTP_PORT_PDELAY_RESP_INT BIT(13)
+
+#define REG_PTP_PORT_LINK_DELAY__4 0x0C18
+
+#define PRIO_QUEUES 4
+#define RX_PRIO_QUEUES 8
+
+#define KS_PRIO_IN_REG 2
+
+#define TOTAL_PORT_NUM 7
+
+#define KSZ9477_COUNTER_NUM 0x20
+#define TOTAL_KSZ9477_COUNTER_NUM (KSZ9477_COUNTER_NUM + 2 + 2)
+
+#define SWITCH_COUNTER_NUM KSZ9477_COUNTER_NUM
+#define TOTAL_SWITCH_COUNTER_NUM TOTAL_KSZ9477_COUNTER_NUM
+
+#define P_BCAST_STORM_CTRL REG_PORT_MAC_CTRL_0
+#define P_PRIO_CTRL REG_PORT_MRI_PRIO_CTRL
+#define P_MIRROR_CTRL REG_PORT_MRI_MIRROR_CTRL
+#define P_STP_CTRL REG_PORT_LUE_MSTP_STATE
+#define P_PHY_CTRL REG_PORT_PHY_CTRL
+#define P_NEG_RESTART_CTRL REG_PORT_PHY_CTRL
+#define P_LINK_STATUS REG_PORT_PHY_STATUS
+#define P_SPEED_STATUS REG_PORT_PHY_PHY_CTRL
+#define P_RATE_LIMIT_CTRL REG_PORT_MAC_IN_RATE_LIMIT
+
+#define S_LINK_AGING_CTRL REG_SW_LUE_CTRL_1
+#define S_MIRROR_CTRL REG_SW_MRI_CTRL_0
+#define S_REPLACE_VID_CTRL REG_SW_MAC_CTRL_2
+#define S_802_1P_PRIO_CTRL REG_SW_MAC_802_1P_MAP_0
+#define S_TOS_PRIO_CTRL REG_SW_MAC_TOS_PRIO_0
+#define S_FLUSH_TABLE_CTRL REG_SW_LUE_CTRL_1
+
+#define SW_FLUSH_DYN_MAC_TABLE SW_FLUSH_MSTP_TABLE
+
+#define MAX_TIMESTAMP_UNIT 2
+#define MAX_TRIG_UNIT 3
+#define MAX_TIMESTAMP_EVENT_UNIT 8
+#define MAX_GPIO 4
+
+#define PTP_TRIG_UNIT_M (BIT(MAX_TRIG_UNIT) - 1)
+#define PTP_TS_UNIT_M (BIT(MAX_TIMESTAMP_UNIT) - 1)
+
+/* The driver sets switch broadcast storm protection at a 10% rate. */
+#define BROADCAST_STORM_PROT_RATE 10
+
+/* 148,800 frames/sec * 67 ms interval = 9969.6, rounded down to 9969 */
+#define BROADCAST_STORM_VALUE 9969
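+
+/* at the 10% protection rate the programmed limit works out to
+ * 9969 * 10 / 100 = 996 frames per interval
+ */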
+
+#endif /* KSZ9477_REGS_H */
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
new file mode 100644
index 0000000..b313ecd
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -0,0 +1,1279 @@
+/*
+ * Microchip switch driver main logic
+ *
+ * Copyright (C) 2017
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_data/microchip-ksz.h>
+#include <linux/phy.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+
+#include "ksz_priv.h"
+
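+/* MIB counter names and their hardware read indices; indices 0x80 and
+ * above select the extra total/discard counters beyond the 0x20 base set
+ */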
+static const struct {
+ int index;
+ char string[ETH_GSTRING_LEN];
+} mib_names[TOTAL_SWITCH_COUNTER_NUM] = {
+ { 0x00, "rx_hi" },
+ { 0x01, "rx_undersize" },
+ { 0x02, "rx_fragments" },
+ { 0x03, "rx_oversize" },
+ { 0x04, "rx_jabbers" },
+ { 0x05, "rx_symbol_err" },
+ { 0x06, "rx_crc_err" },
+ { 0x07, "rx_align_err" },
+ { 0x08, "rx_mac_ctrl" },
+ { 0x09, "rx_pause" },
+ { 0x0A, "rx_bcast" },
+ { 0x0B, "rx_mcast" },
+ { 0x0C, "rx_ucast" },
+ { 0x0D, "rx_64_or_less" },
+ { 0x0E, "rx_65_127" },
+ { 0x0F, "rx_128_255" },
+ { 0x10, "rx_256_511" },
+ { 0x11, "rx_512_1023" },
+ { 0x12, "rx_1024_1522" },
+ { 0x13, "rx_1523_2000" },
+ { 0x14, "rx_2001" },
+ { 0x15, "tx_hi" },
+ { 0x16, "tx_late_col" },
+ { 0x17, "tx_pause" },
+ { 0x18, "tx_bcast" },
+ { 0x19, "tx_mcast" },
+ { 0x1A, "tx_ucast" },
+ { 0x1B, "tx_deferred" },
+ { 0x1C, "tx_total_col" },
+ { 0x1D, "tx_exc_col" },
+ { 0x1E, "tx_single_col" },
+ { 0x1F, "tx_mult_col" },
+ { 0x80, "rx_total" },
+ { 0x81, "tx_total" },
+ { 0x82, "rx_discards" },
+ { 0x83, "tx_discards" },
+};
+
+static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
+{
+ u8 data;
+
+ ksz_read8(dev, addr, &data);
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+ ksz_write8(dev, addr, data);
+}
+
+static void ksz_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
+{
+ u32 data;
+
+ ksz_read32(dev, addr, &data);
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+ ksz_write32(dev, addr, data);
+}
+
+static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
+ bool set)
+{
+ u32 addr;
+ u8 data;
+
+ addr = PORT_CTRL_ADDR(port, offset);
+ ksz_read8(dev, addr, &data);
+
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+
+ ksz_write8(dev, addr, data);
+}
+
+static void ksz_port_cfg32(struct ksz_device *dev, int port, int offset,
+ u32 bits, bool set)
+{
+ u32 addr;
+ u32 data;
+
+ addr = PORT_CTRL_ADDR(port, offset);
+ ksz_read32(dev, addr, &data);
+
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+
+ ksz_write32(dev, addr, data);
+}
+
+static int wait_vlan_ctrl_ready(struct ksz_device *dev, u32 waiton, int timeout)
+{
+ u8 data;
+
+ do {
+ ksz_read8(dev, REG_SW_VLAN_CTRL, &data);
+ if (!(data & waiton))
+ break;
+ usleep_range(1, 10);
+ } while (timeout-- > 0);
+
+ if (timeout <= 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int get_vlan_table(struct dsa_switch *ds, u16 vid, u32 *vlan_table)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret;
+
+ mutex_lock(&dev->vlan_mutex);
+
+ ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
+ ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);
+
+ /* wait to be cleared */
+ ret = wait_vlan_ctrl_ready(dev, VLAN_START, 1000);
+ if (ret < 0) {
+ dev_dbg(dev->dev, "Failed to read vlan table\n");
+ goto exit;
+ }
+
+ ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
+ ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
+ ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);
+
+ ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
+
+exit:
+ mutex_unlock(&dev->vlan_mutex);
+
+ return ret;
+}
+
+static int set_vlan_table(struct dsa_switch *ds, u16 vid, u32 *vlan_table)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret;
+
+ mutex_lock(&dev->vlan_mutex);
+
+ ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
+ ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
+ ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);
+
+ ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
+ ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);
+
+ /* wait to be cleared */
+ ret = wait_vlan_ctrl_ready(dev, VLAN_START, 1000);
+ if (ret < 0) {
+ dev_dbg(dev->dev, "Failed to write vlan table\n");
+ goto exit;
+ }
+
+ ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
+
+ /* update vlan cache table */
+ dev->vlan_cache[vid].table[0] = vlan_table[0];
+ dev->vlan_cache[vid].table[1] = vlan_table[1];
+ dev->vlan_cache[vid].table[2] = vlan_table[2];
+
+exit:
+ mutex_unlock(&dev->vlan_mutex);
+
+ return ret;
+}
+
+static void read_table(struct dsa_switch *ds, u32 *table)
+{
+ struct ksz_device *dev = ds->priv;
+
+ ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
+ ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
+ ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
+ ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
+}
+
+static void write_table(struct dsa_switch *ds, u32 *table)
+{
+ struct ksz_device *dev = ds->priv;
+
+ ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
+ ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
+ ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
+ ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
+}
+
+static int wait_alu_ready(struct ksz_device *dev, u32 waiton, int timeout)
+{
+ u32 data;
+
+ do {
+ ksz_read32(dev, REG_SW_ALU_CTRL__4, &data);
+ if (!(data & waiton))
+ break;
+ usleep_range(1, 10);
+ } while (timeout-- > 0);
+
+ if (timeout <= 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int wait_alu_sta_ready(struct ksz_device *dev, u32 waiton, int timeout)
+{
+ u32 data;
+
+ do {
+ ksz_read32(dev, REG_SW_ALU_STAT_CTRL__4, &data);
+ if (!(data & waiton))
+ break;
+ usleep_range(1, 10);
+ } while (timeout-- > 0);
+
+ if (timeout <= 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ksz_reset_switch(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ u8 data8;
+ u16 data16;
+ u32 data32;
+
+ /* reset switch */
+ ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);
+
+ /* turn off SPI DO Edge select */
+ ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
+ data8 &= ~SPI_AUTO_EDGE_DETECTION;
+ ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
+
+ /* default configuration */
+ ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
+ data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
+ SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
+ ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
+
+ /* disable interrupts */
+ ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
+ ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
+ ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
+
+	/* set broadcast storm protection to the 10% rate */
+ ksz_read16(dev, REG_SW_MAC_CTRL_2, &data16);
+ data16 &= ~BROADCAST_STORM_RATE;
+ data16 |= (BROADCAST_STORM_VALUE * BROADCAST_STORM_PROT_RATE) / 100;
+ ksz_write16(dev, REG_SW_MAC_CTRL_2, data16);
+
+ return 0;
+}
+
+static void port_setup(struct ksz_device *dev, int port, bool cpu_port)
+{
+ u8 data8;
+ u16 data16;
+
+	/* enable tail tag for the host port */
+ if (cpu_port)
+ ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
+ true);
+
+ ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);
+
+ /* set back pressure */
+ ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);
+
+ /* set flow control */
+ ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
+ PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL, true);
+
+ /* enable broadcast storm limit */
+ ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
+
+ /* disable DiffServ priority */
+ ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);
+
+ /* replace priority */
+ ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
+ false);
+ ksz_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
+ MTI_PVID_REPLACE, false);
+
+ /* enable 802.1p priority */
+ ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
+
+ /* configure MAC to 1G & RGMII mode */
+ ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8);
+ data8 |= PORT_RGMII_ID_EG_ENABLE;
+ data8 &= ~PORT_MII_NOT_1GBIT;
+ data8 &= ~PORT_MII_SEL_M;
+ data8 |= PORT_RGMII_SEL;
+ ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8);
+
+	/* clear pending interrupts; the 16-bit read spans both the PHY
+	 * interrupt enable and status registers
+	 */
+ ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
+}
+
+static void ksz_config_cpu_port(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int i;
+
+ ds->num_ports = dev->port_cnt;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) {
+ dev->cpu_port = i;
+
+ /* enable cpu port */
+ port_setup(dev, i, true);
+ }
+ }
+}
+
+static int ksz_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret = 0;
+
+	dev->vlan_cache = devm_kcalloc(dev->dev, dev->num_vlans,
+				       sizeof(struct vlan_table), GFP_KERNEL);
+ if (!dev->vlan_cache)
+ return -ENOMEM;
+
+ ret = ksz_reset_switch(ds);
+ if (ret) {
+ dev_err(ds->dev, "failed to reset switch\n");
+ return ret;
+ }
+
+	/* accept packets up to 2000 bytes */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_LEGAL_PACKET_DISABLE, true);
+
+ ksz_config_cpu_port(ds);
+
+ ksz_cfg(dev, REG_SW_MAC_CTRL_1, MULTICAST_STORM_DISABLE, true);
+
+	/* queue-based egress rate limit */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);
+
+ /* start switch */
+ ksz_cfg(dev, REG_SW_OPERATION, SW_START, true);
+
+ return 0;
+}
+
+static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds)
+{
+ return DSA_TAG_PROTO_KSZ;
+}
+
+static int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
+{
+ struct ksz_device *dev = ds->priv;
+ u16 val = 0;
+
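+	/* standard MII registers are mapped at 0x100 + 2 * reg inside each
+	 * port's register space (REG_PORT_PHY_CTRL is 0x0100)
+	 */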
+ ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
+
+ return val;
+}
+
+static int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
+{
+ struct ksz_device *dev = ds->priv;
+
+ ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
+
+ return 0;
+}
+
+static int ksz_enable_port(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct ksz_device *dev = ds->priv;
+
+ /* setup slave port */
+ port_setup(dev, port, false);
+
+ return 0;
+}
+
+static void ksz_disable_port(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct ksz_device *dev = ds->priv;
+
+	/* there is no per-port disable; isolate the port via MAC loopback */
+ ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, true);
+}
+
+static int ksz_sset_count(struct dsa_switch *ds)
+{
+ return TOTAL_SWITCH_COUNTER_NUM;
+}
+
+static void ksz_get_strings(struct dsa_switch *ds, int port, uint8_t *buf)
+{
+ int i;
+
+ for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
+ memcpy(buf + i * ETH_GSTRING_LEN, mib_names[i].string,
+ ETH_GSTRING_LEN);
+ }
+}
+
+static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *buf)
+{
+ struct ksz_device *dev = ds->priv;
+ int i;
+ u32 data;
+ int timeout;
+
+ mutex_lock(&dev->stats_mutex);
+
+ for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
+ data = MIB_COUNTER_READ;
+ data |= ((mib_names[i].index & 0xFF) << MIB_COUNTER_INDEX_S);
+ ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);
+
+ timeout = 1000;
+ do {
+ ksz_pread32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
+ &data);
+ usleep_range(1, 10);
+ if (!(data & MIB_COUNTER_READ))
+ break;
+ } while (timeout-- > 0);
+
+		/* failed to read the MIB; get out of the loop */
+		if (timeout < 0) {
+ dev_dbg(dev->dev, "Failed to get MIB\n");
+ break;
+ }
+
+		/* the counter resets upon read, so accumulate a running total */
+ ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
+
+ dev->mib_value[i] += (uint64_t)data;
+ buf[i] = dev->mib_value[i];
+ }
+
+ mutex_unlock(&dev->stats_mutex);
+}
+
+static void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct ksz_device *dev = ds->priv;
+ u8 data;
+
+ ksz_pread8(dev, port, P_STP_CTRL, &data);
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case BR_STATE_LISTENING:
+ data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+ break;
+ case BR_STATE_LEARNING:
+ data |= PORT_RX_ENABLE;
+ break;
+ case BR_STATE_FORWARDING:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ break;
+ case BR_STATE_BLOCKING:
+ data |= PORT_LEARN_DISABLE;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ ksz_pwrite8(dev, port, P_STP_CTRL, data);
+}
+
+static void ksz_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+ u8 data8;
+
+ ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
+ data8 |= SW_FAST_AGING;
+ ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
+
+ data8 &= ~SW_FAST_AGING;
+ ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
+}
+
+static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (flag) {
+ ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
+ PORT_VLAN_LOOKUP_VID_0, true);
+ ksz_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY, true);
+ ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true);
+ } else {
+ ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false);
+ ksz_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY, false);
+ ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
+ PORT_VLAN_LOOKUP_VID_0, false);
+ }
+
+ return 0;
+}
+
+static int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ /* nothing needed */
+
+ return 0;
+}
+
+static void ksz_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct ksz_device *dev = ds->priv;
+ u32 vlan_table[3];
+ u16 vid;
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ if (get_vlan_table(ds, vid, vlan_table)) {
+ dev_dbg(dev->dev, "Failed to get vlan table\n");
+ return;
+ }
+
+ vlan_table[0] = VLAN_VALID | (vid & VLAN_FID_M);
+ if (untagged)
+ vlan_table[1] |= BIT(port);
+ else
+ vlan_table[1] &= ~BIT(port);
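+		/* the CPU port always egresses tagged */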
+ vlan_table[1] &= ~(BIT(dev->cpu_port));
+
+ vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);
+
+ if (set_vlan_table(ds, vid, vlan_table)) {
+ dev_dbg(dev->dev, "Failed to set vlan table\n");
+ return;
+ }
+
+ /* change PVID */
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
+ ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vid);
+ }
+}
+
+static int ksz_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ksz_device *dev = ds->priv;
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ u32 vlan_table[3];
+ u16 vid;
+ u16 pvid;
+
+ ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
+ pvid = pvid & 0xFFF;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ if (get_vlan_table(ds, vid, vlan_table)) {
+ dev_dbg(dev->dev, "Failed to get vlan table\n");
+ return -ETIMEDOUT;
+ }
+
+ vlan_table[2] &= ~BIT(port);
+
+ if (pvid == vid)
+ pvid = 1;
+
+ if (untagged)
+ vlan_table[1] &= ~BIT(port);
+
+ if (set_vlan_table(ds, vid, vlan_table)) {
+ dev_dbg(dev->dev, "Failed to set vlan table\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);
+
+ return 0;
+}
+
+static int ksz_port_vlan_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_vlan *vlan,
+ switchdev_obj_dump_cb_t *cb)
+{
+ struct ksz_device *dev = ds->priv;
+ u16 vid;
+ u16 data;
+ struct vlan_table *vlan_cache;
+ int err = 0;
+
+ mutex_lock(&dev->vlan_mutex);
+
+	/* walk dev->vlan_cache; the hardware cannot search for valid
+	 * VLAN entries directly
+	 */
+ for (vid = vlan->vid_begin; vid < dev->num_vlans; vid++) {
+ vlan_cache = &dev->vlan_cache[vid];
+
+ if (!(vlan_cache->table[0] & VLAN_VALID))
+ continue;
+
+ vlan->vid_begin = vid;
+ vlan->vid_end = vid;
+ vlan->flags = 0;
+ if (vlan_cache->table[2] & BIT(port)) {
+ if (vlan_cache->table[1] & BIT(port))
+ vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+ ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &data);
+			if (vid == (data & 0xFFF))
+ vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+
+ err = cb(&vlan->obj);
+ if (err)
+ break;
+ }
+ }
+
+ mutex_unlock(&dev->vlan_mutex);
+
+ return err;
+}
+
+static int ksz_port_fdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ /* nothing needed */
+
+ return 0;
+}
+
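+/* software view of one ALU entry, mirroring the four 32-bit words accessed
+ * through REG_SW_ALU_VAL_A..REG_SW_ALU_VAL_D
+ */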
+struct alu_struct {
+ /* entry 1 */
+ u8 is_static:1;
+ u8 is_src_filter:1;
+ u8 is_dst_filter:1;
+ u8 prio_age:3;
+ u32 _reserv_0_1:23;
+ u8 mstp:3;
+ /* entry 2 */
+ u8 is_override:1;
+ u8 is_use_fid:1;
+ u32 _reserv_1_1:23;
+ u8 port_forward:7;
+ /* entry 3 & 4*/
+ u32 _reserv_2_1:9;
+ u8 fid:7;
+ u8 mac[ETH_ALEN];
+};
+
+static void ksz_port_fdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ struct ksz_device *dev = ds->priv;
+ u32 alu_table[4];
+ u32 data;
+
+ mutex_lock(&dev->alu_mutex);
+
+ /* find any entry with mac & vid */
+ data = fdb->vid << ALU_FID_INDEX_S;
+ data |= ((fdb->addr[0] << 8) | fdb->addr[1]);
+ ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
+
+ data = ((fdb->addr[2] << 24) | (fdb->addr[3] << 16));
+ data |= ((fdb->addr[4] << 8) | fdb->addr[5]);
+ ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
+
+ /* start read operation */
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
+
+ /* wait to be finished */
+ if (wait_alu_ready(dev, ALU_START, 1000) < 0) {
+ dev_dbg(dev->dev, "Failed to read ALU\n");
+ goto exit;
+ }
+
+ /* read ALU entry */
+ read_table(ds, alu_table);
+
+ /* update ALU entry */
+ alu_table[0] = ALU_V_STATIC_VALID;
+ alu_table[1] |= BIT(port);
+ if (fdb->vid)
+ alu_table[1] |= ALU_V_USE_FID;
+ alu_table[2] = (fdb->vid << ALU_V_FID_S);
+ alu_table[2] |= ((fdb->addr[0] << 8) | fdb->addr[1]);
+ alu_table[3] = ((fdb->addr[2] << 24) | (fdb->addr[3] << 16));
+ alu_table[3] |= ((fdb->addr[4] << 8) | fdb->addr[5]);
+
+ write_table(ds, alu_table);
+
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
+
+ /* wait to be finished */
+ if (wait_alu_ready(dev, ALU_START, 1000) < 0)
+		dev_dbg(dev->dev, "Failed to write ALU\n");
+
+exit:
+ mutex_unlock(&dev->alu_mutex);
+}
+
+static int ksz_port_fdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb)
+{
+ struct ksz_device *dev = ds->priv;
+ u32 alu_table[4];
+ u32 data;
+ int ret = 0;
+
+ mutex_lock(&dev->alu_mutex);
+
+ /* read any entry with mac & vid */
+ data = fdb->vid << ALU_FID_INDEX_S;
+ data |= ((fdb->addr[0] << 8) | fdb->addr[1]);
+ ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
+
+ data = ((fdb->addr[2] << 24) | (fdb->addr[3] << 16));
+ data |= ((fdb->addr[4] << 8) | fdb->addr[5]);
+ ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
+
+ /* start read operation */
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
+
+ /* wait to be finished */
+ ret = wait_alu_ready(dev, ALU_START, 1000);
+ if (ret < 0) {
+ dev_dbg(dev->dev, "Failed to read ALU\n");
+ goto exit;
+ }
+
+ ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
+ if (alu_table[0] & ALU_V_STATIC_VALID) {
+ ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
+ ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
+ ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
+
+		/* clear forwarding port */
+		alu_table[1] &= ~BIT(port);
+
+		/* if there is no port to forward, clear table */
+		if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
+ alu_table[0] = 0;
+ alu_table[1] = 0;
+ alu_table[2] = 0;
+ alu_table[3] = 0;
+ }
+ } else {
+ alu_table[0] = 0;
+ alu_table[1] = 0;
+ alu_table[2] = 0;
+ alu_table[3] = 0;
+ }
+
+ write_table(ds, alu_table);
+
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
+
+ /* wait to be finished */
+ ret = wait_alu_ready(dev, ALU_START, 1000);
+ if (ret < 0)
+ dev_dbg(dev->dev, "Failed to write ALU\n");
+
+exit:
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+static void convert_alu(struct alu_struct *alu, u32 *alu_table)
+{
+ alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID);
+ alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER);
+ alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER);
+ alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) &
+ ALU_V_PRIO_AGE_CNT_M;
+ alu->mstp = alu_table[0] & ALU_V_MSTP_M;
+
+ alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE);
+ alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID);
+ alu->port_forward = alu_table[1] & ALU_V_PORT_MAP;
+
+ alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M;
+
+ alu->mac[0] = (alu_table[2] >> 8) & 0xFF;
+ alu->mac[1] = alu_table[2] & 0xFF;
+ alu->mac[2] = (alu_table[3] >> 24) & 0xFF;
+ alu->mac[3] = (alu_table[3] >> 16) & 0xFF;
+ alu->mac[4] = (alu_table[3] >> 8) & 0xFF;
+ alu->mac[5] = alu_table[3] & 0xFF;
+}
+
+static int ksz_port_fdb_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_fdb *fdb,
+ switchdev_obj_dump_cb_t *cb)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret = 0;
+ u32 data;
+ u32 alu_table[4];
+ struct alu_struct alu;
+ int timeout;
+
+ mutex_lock(&dev->alu_mutex);
+
+ /* start ALU search */
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);
+
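+	/* each pass returns one valid entry; the hardware clears ALU_START
+	 * once the search is complete
+	 */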
+ do {
+ timeout = 1000;
+ do {
+ ksz_read32(dev, REG_SW_ALU_CTRL__4, &data);
+ if ((data & ALU_VALID) || !(data & ALU_START))
+ break;
+ usleep_range(1, 10);
+ } while (timeout-- > 0);
+
+		if (timeout < 0) {
+ dev_dbg(dev->dev, "Failed to search ALU\n");
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+
+ /* read ALU table */
+ read_table(ds, alu_table);
+
+ convert_alu(&alu, alu_table);
+
+ if (alu.port_forward & BIT(port)) {
+ fdb->vid = alu.fid;
+ if (alu.is_static)
+ fdb->ndm_state = NUD_NOARP;
+ else
+ fdb->ndm_state = NUD_REACHABLE;
+ ether_addr_copy(fdb->addr, alu.mac);
+
+ ret = cb(&fdb->obj);
+ if (ret)
+ goto exit;
+ }
+ } while (data & ALU_START);
+
+exit:
+
+ /* stop ALU search */
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);
+
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+static int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans)
+{
+ /* nothing to do */
+ return 0;
+}
+
+static void ksz_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans)
+{
+ struct ksz_device *dev = ds->priv;
+ u32 static_table[4];
+ u32 data;
+ int index;
+ u32 mac_hi, mac_lo;
+
+ mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
+ mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
+ mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
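+	/* split to match the static ALU table layout: the MAC's high 16 bits
+	 * share a word with the FID, the low 32 bits fill the next word
+	 */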
+
+ mutex_lock(&dev->alu_mutex);
+
+ for (index = 0; index < dev->num_statics; index++) {
+		/* look for an empty slot or a matching entry */
+ data = (index << ALU_STAT_INDEX_S) |
+ ALU_STAT_READ | ALU_STAT_START;
+ ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+
+ /* wait to be finished */
+ if (wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0) {
+ dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
+ goto exit;
+ }
+
+ /* read ALU static table */
+ read_table(ds, static_table);
+
+ if (static_table[0] & ALU_V_STATIC_VALID) {
+ /* check this has same vid & mac address */
+ if (((static_table[2] >> ALU_V_FID_S) == (mdb->vid)) &&
+ ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
+ (static_table[3] == mac_lo)) {
+ /* found matching one */
+ break;
+ }
+ } else {
+ /* found empty one */
+ break;
+ }
+ }
+
+ /* no available entry */
+ if (index == dev->num_statics)
+ goto exit;
+
+ /* add entry */
+ static_table[0] = ALU_V_STATIC_VALID;
+ static_table[1] |= BIT(port);
+ if (mdb->vid)
+ static_table[1] |= ALU_V_USE_FID;
+ static_table[2] = (mdb->vid << ALU_V_FID_S);
+ static_table[2] |= mac_hi;
+ static_table[3] = mac_lo;
+
+ write_table(ds, static_table);
+
+ data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
+ ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+
+ /* wait to be finished */
+ if (wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0)
+		dev_dbg(dev->dev, "Failed to write ALU STATIC\n");
+
+exit:
+ mutex_unlock(&dev->alu_mutex);
+}
+
+static int ksz_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct ksz_device *dev = ds->priv;
+ u32 static_table[4];
+ u32 data;
+ int index;
+ int ret = 0;
+ u32 mac_hi, mac_lo;
+
+ mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
+ mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
+ mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
+
+ mutex_lock(&dev->alu_mutex);
+
+ for (index = 0; index < dev->num_statics; index++) {
+		/* search for the matching entry */
+ data = (index << ALU_STAT_INDEX_S) |
+ ALU_STAT_READ | ALU_STAT_START;
+ ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+
+ /* wait to be finished */
+ ret = wait_alu_sta_ready(dev, ALU_STAT_START, 1000);
+ if (ret < 0) {
+ dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
+ goto exit;
+ }
+
+ /* read ALU static table */
+ read_table(ds, static_table);
+
+ if (static_table[0] & ALU_V_STATIC_VALID) {
+			/* check this has same vid & mac address */
+			if (((static_table[2] >> ALU_V_FID_S) == (mdb->vid)) &&
+ ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
+ (static_table[3] == mac_lo)) {
+ /* found matching one */
+ break;
+ }
+ }
+ }
+
+	/* no matching entry found */
+ if (index == dev->num_statics) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* clear port */
+ static_table[1] &= ~BIT(port);
+
+ if ((static_table[1] & ALU_V_PORT_MAP) == 0) {
+ /* delete entry */
+ static_table[0] = 0;
+ static_table[1] = 0;
+ static_table[2] = 0;
+ static_table[3] = 0;
+ }
+
+ write_table(ds, static_table);
+
+ data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
+ ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+
+ /* wait to be finished */
+ ret = wait_alu_sta_ready(dev, ALU_STAT_START, 1000);
+ if (ret < 0)
+		dev_dbg(dev->dev, "Failed to write ALU STATIC\n");
+
+exit:
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+static int ksz_port_mdb_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_mdb *mdb,
+ switchdev_obj_dump_cb_t *cb)
+{
+	/* this is not called by the switch layer */
+ return 0;
+}
+
+static int ksz_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (ingress)
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
+ else
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
+
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false);
+
+ /* configure mirror port */
+ ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ PORT_MIRROR_SNIFFER, true);
+
+ ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
+
+ return 0;
+}
+
+static void ksz_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct ksz_device *dev = ds->priv;
+ u8 data;
+
+ if (mirror->ingress)
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
+ else
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
+
+ ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
+
+ if (!(data & (PORT_MIRROR_RX | PORT_MIRROR_TX)))
+ ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ PORT_MIRROR_SNIFFER, false);
+}
+
+static const struct dsa_switch_ops ksz_switch_ops = {
+ .get_tag_protocol = ksz_get_tag_protocol,
+ .setup = ksz_setup,
+ .phy_read = ksz_phy_read16,
+ .phy_write = ksz_phy_write16,
+ .port_enable = ksz_enable_port,
+ .port_disable = ksz_disable_port,
+ .get_strings = ksz_get_strings,
+ .get_ethtool_stats = ksz_get_ethtool_stats,
+ .get_sset_count = ksz_sset_count,
+ .port_stp_state_set = ksz_port_stp_state_set,
+ .port_fast_age = ksz_port_fast_age,
+ .port_vlan_filtering = ksz_port_vlan_filtering,
+ .port_vlan_prepare = ksz_port_vlan_prepare,
+ .port_vlan_add = ksz_port_vlan_add,
+ .port_vlan_del = ksz_port_vlan_del,
+ .port_vlan_dump = ksz_port_vlan_dump,
+ .port_fdb_prepare = ksz_port_fdb_prepare,
+ .port_fdb_dump = ksz_port_fdb_dump,
+ .port_fdb_add = ksz_port_fdb_add,
+ .port_fdb_del = ksz_port_fdb_del,
+ .port_mdb_prepare = ksz_port_mdb_prepare,
+ .port_mdb_add = ksz_port_mdb_add,
+ .port_mdb_del = ksz_port_mdb_del,
+ .port_mdb_dump = ksz_port_mdb_dump,
+ .port_mirror_add = ksz_port_mirror_add,
+ .port_mirror_del = ksz_port_mirror_del,
+};
+
+struct ksz_chip_data {
+ u32 chip_id;
+ const char *dev_name;
+ int num_vlans;
+ int num_alus;
+ int num_statics;
+ int cpu_ports;
+ int port_cnt;
+};
+
+static const struct ksz_chip_data ksz_switch_chips[] = {
+ {
+ .chip_id = 0x00947700,
+ .dev_name = "KSZ9477",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* bitmap of ports usable as the CPU port */
+ .port_cnt = 7, /* total physical port count */
+ },
+};
+
+static int ksz_switch_init(struct ksz_device *dev)
+{
+ int i;
+
+ mutex_init(&dev->reg_mutex);
+ mutex_init(&dev->stats_mutex);
+ mutex_init(&dev->alu_mutex);
+ mutex_init(&dev->vlan_mutex);
+
+ dev->ds->ops = &ksz_switch_ops;
+
+ for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) {
+ const struct ksz_chip_data *chip = &ksz_switch_chips[i];
+
+ if (dev->chip_id == chip->chip_id) {
+ dev->name = chip->dev_name;
+ dev->num_vlans = chip->num_vlans;
+ dev->num_alus = chip->num_alus;
+ dev->num_statics = chip->num_statics;
+ dev->port_cnt = chip->port_cnt;
+ dev->cpu_ports = chip->cpu_ports;
+
+ break;
+ }
+ }
+
+ /* no switch found */
+ if (!dev->port_cnt)
+ return -ENODEV;
+
+ return 0;
+}
+
+struct ksz_device *ksz_switch_alloc(struct device *base,
+ const struct ksz_io_ops *ops,
+ void *priv)
+{
+ struct dsa_switch *ds;
+ struct ksz_device *swdev;
+
+ ds = dsa_switch_alloc(base, DSA_MAX_PORTS);
+ if (!ds)
+ return NULL;
+
+ swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
+ if (!swdev)
+ return NULL;
+
+ ds->priv = swdev;
+ swdev->dev = base;
+
+ swdev->ds = ds;
+ swdev->priv = priv;
+ swdev->ops = ops;
+
+ return swdev;
+}
+EXPORT_SYMBOL(ksz_switch_alloc);
+
+int ksz_switch_detect(struct ksz_device *dev)
+{
+ u8 data8;
+ u32 id32;
+ int ret;
+
+ /* turn off SPI DO Edge select */
+ ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
+ if (ret)
+ return ret;
+
+ data8 &= ~SPI_AUTO_EDGE_DETECTION;
+ ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
+ if (ret)
+ return ret;
+
+ /* read chip id */
+ ret = ksz_read32(dev, REG_CHIP_ID0__1, &id32);
+ if (ret)
+ return ret;
+
+ dev->chip_id = id32;
+
+ return 0;
+}
+EXPORT_SYMBOL(ksz_switch_detect);
+
+int ksz_switch_register(struct ksz_device *dev)
+{
+ int ret;
+
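+ /* platform data may pre-seed the chip ID, but detection below
+ * overrides it with the ID read back from the hardware
+ */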
+ if (dev->pdata)
+ dev->chip_id = dev->pdata->chip_id;
+
+ if (ksz_switch_detect(dev))
+ return -EINVAL;
+
+ ret = ksz_switch_init(dev);
+ if (ret)
+ return ret;
+
+ return dsa_register_switch(dev->ds);
+}
+EXPORT_SYMBOL(ksz_switch_register);
+
+void ksz_switch_remove(struct ksz_device *dev)
+{
+ dsa_unregister_switch(dev->ds);
+}
+EXPORT_SYMBOL(ksz_switch_remove);
+
+MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
+MODULE_DESCRIPTION("Microchip KSZ Series Switch DSA Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_priv.h b/drivers/net/dsa/microchip/ksz_priv.h
new file mode 100644
index 0000000..2a98dbd
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_priv.h
@@ -0,0 +1,210 @@
+/*
+ * Microchip KSZ series switch common definitions
+ *
+ * Copyright (C) 2017
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __KSZ_PRIV_H
+#define __KSZ_PRIV_H
+
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/phy.h>
+#include <linux/etherdevice.h>
+#include <net/dsa.h>
+
+#include "ksz_9477_reg.h"
+
+struct ksz_io_ops;
+
+struct vlan_table {
+ u32 table[3];
+};
+
+struct ksz_device {
+ struct dsa_switch *ds;
+ struct ksz_platform_data *pdata;
+ const char *name;
+
+ struct mutex reg_mutex; /* register access */
+ struct mutex stats_mutex; /* statistics access */
+ struct mutex alu_mutex; /* ALU access */
+ struct mutex vlan_mutex; /* vlan access */
+ const struct ksz_io_ops *ops;
+
+ struct device *dev;
+
+ void *priv;
+
+ /* chip specific data */
+ u32 chip_id;
+ int num_vlans;
+ int num_alus;
+ int num_statics;
+ int cpu_port; /* port connected to CPU */
+ int cpu_ports; /* bitmap of ports that can be the CPU port */
+ int port_cnt;
+
+ struct vlan_table *vlan_cache;
+
+ u64 mib_value[TOTAL_SWITCH_COUNTER_NUM];
+};
+
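+/* Register access is abstracted behind ksz_io_ops so the same switch
+ * core can sit on different buses; ksz_spi.c supplies the SPI
+ * implementation, and another bus only needs to pass its own ops
+ * table to ksz_switch_alloc().
+ */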
+struct ksz_io_ops {
+ int (*read8)(struct ksz_device *dev, u32 reg, u8 *value);
+ int (*read16)(struct ksz_device *dev, u32 reg, u16 *value);
+ int (*read24)(struct ksz_device *dev, u32 reg, u32 *value);
+ int (*read32)(struct ksz_device *dev, u32 reg, u32 *value);
+ int (*write8)(struct ksz_device *dev, u32 reg, u8 value);
+ int (*write16)(struct ksz_device *dev, u32 reg, u16 value);
+ int (*write24)(struct ksz_device *dev, u32 reg, u32 value);
+ int (*write32)(struct ksz_device *dev, u32 reg, u32 value);
+ int (*phy_read16)(struct ksz_device *dev, int addr, int reg,
+ u16 *value);
+ int (*phy_write16)(struct ksz_device *dev, int addr, int reg,
+ u16 value);
+};
+
+struct ksz_device *ksz_switch_alloc(struct device *base,
+ const struct ksz_io_ops *ops, void *priv);
+int ksz_switch_detect(struct ksz_device *dev);
+int ksz_switch_register(struct ksz_device *dev);
+void ksz_switch_remove(struct ksz_device *dev);
+
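+/* These wrappers serialize every bus transaction with reg_mutex so
+ * concurrent callers cannot interleave multi-byte accesses.
+ */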
+static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read8(dev, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_read16(struct ksz_device *dev, u32 reg, u16 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read16(dev, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_read24(struct ksz_device *dev, u32 reg, u32 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read24(dev, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read32(dev, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write8(dev, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_write16(struct ksz_device *dev, u32 reg, u16 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write16(dev, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_write24(struct ksz_device *dev, u32 reg, u32 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write24(dev, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write32(dev, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
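+/* Per-port helpers: PORT_CTRL_ADDR() translates a (port, offset) pair
+ * into the absolute register address used by the accessors above.
+ */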
+static inline void ksz_pread8(struct ksz_device *dev, int port, int offset,
+ u8 *data)
+{
+ ksz_read8(dev, PORT_CTRL_ADDR(port, offset), data);
+}
+
+static inline void ksz_pread16(struct ksz_device *dev, int port, int offset,
+ u16 *data)
+{
+ ksz_read16(dev, PORT_CTRL_ADDR(port, offset), data);
+}
+
+static inline void ksz_pread32(struct ksz_device *dev, int port, int offset,
+ u32 *data)
+{
+ ksz_read32(dev, PORT_CTRL_ADDR(port, offset), data);
+}
+
+static inline void ksz_pwrite8(struct ksz_device *dev, int port, int offset,
+ u8 data)
+{
+ ksz_write8(dev, PORT_CTRL_ADDR(port, offset), data);
+}
+
+static inline void ksz_pwrite16(struct ksz_device *dev, int port, int offset,
+ u16 data)
+{
+ ksz_write16(dev, PORT_CTRL_ADDR(port, offset), data);
+}
+
+static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
+ u32 data)
+{
+ ksz_write32(dev, PORT_CTRL_ADDR(port, offset), data);
+}
+
+#endif
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
new file mode 100644
index 0000000..c519469
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -0,0 +1,216 @@
+/*
+ * Microchip KSZ series register access through SPI
+ *
+ * Copyright (C) 2017
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <asm/unaligned.h>
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include "ksz_priv.h"
+
+/* SPI frame opcodes */
+#define KS_SPIOP_RD 3
+#define KS_SPIOP_WR 2
+
+#define SPI_ADDR_SHIFT 24
+#define SPI_ADDR_MASK (BIT(SPI_ADDR_SHIFT) - 1)
+#define SPI_TURNAROUND_SHIFT 5
+
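+/* The 32-bit command word sent ahead of any data carries the opcode in
+ * its top bits, the 24-bit register address below it, and five
+ * turnaround bits of padding at the bottom.
+ */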
+static int ksz_spi_read_reg(struct spi_device *spi, u32 reg, u8 *val,
+ unsigned int len)
+{
+ u32 txbuf;
+
+ txbuf = reg & SPI_ADDR_MASK;
+ txbuf |= KS_SPIOP_RD << SPI_ADDR_SHIFT;
+ txbuf <<= SPI_TURNAROUND_SHIFT;
+ txbuf = cpu_to_be32(txbuf);
+
+ return spi_write_then_read(spi, &txbuf, 4, val, len);
+}
+
+static int ksz_spi_read(struct ksz_device *dev, u32 reg, u8 *data,
+ unsigned int len)
+{
+ struct spi_device *spi = dev->priv;
+
+ return ksz_spi_read_reg(spi, reg, data, len);
+}
+
+static int ksz_spi_read8(struct ksz_device *dev, u32 reg, u8 *val)
+{
+ return ksz_spi_read(dev, reg, val, 1);
+}
+
+static int ksz_spi_read16(struct ksz_device *dev, u32 reg, u16 *val)
+{
+ int ret = ksz_spi_read(dev, reg, (u8 *)val, 2);
+
+ if (!ret)
+ *val = be16_to_cpu(*val);
+
+ return ret;
+}
+
+static int ksz_spi_read24(struct ksz_device *dev, u32 reg, u32 *val)
+{
+ int ret;
+
+ *val = 0;
+ ret = ksz_spi_read(dev, reg, (u8 *)val, 3);
+ if (!ret) {
+ *val = be32_to_cpu(*val);
+ /* convert to 24bit */
+ *val >>= 8;
+ }
+
+ return ret;
+}
+
+static int ksz_spi_read32(struct ksz_device *dev, u32 reg, u32 *val)
+{
+ int ret = ksz_spi_read(dev, reg, (u8 *)val, 4);
+
+ if (!ret)
+ *val = be32_to_cpu(*val);
+
+ return ret;
+}
+
+static int ksz_spi_write_reg(struct spi_device *spi, u32 reg, u8 *val,
+ unsigned int len)
+{
+ u32 txbuf;
+ u8 data[12];
+ int i;
+
+ txbuf = reg & SPI_ADDR_MASK;
+ txbuf |= (KS_SPIOP_WR << SPI_ADDR_SHIFT);
+ txbuf <<= SPI_TURNAROUND_SHIFT;
+
+ /* emit the command word MSB first regardless of host endianness */
+ put_unaligned_be32(txbuf, data);
+ for (i = 0; i < len; i++)
+ data[i + 4] = val[i];
+
+ return spi_write(spi, data, 4 + len);
+}
+
+static int ksz_spi_write8(struct ksz_device *dev, u32 reg, u8 value)
+{
+ struct spi_device *spi = dev->priv;
+
+ return ksz_spi_write_reg(spi, reg, &value, 1);
+}
+
+static int ksz_spi_write16(struct ksz_device *dev, u32 reg, u16 value)
+{
+ struct spi_device *spi = dev->priv;
+
+ value = cpu_to_be16(value);
+ return ksz_spi_write_reg(spi, reg, (u8 *)&value, 2);
+}
+
+static int ksz_spi_write24(struct ksz_device *dev, u32 reg, u32 value)
+{
+ struct spi_device *spi = dev->priv;
+
+ /* shift up so the three significant bytes are sent MSB first */
+ value <<= 8;
+ value = cpu_to_be32(value);
+ return ksz_spi_write_reg(spi, reg, (u8 *)&value, 3);
+}
+
+static int ksz_spi_write32(struct ksz_device *dev, u32 reg, u32 value)
+{
+ struct spi_device *spi = dev->priv;
+
+ value = cpu_to_be32(value);
+ return ksz_spi_write_reg(spi, reg, (u8 *)&value, 4);
+}
+
+static const struct ksz_io_ops ksz_spi_ops = {
+ .read8 = ksz_spi_read8,
+ .read16 = ksz_spi_read16,
+ .read24 = ksz_spi_read24,
+ .read32 = ksz_spi_read32,
+ .write8 = ksz_spi_write8,
+ .write16 = ksz_spi_write16,
+ .write24 = ksz_spi_write24,
+ .write32 = ksz_spi_write32,
+};
+
+static int ksz_spi_probe(struct spi_device *spi)
+{
+ struct ksz_device *dev;
+ int ret;
+
+ dev = ksz_switch_alloc(&spi->dev, &ksz_spi_ops, spi);
+ if (!dev)
+ return -ENOMEM;
+
+ if (spi->dev.platform_data)
+ dev->pdata = spi->dev.platform_data;
+
+ ret = ksz_switch_register(dev);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, dev);
+
+ return 0;
+}
+
+static int ksz_spi_remove(struct spi_device *spi)
+{
+ struct ksz_device *dev = spi_get_drvdata(spi);
+
+ if (dev)
+ ksz_switch_remove(dev);
+
+ return 0;
+}
+
+static const struct of_device_id ksz_dt_ids[] = {
+ { .compatible = "microchip,ksz9477" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ksz_dt_ids);
+
+static struct spi_driver ksz_spi_driver = {
+ .driver = {
+ .name = "ksz9477-switch",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(ksz_dt_ids),
+ },
+ .probe = ksz_spi_probe,
+ .remove = ksz_spi_remove,
+};
+
+module_spi_driver(ksz_spi_driver);
+
+MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
+MODULE_DESCRIPTION("Microchip KSZ Series Switch SPI access Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index b070c16..1e46418 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -28,7 +28,6 @@
#include <linux/reset.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>
-#include <net/switchdev.h>
#include "mt7530.h"
@@ -854,7 +853,7 @@
static int
mt7530_port_fdb_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
struct mt7530_priv *priv = ds->priv;
struct mt7530_fdb _fdb = { 0 };
@@ -913,11 +912,11 @@
struct device_node *dn;
struct mt7530_dummy_poll p;
- /* The parent node of master_netdev which holds the common system
+ /* The parent node of cpu_dp->netdev which holds the common system
* controller also is the container for two GMACs nodes representing
* as two netdev instances.
*/
- dn = ds->master_netdev->dev.of_node->parent;
+ dn = ds->dst->cpu_dp->netdev->dev.of_node->parent;
priv->ethernet = syscon_node_to_regmap(dn);
if (IS_ERR(priv->ethernet))
return PTR_ERR(priv->ethernet);
@@ -1081,7 +1080,7 @@
mutex_init(&priv->reg_mutex);
dev_set_drvdata(&mdiodev->dev, priv);
- return dsa_register_switch(priv->ds, &mdiodev->dev);
+ return dsa_register_switch(priv->ds);
}
static void
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 5934b7a..dce7fa5 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -176,7 +176,7 @@
((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) |
(dsa_is_cpu_port(ds, p) ?
ds->enabled_port_mask :
- BIT(ds->dst->cpu_port)));
+ BIT(ds->dst->cpu_dp->index)));
/* Port Association Vector: when learning source addresses
* of packets, add the address to the address database using
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index 6edd869..5cd5551 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -4,4 +4,6 @@
mv88e6xxx-objs += global1_atu.o
mv88e6xxx-objs += global1_vtu.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2.o
+mv88e6xxx-objs += phy.o
mv88e6xxx-objs += port.o
+mv88e6xxx-objs += serdes.o
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index d034d8c..53b0881 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -32,12 +32,13 @@
#include <linux/gpio/consumer.h>
#include <linux/phy.h>
#include <net/dsa.h>
-#include <net/switchdev.h>
-#include "mv88e6xxx.h"
+#include "chip.h"
#include "global1.h"
#include "global2.h"
+#include "phy.h"
#include "port.h"
+#include "serdes.h"
static void assert_reg_lock(struct mv88e6xxx_chip *chip)
{
@@ -222,21 +223,7 @@
return 0;
}
-static int mv88e6165_phy_read(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 *val)
-{
- return mv88e6xxx_read(chip, addr, reg, val);
-}
-
-static int mv88e6165_phy_write(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 val)
-{
- return mv88e6xxx_write(chip, addr, reg, val);
-}
-
-static struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
+struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
{
struct mv88e6xxx_mdio_bus *mdio_bus;
@@ -248,106 +235,6 @@
return mdio_bus->bus;
}
-static int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy,
- int reg, u16 *val)
-{
- int addr = phy; /* PHY devices addresses start at 0x0 */
- struct mii_bus *bus;
-
- bus = mv88e6xxx_default_mdio_bus(chip);
- if (!bus)
- return -EOPNOTSUPP;
-
- if (!chip->info->ops->phy_read)
- return -EOPNOTSUPP;
-
- return chip->info->ops->phy_read(chip, bus, addr, reg, val);
-}
-
-static int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy,
- int reg, u16 val)
-{
- int addr = phy; /* PHY devices addresses start at 0x0 */
- struct mii_bus *bus;
-
- bus = mv88e6xxx_default_mdio_bus(chip);
- if (!bus)
- return -EOPNOTSUPP;
-
- if (!chip->info->ops->phy_write)
- return -EOPNOTSUPP;
-
- return chip->info->ops->phy_write(chip, bus, addr, reg, val);
-}
-
-static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page)
-{
- if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_PHY_PAGE))
- return -EOPNOTSUPP;
-
- return mv88e6xxx_phy_write(chip, phy, PHY_PAGE, page);
-}
-
-static void mv88e6xxx_phy_page_put(struct mv88e6xxx_chip *chip, int phy)
-{
- int err;
-
- /* Restore PHY page Copper 0x0 for access via the registered MDIO bus */
- err = mv88e6xxx_phy_write(chip, phy, PHY_PAGE, PHY_PAGE_COPPER);
- if (unlikely(err)) {
- dev_err(chip->dev, "failed to restore PHY %d page Copper (%d)\n",
- phy, err);
- }
-}
-
-static int mv88e6xxx_phy_page_read(struct mv88e6xxx_chip *chip, int phy,
- u8 page, int reg, u16 *val)
-{
- int err;
-
- /* There is no paging for registers 22 */
- if (reg == PHY_PAGE)
- return -EINVAL;
-
- err = mv88e6xxx_phy_page_get(chip, phy, page);
- if (!err) {
- err = mv88e6xxx_phy_read(chip, phy, reg, val);
- mv88e6xxx_phy_page_put(chip, phy);
- }
-
- return err;
-}
-
-static int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy,
- u8 page, int reg, u16 val)
-{
- int err;
-
- /* There is no paging for registers 22 */
- if (reg == PHY_PAGE)
- return -EINVAL;
-
- err = mv88e6xxx_phy_page_get(chip, phy, page);
- if (!err) {
- err = mv88e6xxx_phy_write(chip, phy, PHY_PAGE, page);
- mv88e6xxx_phy_page_put(chip, phy);
- }
-
- return err;
-}
-
-static int mv88e6xxx_serdes_read(struct mv88e6xxx_chip *chip, int reg, u16 *val)
-{
- return mv88e6xxx_phy_page_read(chip, ADDR_SERDES, SERDES_PAGE_FIBER,
- reg, val);
-}
-
-static int mv88e6xxx_serdes_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
-{
- return mv88e6xxx_phy_page_write(chip, ADDR_SERDES, SERDES_PAGE_FIBER,
- reg, val);
-}
-
static void mv88e6xxx_g1_irq_mask(struct irq_data *d)
{
struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
@@ -374,7 +261,7 @@
int err;
mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &reg);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg);
mutex_unlock(&chip->reg_lock);
if (err)
@@ -405,14 +292,14 @@
u16 reg;
int err;
- err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &reg);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &reg);
if (err)
goto out;
reg &= ~mask;
reg |= (~chip->g1_irq.masked & mask);
- err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, reg);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, reg);
if (err)
goto out;
@@ -451,9 +338,9 @@
int irq, virq;
u16 mask;
- mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &mask);
+ mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &mask);
mask |= GENMASK(chip->g1_irq.nirqs, 0);
- mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, mask);
+ mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
free_irq(chip->irq, chip);
@@ -483,18 +370,18 @@
chip->g1_irq.chip = mv88e6xxx_g1_irq_chip;
chip->g1_irq.masked = ~0;
- err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &mask);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &mask);
if (err)
goto out_mapping;
mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
- err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, mask);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
if (err)
goto out_disable;
/* Reading the interrupt status clears (most of) them */
- err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &reg);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg);
if (err)
goto out_disable;
@@ -509,7 +396,7 @@
out_disable:
mask |= GENMASK(chip->g1_irq.nirqs, 0);
- mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, mask);
+ mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
out_mapping:
for (irq = 0; irq < 16; irq++) {
@@ -561,122 +448,6 @@
return mv88e6xxx_write(chip, addr, reg, val);
}
-static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip)
-{
- if (!chip->info->ops->ppu_disable)
- return 0;
-
- return chip->info->ops->ppu_disable(chip);
-}
-
-static int mv88e6xxx_ppu_enable(struct mv88e6xxx_chip *chip)
-{
- if (!chip->info->ops->ppu_enable)
- return 0;
-
- return chip->info->ops->ppu_enable(chip);
-}
-
-static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
-{
- struct mv88e6xxx_chip *chip;
-
- chip = container_of(ugly, struct mv88e6xxx_chip, ppu_work);
-
- mutex_lock(&chip->reg_lock);
-
- if (mutex_trylock(&chip->ppu_mutex)) {
- if (mv88e6xxx_ppu_enable(chip) == 0)
- chip->ppu_disabled = 0;
- mutex_unlock(&chip->ppu_mutex);
- }
-
- mutex_unlock(&chip->reg_lock);
-}
-
-static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
-{
- struct mv88e6xxx_chip *chip = (void *)_ps;
-
- schedule_work(&chip->ppu_work);
-}
-
-static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_chip *chip)
-{
- int ret;
-
- mutex_lock(&chip->ppu_mutex);
-
- /* If the PHY polling unit is enabled, disable it so that
- * we can access the PHY registers. If it was already
- * disabled, cancel the timer that is going to re-enable
- * it.
- */
- if (!chip->ppu_disabled) {
- ret = mv88e6xxx_ppu_disable(chip);
- if (ret < 0) {
- mutex_unlock(&chip->ppu_mutex);
- return ret;
- }
- chip->ppu_disabled = 1;
- } else {
- del_timer(&chip->ppu_timer);
- ret = 0;
- }
-
- return ret;
-}
-
-static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_chip *chip)
-{
- /* Schedule a timer to re-enable the PHY polling unit. */
- mod_timer(&chip->ppu_timer, jiffies + msecs_to_jiffies(10));
- mutex_unlock(&chip->ppu_mutex);
-}
-
-static void mv88e6xxx_ppu_state_init(struct mv88e6xxx_chip *chip)
-{
- mutex_init(&chip->ppu_mutex);
- INIT_WORK(&chip->ppu_work, mv88e6xxx_ppu_reenable_work);
- setup_timer(&chip->ppu_timer, mv88e6xxx_ppu_reenable_timer,
- (unsigned long)chip);
-}
-
-static void mv88e6xxx_ppu_state_destroy(struct mv88e6xxx_chip *chip)
-{
- del_timer_sync(&chip->ppu_timer);
-}
-
-static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 *val)
-{
- int err;
-
- err = mv88e6xxx_ppu_access_get(chip);
- if (!err) {
- err = mv88e6xxx_read(chip, addr, reg, val);
- mv88e6xxx_ppu_access_put(chip);
- }
-
- return err;
-}
-
-static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 val)
-{
- int err;
-
- err = mv88e6xxx_ppu_access_get(chip);
- if (!err) {
- err = mv88e6xxx_write(chip, addr, reg, val);
- mv88e6xxx_ppu_access_put(chip);
- }
-
- return err;
-}
-
static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
int link, int speed, int duplex,
phy_interface_t mode)
@@ -718,8 +489,7 @@
err = 0;
restore_link:
if (chip->info->ops->port_set_link(chip, port, link))
- netdev_err(chip->ds->ports[port].netdev,
- "failed to restore MAC's link\n");
+ dev_err(chip->dev, "p%d: failed to restore MAC's link\n", port);
return err;
}
@@ -743,7 +513,7 @@
mutex_unlock(&chip->reg_lock);
if (err && err != -EOPNOTSUPP)
- netdev_err(ds->ports[port].netdev, "failed to configure MAC\n");
+ dev_err(ds->dev, "p%d: failed to configure MAC\n", port);
}
static int mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
@@ -955,7 +725,7 @@
{
return mv88e6xxx_stats_get_stats(chip, port, data,
STATS_TYPE_BANK0 | STATS_TYPE_PORT,
- 0, GLOBAL_STATS_OP_HIST_RX_TX);
+ 0, MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
}
static void mv88e6320_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
@@ -963,8 +733,8 @@
{
return mv88e6xxx_stats_get_stats(chip, port, data,
STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
- GLOBAL_STATS_OP_BANK_1_BIT_9,
- GLOBAL_STATS_OP_HIST_RX_TX);
+ MV88E6XXX_G1_STATS_OP_BANK_1_BIT_9,
+ MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
}
static void mv88e6390_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
@@ -972,7 +742,8 @@
{
return mv88e6xxx_stats_get_stats(chip, port, data,
STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
- GLOBAL_STATS_OP_BANK_1_BIT_10, 0);
+ MV88E6XXX_G1_STATS_OP_BANK_1_BIT_10,
+ 0);
}
static void mv88e6xxx_get_stats(struct mv88e6xxx_chip *chip, int port,
@@ -1058,11 +829,11 @@
e->eee_enabled = !!(reg & 0x0200);
e->tx_lpi_enabled = !!(reg & 0x0100);
- err = mv88e6xxx_port_read(chip, port, PORT_STATUS, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
if (err)
goto out;
- e->eee_active = !!(reg & PORT_STATUS_EEE);
+ e->eee_active = !!(reg & MV88E6352_PORT_STS_EEE);
out:
mutex_unlock(&chip->reg_lock);
@@ -1145,32 +916,14 @@
u8 state)
{
struct mv88e6xxx_chip *chip = ds->priv;
- int stp_state;
int err;
- switch (state) {
- case BR_STATE_DISABLED:
- stp_state = PORT_CONTROL_STATE_DISABLED;
- break;
- case BR_STATE_BLOCKING:
- case BR_STATE_LISTENING:
- stp_state = PORT_CONTROL_STATE_BLOCKING;
- break;
- case BR_STATE_LEARNING:
- stp_state = PORT_CONTROL_STATE_LEARNING;
- break;
- case BR_STATE_FORWARDING:
- default:
- stp_state = PORT_CONTROL_STATE_FORWARDING;
- break;
- }
-
mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_port_set_state(chip, port, stp_state);
+ err = mv88e6xxx_port_set_state(chip, port, state);
mutex_unlock(&chip->reg_lock);
if (err)
- netdev_err(ds->ports[port].netdev, "failed to update state\n");
+ dev_err(ds->dev, "p%d: failed to update state\n", port);
}
static int mv88e6xxx_atu_setup(struct mv88e6xxx_chip *chip)
@@ -1188,6 +941,26 @@
return mv88e6xxx_g1_atu_set_age_time(chip, 300000);
}
+static int mv88e6xxx_irl_setup(struct mv88e6xxx_chip *chip)
+{
+ int port;
+ int err;
+
+ if (!chip->info->ops->irl_init_all)
+ return 0;
+
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+ /* Disable ingress rate limiting by resetting all per port
+ * ingress rate limit resources to their initial state.
+ */
+ err = chip->info->ops->irl_init_all(chip, port);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
{
u16 pvlan = 0;
@@ -1238,7 +1011,7 @@
mutex_unlock(&chip->reg_lock);
if (err)
- netdev_err(ds->ports[port].netdev, "failed to flush ATU\n");
+ dev_err(ds->dev, "p%d: failed to flush ATU\n", port);
}
static int mv88e6xxx_vtu_setup(struct mv88e6xxx_chip *chip)
@@ -1269,7 +1042,7 @@
static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_vlan *vlan,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_vtu_entry next = {
@@ -1295,7 +1068,8 @@
if (!next.valid)
break;
- if (next.member[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
+ if (next.member[port] ==
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
continue;
/* reinit and dump this VLAN obj */
@@ -1303,7 +1077,8 @@
vlan->vid_end = next.vid;
vlan->flags = 0;
- if (next.member[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
+ if (next.member[port] ==
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNTAGGED)
vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
if (next.vid == pvid)
@@ -1388,11 +1163,10 @@
entry->valid = true;
entry->vid = vid;
- /* Include only CPU and DSA ports */
+ /* Exclude all ports */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
- entry->member[i] = dsa_is_normal_port(chip->ds, i) ?
- GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER :
- GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED;
+ entry->member[i] =
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER;
return mv88e6xxx_atu_new(chip, &entry->fid);
}
@@ -1434,7 +1208,7 @@
continue;
if (vlan.member[i] ==
- GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
continue;
if (ds->ports[i].bridge_dev ==
@@ -1444,10 +1218,9 @@
if (!ds->ports[i].bridge_dev)
continue;
- netdev_warn(ds->ports[port].netdev,
- "hardware VLAN %d already used by %s\n",
- vlan.vid,
- netdev_name(ds->ports[i].bridge_dev));
+ dev_err(ds->dev, "p%d: hw VLAN %d already used by %s\n",
+ port, vlan.vid,
+ netdev_name(ds->ports[i].bridge_dev));
err = -EOPNOTSUPP;
goto unlock;
}
@@ -1463,8 +1236,8 @@
bool vlan_filtering)
{
struct mv88e6xxx_chip *chip = ds->priv;
- u16 mode = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
- PORT_CONTROL_2_8021Q_DISABLED;
+ u16 mode = vlan_filtering ? MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE :
+ MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED;
int err;
if (!chip->info->max_vid)
@@ -1503,7 +1276,7 @@
}
static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port,
- u16 vid, bool untagged)
+ u16 vid, u8 member)
{
struct mv88e6xxx_vtu_entry vlan;
int err;
@@ -1512,9 +1285,7 @@
if (err)
return err;
- vlan.member[port] = untagged ?
- GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
- GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
+ vlan.member[port] = member;
return mv88e6xxx_vtu_loadpurge(chip, &vlan);
}
@@ -1526,22 +1297,29 @@
struct mv88e6xxx_chip *chip = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ u8 member;
u16 vid;
if (!chip->info->max_vid)
return;
+ if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+ member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED;
+ else if (untagged)
+ member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNTAGGED;
+ else
+ member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_TAGGED;
+
mutex_lock(&chip->reg_lock);
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
- if (_mv88e6xxx_port_vlan_add(chip, port, vid, untagged))
- netdev_err(ds->ports[port].netdev,
- "failed to add VLAN %d%c\n",
- vid, untagged ? 'u' : 't');
+ if (_mv88e6xxx_port_vlan_add(chip, port, vid, member))
+ dev_err(ds->dev, "p%d: failed to add VLAN %d%c\n", port,
+ vid, untagged ? 'u' : 't');
if (pvid && mv88e6xxx_port_set_pvid(chip, port, vlan->vid_end))
- netdev_err(ds->ports[port].netdev, "failed to set PVID %d\n",
- vlan->vid_end);
+ dev_err(ds->dev, "p%d: failed to set PVID %d\n", port,
+ vlan->vid_end);
mutex_unlock(&chip->reg_lock);
}
@@ -1549,7 +1327,6 @@
static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip,
int port, u16 vid)
{
- struct dsa_switch *ds = chip->ds;
struct mv88e6xxx_vtu_entry vlan;
int i, err;
@@ -1558,18 +1335,16 @@
return err;
/* Tell switchdev if this VLAN is handled in software */
- if (vlan.member[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
+ if (vlan.member[port] == MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
return -EOPNOTSUPP;
- vlan.member[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
+ vlan.member[port] = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER;
/* keep the VLAN unless all ports are excluded */
vlan.valid = false;
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
- continue;
-
- if (vlan.member[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
+ if (vlan.member[i] !=
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
vlan.valid = true;
break;
}
@@ -1632,7 +1407,7 @@
if (err)
return err;
- entry.state = GLOBAL_ATU_DATA_STATE_UNUSED;
+ entry.state = MV88E6XXX_G1_ATU_DATA_STATE_UNUSED;
ether_addr_copy(entry.mac, addr);
eth_addr_dec(entry.mac);
@@ -1641,17 +1416,17 @@
return err;
/* Initialize a fresh ATU entry if it isn't found */
- if (entry.state == GLOBAL_ATU_DATA_STATE_UNUSED ||
+ if (entry.state == MV88E6XXX_G1_ATU_DATA_STATE_UNUSED ||
!ether_addr_equal(entry.mac, addr)) {
memset(&entry, 0, sizeof(entry));
ether_addr_copy(entry.mac, addr);
}
/* Purge the ATU entry only if no port is using it anymore */
- if (state == GLOBAL_ATU_DATA_STATE_UNUSED) {
+ if (state == MV88E6XXX_G1_ATU_DATA_STATE_UNUSED) {
entry.portvec &= ~BIT(port);
if (!entry.portvec)
- entry.state = GLOBAL_ATU_DATA_STATE_UNUSED;
+ entry.state = MV88E6XXX_G1_ATU_DATA_STATE_UNUSED;
} else {
entry.portvec |= BIT(port);
entry.state = state;
@@ -1678,8 +1453,9 @@
mutex_lock(&chip->reg_lock);
if (mv88e6xxx_port_db_load_purge(chip, port, fdb->addr, fdb->vid,
- GLOBAL_ATU_DATA_STATE_UC_STATIC))
- netdev_err(ds->ports[port].netdev, "failed to load unicast MAC address\n");
+ MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC))
+ dev_err(ds->dev, "p%d: failed to load unicast MAC address\n",
+ port);
mutex_unlock(&chip->reg_lock);
}
@@ -1691,7 +1467,7 @@
mutex_lock(&chip->reg_lock);
err = mv88e6xxx_port_db_load_purge(chip, port, fdb->addr, fdb->vid,
- GLOBAL_ATU_DATA_STATE_UNUSED);
+ MV88E6XXX_G1_ATU_DATA_STATE_UNUSED);
mutex_unlock(&chip->reg_lock);
return err;
@@ -1700,12 +1476,12 @@
static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip,
u16 fid, u16 vid, int port,
struct switchdev_obj *obj,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
struct mv88e6xxx_atu_entry addr;
int err;
- addr.state = GLOBAL_ATU_DATA_STATE_UNUSED;
+ addr.state = MV88E6XXX_G1_ATU_DATA_STATE_UNUSED;
eth_broadcast_addr(addr.mac);
do {
@@ -1713,7 +1489,7 @@
if (err)
return err;
- if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
+ if (addr.state == MV88E6XXX_G1_ATU_DATA_STATE_UNUSED)
break;
if (addr.trunk || (addr.portvec & BIT(port)) == 0)
@@ -1728,7 +1504,7 @@
fdb = SWITCHDEV_OBJ_PORT_FDB(obj);
fdb->vid = vid;
ether_addr_copy(fdb->addr, addr.mac);
- if (addr.state == GLOBAL_ATU_DATA_STATE_UC_STATIC)
+ if (addr.state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC)
fdb->ndm_state = NUD_NOARP;
else
fdb->ndm_state = NUD_REACHABLE;
@@ -1755,7 +1531,7 @@
static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
struct switchdev_obj *obj,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
struct mv88e6xxx_vtu_entry vlan = {
.vid = chip->info->max_vid,
@@ -1792,7 +1568,7 @@
static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -1924,8 +1700,7 @@
/* Set all ports to the Disabled state */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
- err = mv88e6xxx_port_set_state(chip, i,
- PORT_CONTROL_STATE_DISABLED);
+ err = mv88e6xxx_port_set_state(chip, i, BR_STATE_DISABLED);
if (err)
return err;
}
@@ -1951,27 +1726,9 @@
return mv88e6xxx_software_reset(chip);
}
-static int mv88e6xxx_serdes_power_on(struct mv88e6xxx_chip *chip)
-{
- u16 val;
- int err;
-
- /* Clear Power Down bit */
- err = mv88e6xxx_serdes_read(chip, MII_BMCR, &val);
- if (err)
- return err;
-
- if (val & BMCR_PDOWN) {
- val &= ~BMCR_PDOWN;
- err = mv88e6xxx_serdes_write(chip, MII_BMCR, val);
- }
-
- return err;
-}
-
static int mv88e6xxx_set_port_mode(struct mv88e6xxx_chip *chip, int port,
- enum mv88e6xxx_frame_mode frame, u16 egress,
- u16 etype)
+ enum mv88e6xxx_frame_mode frame,
+ enum mv88e6xxx_egress_mode egress, u16 etype)
{
int err;
@@ -1995,22 +1752,23 @@
static int mv88e6xxx_set_port_mode_normal(struct mv88e6xxx_chip *chip, int port)
{
return mv88e6xxx_set_port_mode(chip, port, MV88E6XXX_FRAME_MODE_NORMAL,
- PORT_CONTROL_EGRESS_UNMODIFIED,
- PORT_ETH_TYPE_DEFAULT);
+ MV88E6XXX_EGRESS_MODE_UNMODIFIED,
+ MV88E6XXX_PORT_ETH_TYPE_DEFAULT);
}
static int mv88e6xxx_set_port_mode_dsa(struct mv88e6xxx_chip *chip, int port)
{
return mv88e6xxx_set_port_mode(chip, port, MV88E6XXX_FRAME_MODE_DSA,
- PORT_CONTROL_EGRESS_UNMODIFIED,
- PORT_ETH_TYPE_DEFAULT);
+ MV88E6XXX_EGRESS_MODE_UNMODIFIED,
+ MV88E6XXX_PORT_ETH_TYPE_DEFAULT);
}
static int mv88e6xxx_set_port_mode_edsa(struct mv88e6xxx_chip *chip, int port)
{
return mv88e6xxx_set_port_mode(chip, port,
MV88E6XXX_FRAME_MODE_ETHERTYPE,
- PORT_CONTROL_EGRESS_ADD_TAG, ETH_P_EDSA);
+ MV88E6XXX_EGRESS_MODE_ETHERTYPE,
+ ETH_P_EDSA);
}
static int mv88e6xxx_setup_port_mode(struct mv88e6xxx_chip *chip, int port)
@@ -2050,6 +1808,15 @@
return 0;
}
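+
+/* Chips without a serdes_power op silently succeed, so callers can use
+ * this for every port.
+ */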
+static int mv88e6xxx_serdes_power(struct mv88e6xxx_chip *chip, int port,
+ bool on)
+{
+ if (chip->info->ops->serdes_power)
+ return chip->info->ops->serdes_power(chip, port, on);
+
+ return 0;
+}
+
static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
{
struct dsa_switch *ds = chip->ds;
@@ -2085,10 +1852,10 @@
* If this is the upstream port for this switch, enable
* forwarding of unknown unicasts and multicasts.
*/
- reg = PORT_CONTROL_IGMP_MLD_SNOOP |
- PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
- PORT_CONTROL_STATE_FORWARDING;
- err = mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
+ reg = MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP |
+ MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
+ MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
if (err)
return err;
@@ -2100,21 +1867,14 @@
if (err)
return err;
- /* If this port is connected to a SerDes, make sure the SerDes is not
- * powered down.
+ /* Enable the SERDES interface for DSA and CPU ports. Normal
+ * ports SERDES are enabled when the port is enabled, thus
+ * saving a bit of power.
*/
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_SERDES)) {
- err = mv88e6xxx_port_read(chip, port, PORT_STATUS, &reg);
+ if ((dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))) {
+ err = mv88e6xxx_serdes_power(chip, port, true);
if (err)
return err;
- reg &= PORT_STATUS_CMODE_MASK;
- if ((reg == PORT_STATUS_CMODE_100BASE_X) ||
- (reg == PORT_STATUS_CMODE_1000BASE_X) ||
- (reg == PORT_STATUS_CMODE_SGMII)) {
- err = mv88e6xxx_serdes_power_on(chip);
- if (err < 0)
- return err;
- }
}
/* Port Control 2: don't force a good FCS, set the maximum frame size to
@@ -2136,12 +1896,12 @@
}
err = mv88e6xxx_port_set_8021q_mode(chip, port,
- PORT_CONTROL_2_8021Q_DISABLED);
+ MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED);
if (err)
return err;
- if (chip->info->ops->port_jumbo_config) {
- err = chip->info->ops->port_jumbo_config(chip, port);
+ if (chip->info->ops->port_set_jumbo_size) {
+ err = chip->info->ops->port_set_jumbo_size(chip, port, 10240);
if (err)
return err;
}
@@ -2156,17 +1916,19 @@
if (dsa_is_cpu_port(ds, port))
reg = 0;
- err = mv88e6xxx_port_write(chip, port, PORT_ASSOC_VECTOR, reg);
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR,
+ reg);
if (err)
return err;
/* Egress rate control 2: disable egress rate control. */
- err = mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL_2, 0x0000);
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_EGRESS_RATE_CTL2,
+ 0x0000);
if (err)
return err;
- if (chip->info->ops->port_pause_config) {
- err = chip->info->ops->port_pause_config(chip, port);
+ if (chip->info->ops->port_pause_limit) {
+ err = chip->info->ops->port_pause_limit(chip, port, 0, 0);
if (err)
return err;
}
@@ -2214,26 +1976,31 @@
/* Default VLAN ID and priority: don't set a default VLAN
* ID, and set the default packet priority to zero.
*/
- return mv88e6xxx_port_write(chip, port, PORT_DEFAULT_VLAN, 0x0000);
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_DEFAULT_VLAN, 0);
}
-static int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
+static int mv88e6xxx_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
{
+ struct mv88e6xxx_chip *chip = ds->priv;
int err;
- err = mv88e6xxx_g1_write(chip, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
- if (err)
- return err;
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_serdes_power(chip, port, true);
+ mutex_unlock(&chip->reg_lock);
- err = mv88e6xxx_g1_write(chip, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
- if (err)
- return err;
+ return err;
+}
- err = mv88e6xxx_g1_write(chip, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
- if (err)
- return err;
+static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
- return 0;
+ mutex_lock(&chip->reg_lock);
+ if (mv88e6xxx_serdes_power(chip, port, false))
+ dev_err(chip->dev, "failed to power off SERDES\n");
+ mutex_unlock(&chip->reg_lock);
}
static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
@@ -2255,60 +2022,53 @@
u32 upstream_port = dsa_upstream_port(ds);
int err;
- /* Enable the PHY Polling Unit if present, don't discard any packets,
- * and mask all interrupt sources.
- */
- err = mv88e6xxx_ppu_enable(chip);
- if (err)
- return err;
-
- if (chip->info->ops->g1_set_cpu_port) {
- err = chip->info->ops->g1_set_cpu_port(chip, upstream_port);
+ if (chip->info->ops->set_cpu_port) {
+ err = chip->info->ops->set_cpu_port(chip, upstream_port);
if (err)
return err;
}
- if (chip->info->ops->g1_set_egress_port) {
- err = chip->info->ops->g1_set_egress_port(chip, upstream_port);
+ if (chip->info->ops->set_egress_port) {
+ err = chip->info->ops->set_egress_port(chip, upstream_port);
if (err)
return err;
}
/* Disable remote management, and set the switch's DSA device number. */
- err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL_2,
- GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2,
+ MV88E6XXX_G1_CTL2_MULTIPLE_CASCADE |
(ds->index & 0x1f));
if (err)
return err;
/* Configure the IP ToS mapping registers. */
- err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_0, 0x0000);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_0, 0x0000);
if (err)
return err;
- err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_1, 0x0000);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_1, 0x0000);
if (err)
return err;
- err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_2, 0x5555);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_2, 0x5555);
if (err)
return err;
- err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_3, 0x5555);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_3, 0x5555);
if (err)
return err;
- err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_4, 0xaaaa);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_4, 0xaaaa);
if (err)
return err;
- err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_5, 0xaaaa);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_5, 0xaaaa);
if (err)
return err;
- err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_6, 0xffff);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_6, 0xffff);
if (err)
return err;
- err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_7, 0xffff);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_7, 0xffff);
if (err)
return err;
/* Configure the IEEE 802.1p priority mapping register. */
- err = mv88e6xxx_g1_write(chip, GLOBAL_IEEE_PRI, 0xfa41);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IEEE_PRI, 0xfa41);
if (err)
return err;
@@ -2318,8 +2078,9 @@
return err;
/* Clear the statistics counters for all ports */
- err = mv88e6xxx_g1_write(chip, GLOBAL_STATS_OP,
- GLOBAL_STATS_OP_FLUSH_ALL);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP,
+ MV88E6XXX_G1_STATS_OP_BUSY |
+ MV88E6XXX_G1_STATS_OP_FLUSH_ALL);
if (err)
return err;
@@ -2361,6 +2122,14 @@
goto unlock;
}
+ err = mv88e6xxx_irl_setup(chip);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_phy_setup(chip);
+ if (err)
+ goto unlock;
+
err = mv88e6xxx_vtu_setup(chip);
if (err)
goto unlock;
@@ -2424,7 +2193,7 @@
* the mv88e6390 family model number instead.
*/
if (!(val & 0x3f0))
- val |= PORT_SWITCH_ID_PROD_NUM_6390;
+ val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
}
return err ? err : val;
@@ -2594,9 +2363,10 @@
static const struct mv88e6xxx_ops mv88e6085_ops = {
/* MV88E6XXX_FAMILY_6097 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g1_set_switch_mac,
- .phy_read = mv88e6xxx_phy_ppu_read,
- .phy_write = mv88e6xxx_phy_ppu_write,
+ .phy_read = mv88e6185_phy_ppu_read,
+ .phy_write = mv88e6185_phy_ppu_write,
.port_set_link = mv88e6xxx_port_set_link,
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
@@ -2605,15 +2375,15 @@
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
- .port_pause_config = mv88e6097_port_pause_config,
+ .port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
- .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.ppu_enable = mv88e6185_g1_ppu_enable,
@@ -2626,8 +2396,8 @@
static const struct mv88e6xxx_ops mv88e6095_ops = {
/* MV88E6XXX_FAMILY_6095 */
.set_switch_mac = mv88e6xxx_g1_set_switch_mac,
- .phy_read = mv88e6xxx_phy_ppu_read,
- .phy_write = mv88e6xxx_phy_ppu_write,
+ .phy_read = mv88e6185_phy_ppu_read,
+ .phy_write = mv88e6185_phy_ppu_write,
.port_set_link = mv88e6xxx_port_set_link,
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
@@ -2648,6 +2418,7 @@
static const struct mv88e6xxx_ops mv88e6097_ops = {
/* MV88E6XXX_FAMILY_6097 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -2658,17 +2429,17 @@
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
- .port_pause_config = mv88e6097_port_pause_config,
+ .port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
- .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
@@ -2678,9 +2449,10 @@
static const struct mv88e6xxx_ops mv88e6123_ops = {
/* MV88E6XXX_FAMILY_6165 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6165_phy_read,
- .phy_write = mv88e6165_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
@@ -2688,12 +2460,12 @@
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
- .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
@@ -2704,8 +2476,8 @@
static const struct mv88e6xxx_ops mv88e6131_ops = {
/* MV88E6XXX_FAMILY_6185 */
.set_switch_mac = mv88e6xxx_g1_set_switch_mac,
- .phy_read = mv88e6xxx_phy_ppu_read,
- .phy_write = mv88e6xxx_phy_ppu_write,
+ .phy_read = mv88e6185_phy_ppu_read,
+ .phy_write = mv88e6185_phy_ppu_write,
.port_set_link = mv88e6xxx_port_set_link,
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
@@ -2714,15 +2486,15 @@
.port_set_egress_floods = mv88e6185_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
- .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
- .port_pause_config = mv88e6097_port_pause_config,
+ .port_pause_limit = mv88e6097_port_pause_limit,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
- .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.ppu_enable = mv88e6185_g1_ppu_enable,
@@ -2734,6 +2506,7 @@
static const struct mv88e6xxx_ops mv88e6141_ops = {
/* MV88E6XXX_FAMILY_6341 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -2747,17 +2520,17 @@
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
- .port_pause_config = mv88e6097_port_pause_config,
+ .port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
- .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
@@ -2767,9 +2540,10 @@
static const struct mv88e6xxx_ops mv88e6161_ops = {
/* MV88E6XXX_FAMILY_6165 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6165_phy_read,
- .phy_write = mv88e6165_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
@@ -2777,17 +2551,17 @@
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
- .port_pause_config = mv88e6097_port_pause_config,
+ .port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
- .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
@@ -2797,6 +2571,7 @@
static const struct mv88e6xxx_ops mv88e6165_ops = {
/* MV88E6XXX_FAMILY_6165 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6165_phy_read,
.phy_write = mv88e6165_phy_write,
@@ -2809,8 +2584,8 @@
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
- .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
@@ -2820,6 +2595,7 @@
static const struct mv88e6xxx_ops mv88e6171_ops = {
/* MV88E6XXX_FAMILY_6351 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -2831,17 +2607,17 @@
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
- .port_pause_config = mv88e6097_port_pause_config,
+ .port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
- .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
@@ -2851,6 +2627,7 @@
static const struct mv88e6xxx_ops mv88e6172_ops = {
/* MV88E6XXX_FAMILY_6352 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -2864,26 +2641,28 @@
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
- .port_pause_config = mv88e6097_port_pause_config,
+ .port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
- .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .serdes_power = mv88e6352_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6175_ops = {
/* MV88E6XXX_FAMILY_6351 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -2895,17 +2674,17 @@
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
- .port_pause_config = mv88e6097_port_pause_config,
+ .port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
- .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
@@ -2915,6 +2694,7 @@
static const struct mv88e6xxx_ops mv88e6176_ops = {
/* MV88E6XXX_FAMILY_6352 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -2928,29 +2708,30 @@
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
- .port_pause_config = mv88e6097_port_pause_config,
+ .port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
- .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .serdes_power = mv88e6352_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6185_ops = {
/* MV88E6XXX_FAMILY_6185 */
.set_switch_mac = mv88e6xxx_g1_set_switch_mac,
- .phy_read = mv88e6xxx_phy_ppu_read,
- .phy_write = mv88e6xxx_phy_ppu_write,
+ .phy_read = mv88e6185_phy_ppu_read,
+ .phy_write = mv88e6185_phy_ppu_write,
.port_set_link = mv88e6xxx_port_set_link,
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
@@ -2962,8 +2743,8 @@
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
- .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.ppu_enable = mv88e6185_g1_ppu_enable,
@@ -2975,6 +2756,7 @@
static const struct mv88e6xxx_ops mv88e6190_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -2988,7 +2770,7 @@
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_pause_config = mv88e6390_port_pause_config,
+ .port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
@@ -2996,17 +2778,19 @@
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
- .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .serdes_power = mv88e6390_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6190x_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -3020,7 +2804,7 @@
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_pause_config = mv88e6390_port_pause_config,
+ .port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
@@ -3028,17 +2812,19 @@
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
- .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .serdes_power = mv88e6390_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6191_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -3052,7 +2838,7 @@
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_pause_config = mv88e6390_port_pause_config,
+ .port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
@@ -3060,17 +2846,19 @@
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
- .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .serdes_power = mv88e6390_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6240_ops = {
/* MV88E6XXX_FAMILY_6352 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -3084,26 +2872,28 @@
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
- .port_pause_config = mv88e6097_port_pause_config,
+ .port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
- .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .serdes_power = mv88e6352_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6290_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -3117,7 +2907,7 @@
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_pause_config = mv88e6390_port_pause_config,
+ .port_pause_limit = mv88e6390_port_pause_limit,
.port_set_cmode = mv88e6390x_port_set_cmode,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
@@ -3126,17 +2916,19 @@
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
- .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .serdes_power = mv88e6390_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6320_ops = {
/* MV88E6XXX_FAMILY_6320 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -3149,17 +2941,17 @@
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
- .port_pause_config = mv88e6097_port_pause_config,
+ .port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6320_stats_get_stats,
- .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
@@ -3168,6 +2960,7 @@
static const struct mv88e6xxx_ops mv88e6321_ops = {
/* MV88E6XXX_FAMILY_6321 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -3180,17 +2973,17 @@
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
- .port_pause_config = mv88e6097_port_pause_config,
+ .port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6320_stats_get_stats,
- .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
@@ -3198,6 +2991,7 @@
static const struct mv88e6xxx_ops mv88e6341_ops = {
/* MV88E6XXX_FAMILY_6341 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -3211,17 +3005,17 @@
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
- .port_pause_config = mv88e6097_port_pause_config,
+ .port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
- .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
@@ -3231,6 +3025,7 @@
static const struct mv88e6xxx_ops mv88e6350_ops = {
/* MV88E6XXX_FAMILY_6351 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3242,17 +3037,17 @@
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
- .port_pause_config = mv88e6097_port_pause_config,
+ .port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
- .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
@@ -3262,6 +3057,7 @@
static const struct mv88e6xxx_ops mv88e6351_ops = {
/* MV88E6XXX_FAMILY_6351 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3273,17 +3069,17 @@
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
- .port_pause_config = mv88e6097_port_pause_config,
+ .port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
- .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
@@ -3293,6 +3089,7 @@
static const struct mv88e6xxx_ops mv88e6352_ops = {
/* MV88E6XXX_FAMILY_6352 */
+ .irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -3306,26 +3103,28 @@
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
- .port_pause_config = mv88e6097_port_pause_config,
+ .port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
- .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .serdes_power = mv88e6352_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6390_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -3339,9 +3138,9 @@
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
- .port_pause_config = mv88e6390_port_pause_config,
+ .port_pause_limit = mv88e6390_port_pause_limit,
.port_set_cmode = mv88e6390x_port_set_cmode,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
@@ -3350,17 +3149,19 @@
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
- .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .serdes_power = mv88e6390_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6390x_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
@@ -3374,9 +3175,9 @@
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
- .port_pause_config = mv88e6390_port_pause_config,
+ .port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
@@ -3384,18 +3185,19 @@
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
- .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .serdes_power = mv88e6390_serdes_power,
};
static const struct mv88e6xxx_info mv88e6xxx_table[] = {
[MV88E6085] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6085,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6085,
.family = MV88E6XXX_FAMILY_6097,
.name = "Marvell 88E6085",
.num_databases = 4096,
@@ -3413,7 +3215,7 @@
},
[MV88E6095] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6095,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6095,
.family = MV88E6XXX_FAMILY_6095,
.name = "Marvell 88E6095/88E6095F",
.num_databases = 256,
@@ -3430,7 +3232,7 @@
},
[MV88E6097] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6097,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6097,
.family = MV88E6XXX_FAMILY_6097,
.name = "Marvell 88E6097/88E6097F",
.num_databases = 4096,
@@ -3448,7 +3250,7 @@
},
[MV88E6123] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6123,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6123,
.family = MV88E6XXX_FAMILY_6165,
.name = "Marvell 88E6123",
.num_databases = 4096,
@@ -3460,13 +3262,13 @@
.g1_irqs = 9,
.atu_move_port_mask = 0xf,
.pvt = true,
- .tag_protocol = DSA_TAG_PROTO_DSA,
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
.ops = &mv88e6123_ops,
},
[MV88E6131] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6131,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6131,
.family = MV88E6XXX_FAMILY_6185,
.name = "Marvell 88E6131",
.num_databases = 256,
@@ -3483,7 +3285,7 @@
},
[MV88E6141] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6141,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6141,
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6341",
.num_databases = 4096,
@@ -3500,7 +3302,7 @@
},
[MV88E6161] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6161,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6161,
.family = MV88E6XXX_FAMILY_6165,
.name = "Marvell 88E6161",
.num_databases = 4096,
@@ -3512,13 +3314,13 @@
.g1_irqs = 9,
.atu_move_port_mask = 0xf,
.pvt = true,
- .tag_protocol = DSA_TAG_PROTO_DSA,
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
.ops = &mv88e6161_ops,
},
[MV88E6165] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6165,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6165,
.family = MV88E6XXX_FAMILY_6165,
.name = "Marvell 88E6165",
.num_databases = 4096,
@@ -3536,7 +3338,7 @@
},
[MV88E6171] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6171,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6171,
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6171",
.num_databases = 4096,
@@ -3554,7 +3356,7 @@
},
[MV88E6172] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6172,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6172,
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6172",
.num_databases = 4096,
@@ -3572,7 +3374,7 @@
},
[MV88E6175] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6175,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6175,
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6175",
.num_databases = 4096,
@@ -3590,7 +3392,7 @@
},
[MV88E6176] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6176,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6176,
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6176",
.num_databases = 4096,
@@ -3608,7 +3410,7 @@
},
[MV88E6185] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6185,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6185,
.family = MV88E6XXX_FAMILY_6185,
.name = "Marvell 88E6185",
.num_databases = 256,
@@ -3625,7 +3427,7 @@
},
[MV88E6190] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6190,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6190,
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6190",
.num_databases = 4096,
@@ -3643,7 +3445,7 @@
},
[MV88E6190X] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6190X,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6190X,
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6190X",
.num_databases = 4096,
@@ -3661,7 +3463,7 @@
},
[MV88E6191] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6191,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6191,
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6191",
.num_databases = 4096,
@@ -3679,7 +3481,7 @@
},
[MV88E6240] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6240,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6240,
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6240",
.num_databases = 4096,
@@ -3697,7 +3499,7 @@
},
[MV88E6290] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6290,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6290,
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6290",
.num_databases = 4096,
@@ -3715,7 +3517,7 @@
},
[MV88E6320] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6320,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6320,
.family = MV88E6XXX_FAMILY_6320,
.name = "Marvell 88E6320",
.num_databases = 4096,
@@ -3733,7 +3535,7 @@
},
[MV88E6321] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6321,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6321,
.family = MV88E6XXX_FAMILY_6320,
.name = "Marvell 88E6321",
.num_databases = 4096,
@@ -3750,7 +3552,7 @@
},
[MV88E6341] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6341,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6341",
.num_databases = 4096,
@@ -3767,7 +3569,7 @@
},
[MV88E6350] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6350,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6350,
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6350",
.num_databases = 4096,
@@ -3785,7 +3587,7 @@
},
[MV88E6351] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6351,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6351,
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6351",
.num_databases = 4096,
@@ -3803,7 +3605,7 @@
},
[MV88E6352] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6352,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6352,
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6352",
.num_databases = 4096,
@@ -3820,7 +3622,7 @@
.ops = &mv88e6352_ops,
},
[MV88E6390] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6390,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6390",
.num_databases = 4096,
@@ -3837,7 +3639,7 @@
.ops = &mv88e6390_ops,
},
[MV88E6390X] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6390X,
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6390X,
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6390X",
.num_databases = 4096,
@@ -3874,13 +3676,13 @@
int err;
mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_port_read(chip, 0, PORT_SWITCH_ID, &id);
+ err = mv88e6xxx_port_read(chip, 0, MV88E6XXX_PORT_SWITCH_ID, &id);
mutex_unlock(&chip->reg_lock);
if (err)
return err;
- prod_num = (id & 0xfff0) >> 4;
- rev = id & 0x000f;
+ prod_num = id & MV88E6XXX_PORT_SWITCH_ID_PROD_MASK;
+ rev = id & MV88E6XXX_PORT_SWITCH_ID_REV_MASK;
info = mv88e6xxx_lookup_info(prod_num);
if (!info)
@@ -3915,18 +3717,6 @@
return chip;
}
-static void mv88e6xxx_phy_init(struct mv88e6xxx_chip *chip)
-{
- if (chip->info->ops->ppu_enable && chip->info->ops->ppu_disable)
- mv88e6xxx_ppu_state_init(chip);
-}
-
-static void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip)
-{
- if (chip->info->ops->ppu_enable && chip->info->ops->ppu_disable)
- mv88e6xxx_ppu_state_destroy(chip);
-}
-
static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
struct mii_bus *bus, int sw_addr)
{
@@ -4017,8 +3807,9 @@
mutex_lock(&chip->reg_lock);
if (mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
- GLOBAL_ATU_DATA_STATE_MC_STATIC))
- netdev_err(ds->ports[port].netdev, "failed to load multicast MAC address\n");
+ MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC))
+ dev_err(ds->dev, "p%d: failed to load multicast MAC address\n",
+ port);
mutex_unlock(&chip->reg_lock);
}
@@ -4030,7 +3821,7 @@
mutex_lock(&chip->reg_lock);
err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
- GLOBAL_ATU_DATA_STATE_UNUSED);
+ MV88E6XXX_G1_ATU_DATA_STATE_UNUSED);
mutex_unlock(&chip->reg_lock);
return err;
@@ -4038,7 +3829,7 @@
static int mv88e6xxx_port_mdb_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_mdb *mdb,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -4059,6 +3850,8 @@
.get_strings = mv88e6xxx_get_strings,
.get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
.get_sset_count = mv88e6xxx_get_sset_count,
+ .port_enable = mv88e6xxx_port_enable,
+ .port_disable = mv88e6xxx_port_disable,
.set_eee = mv88e6xxx_set_eee,
.get_eee = mv88e6xxx_get_eee,
.get_eeprom_len = mv88e6xxx_get_eeprom_len,
@@ -4108,7 +3901,7 @@
dev_set_drvdata(dev, ds);
- return dsa_register_switch(ds, dev);
+ return dsa_register_switch(ds);
}
static void mv88e6xxx_unregister_switch(struct mv88e6xxx_chip *chip)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
new file mode 100644
index 0000000..0864440
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -0,0 +1,518 @@
+/*
+ * Marvell 88E6xxx Ethernet switch single-chip definition
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _MV88E6XXX_CHIP_H
+#define _MV88E6XXX_CHIP_H
+
+#include <linux/if_vlan.h>
+#include <linux/irq.h>
+#include <linux/gpio/consumer.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+
+#ifndef UINT64_MAX
+#define UINT64_MAX (u64)(~((u64)0))
+#endif
+
+#define SMI_CMD 0x00
+#define SMI_CMD_BUSY BIT(15)
+#define SMI_CMD_CLAUSE_22 BIT(12)
+#define SMI_CMD_OP_22_WRITE ((1 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22)
+#define SMI_CMD_OP_22_READ ((2 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22)
+#define SMI_CMD_OP_45_WRITE_ADDR ((0 << 10) | SMI_CMD_BUSY)
+#define SMI_CMD_OP_45_WRITE_DATA ((1 << 10) | SMI_CMD_BUSY)
+#define SMI_CMD_OP_45_READ_DATA ((2 << 10) | SMI_CMD_BUSY)
+#define SMI_CMD_OP_45_READ_DATA_INC ((3 << 10) | SMI_CMD_BUSY)
+#define SMI_DATA 0x01
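+
+/* Indirect access sketch (multi-chip mode), from the definitions above:
+ * write SMI_CMD_OP_22_READ | (addr << 5) | reg to the chip's SMI_CMD
+ * register, poll until SMI_CMD_BUSY self-clears, then read the result
+ * out of SMI_DATA.
+ */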
+
+#define MV88E6XXX_N_FID 4096
+
+/* PVT limits for 4-bit port and 5-bit switch */
+#define MV88E6XXX_MAX_PVT_SWITCHES 32
+#define MV88E6XXX_MAX_PVT_PORTS 16
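+/* i.e. at most 32 * 16 = 512 cross-chip entries (a 9-bit PVT index). */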
+
+enum mv88e6xxx_egress_mode {
+ MV88E6XXX_EGRESS_MODE_UNMODIFIED,
+ MV88E6XXX_EGRESS_MODE_UNTAGGED,
+ MV88E6XXX_EGRESS_MODE_TAGGED,
+ MV88E6XXX_EGRESS_MODE_ETHERTYPE,
+};
+
+enum mv88e6xxx_frame_mode {
+ MV88E6XXX_FRAME_MODE_NORMAL,
+ MV88E6XXX_FRAME_MODE_DSA,
+ MV88E6XXX_FRAME_MODE_PROVIDER,
+ MV88E6XXX_FRAME_MODE_ETHERTYPE,
+};
+
+/* List of supported models */
+enum mv88e6xxx_model {
+ MV88E6085,
+ MV88E6095,
+ MV88E6097,
+ MV88E6123,
+ MV88E6131,
+ MV88E6141,
+ MV88E6161,
+ MV88E6165,
+ MV88E6171,
+ MV88E6172,
+ MV88E6175,
+ MV88E6176,
+ MV88E6185,
+ MV88E6190,
+ MV88E6190X,
+ MV88E6191,
+ MV88E6240,
+ MV88E6290,
+ MV88E6320,
+ MV88E6321,
+ MV88E6341,
+ MV88E6350,
+ MV88E6351,
+ MV88E6352,
+ MV88E6390,
+ MV88E6390X,
+};
+
+enum mv88e6xxx_family {
+ MV88E6XXX_FAMILY_NONE,
+ MV88E6XXX_FAMILY_6065, /* 6031 6035 6061 6065 */
+ MV88E6XXX_FAMILY_6095, /* 6092 6095 */
+ MV88E6XXX_FAMILY_6097, /* 6046 6085 6096 6097 */
+ MV88E6XXX_FAMILY_6165, /* 6123 6161 6165 */
+ MV88E6XXX_FAMILY_6185, /* 6108 6121 6122 6131 6152 6155 6182 6185 */
+ MV88E6XXX_FAMILY_6320, /* 6320 6321 */
+ MV88E6XXX_FAMILY_6341, /* 6141 6341 */
+ MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */
+ MV88E6XXX_FAMILY_6352, /* 6172 6176 6240 6352 */
+ MV88E6XXX_FAMILY_6390, /* 6190 6190X 6191 6290 6390 6390X */
+};
+
+enum mv88e6xxx_cap {
+ /* Energy Efficient Ethernet.
+ */
+ MV88E6XXX_CAP_EEE,
+
+ /* Multi-chip Addressing Mode.
+ * Some chips respond to only two registers at their own SMI device
+ * address when that address is non-zero, and use indirect access to
+ * their internal registers.
+ */
+ MV88E6XXX_CAP_SMI_CMD, /* (0x00) SMI Command */
+ MV88E6XXX_CAP_SMI_DATA, /* (0x01) SMI Data */
+
+ /* Switch Global (1) Registers.
+ */
+ MV88E6XXX_CAP_G1_ATU_FID, /* (0x01) ATU FID Register */
+ MV88E6XXX_CAP_G1_VTU_FID, /* (0x02) VTU FID Register */
+
+ /* Switch Global 2 Registers.
+ * The device contains a second set of global 16-bit registers.
+ */
+ MV88E6XXX_CAP_GLOBAL2,
+ MV88E6XXX_CAP_G2_INT, /* (0x00) Interrupt Status */
+ MV88E6XXX_CAP_G2_MGMT_EN_2X, /* (0x02) MGMT Enable Register 2x */
+ MV88E6XXX_CAP_G2_MGMT_EN_0X, /* (0x03) MGMT Enable Register 0x */
+ MV88E6XXX_CAP_G2_POT, /* (0x0f) Priority Override Table */
+
+ /* Per VLAN Spanning Tree Unit (STU).
+ * The Port State database, if present, is accessed through VTU
+ * operations and dedicated SID registers. See MV88E6352_G1_VTU_SID.
+ */
+ MV88E6XXX_CAP_STU,
+
+ /* VLAN Table Unit.
+ * The VTU is used to program 802.1Q VLANs. See MV88E6XXX_G1_VTU_OP.
+ */
+ MV88E6XXX_CAP_VTU,
+};
+
+/* Bitmask of capabilities */
+#define MV88E6XXX_FLAG_EEE BIT_ULL(MV88E6XXX_CAP_EEE)
+
+#define MV88E6XXX_FLAG_SMI_CMD BIT_ULL(MV88E6XXX_CAP_SMI_CMD)
+#define MV88E6XXX_FLAG_SMI_DATA BIT_ULL(MV88E6XXX_CAP_SMI_DATA)
+
+#define MV88E6XXX_FLAG_G1_VTU_FID BIT_ULL(MV88E6XXX_CAP_G1_VTU_FID)
+
+#define MV88E6XXX_FLAG_GLOBAL2 BIT_ULL(MV88E6XXX_CAP_GLOBAL2)
+#define MV88E6XXX_FLAG_G2_INT BIT_ULL(MV88E6XXX_CAP_G2_INT)
+#define MV88E6XXX_FLAG_G2_MGMT_EN_2X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_2X)
+#define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_0X)
+#define MV88E6XXX_FLAG_G2_POT BIT_ULL(MV88E6XXX_CAP_G2_POT)
+
+/* Multi-chip Addressing Mode */
+#define MV88E6XXX_FLAGS_MULTI_CHIP \
+ (MV88E6XXX_FLAG_SMI_CMD | \
+ MV88E6XXX_FLAG_SMI_DATA)
+
+#define MV88E6XXX_FLAGS_FAMILY_6095 \
+ (MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
+ MV88E6XXX_FLAGS_MULTI_CHIP)
+
+#define MV88E6XXX_FLAGS_FAMILY_6097 \
+ (MV88E6XXX_FLAG_G1_VTU_FID | \
+ MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_INT | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
+ MV88E6XXX_FLAG_G2_POT | \
+ MV88E6XXX_FLAGS_MULTI_CHIP)
+
+#define MV88E6XXX_FLAGS_FAMILY_6165 \
+ (MV88E6XXX_FLAG_G1_VTU_FID | \
+ MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_INT | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
+ MV88E6XXX_FLAG_G2_POT | \
+ MV88E6XXX_FLAGS_MULTI_CHIP)
+
+#define MV88E6XXX_FLAGS_FAMILY_6185 \
+ (MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_INT | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
+ MV88E6XXX_FLAGS_MULTI_CHIP)
+
+#define MV88E6XXX_FLAGS_FAMILY_6320 \
+ (MV88E6XXX_FLAG_EEE | \
+ MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
+ MV88E6XXX_FLAG_G2_POT | \
+ MV88E6XXX_FLAGS_MULTI_CHIP)
+
+#define MV88E6XXX_FLAGS_FAMILY_6341 \
+ (MV88E6XXX_FLAG_EEE | \
+ MV88E6XXX_FLAG_G1_VTU_FID | \
+ MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_INT | \
+ MV88E6XXX_FLAG_G2_POT | \
+ MV88E6XXX_FLAGS_MULTI_CHIP)
+
+#define MV88E6XXX_FLAGS_FAMILY_6351 \
+ (MV88E6XXX_FLAG_G1_VTU_FID | \
+ MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_INT | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
+ MV88E6XXX_FLAG_G2_POT | \
+ MV88E6XXX_FLAGS_MULTI_CHIP)
+
+#define MV88E6XXX_FLAGS_FAMILY_6352 \
+ (MV88E6XXX_FLAG_EEE | \
+ MV88E6XXX_FLAG_G1_VTU_FID | \
+ MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_INT | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
+ MV88E6XXX_FLAG_G2_POT | \
+ MV88E6XXX_FLAGS_MULTI_CHIP)
+
+#define MV88E6XXX_FLAGS_FAMILY_6390 \
+ (MV88E6XXX_FLAG_EEE | \
+ MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_INT | \
+ MV88E6XXX_FLAGS_MULTI_CHIP)
+
+struct mv88e6xxx_ops;
+
+struct mv88e6xxx_info {
+ enum mv88e6xxx_family family;
+ u16 prod_num;
+ const char *name;
+ unsigned int num_databases;
+ unsigned int num_ports;
+ unsigned int max_vid;
+ unsigned int port_base_addr;
+ unsigned int global1_addr;
+ unsigned int age_time_coeff;
+ unsigned int g1_irqs;
+ bool pvt;
+ enum dsa_tag_protocol tag_protocol;
+ unsigned long long flags;
+
+ /* Mask for FromPort and ToPort value of PortVec used in ATU Move
+ * operation. 0 means that the ATU Move operation is not supported.
+ */
+ u8 atu_move_port_mask;
+ const struct mv88e6xxx_ops *ops;
+};
+
+struct mv88e6xxx_atu_entry {
+ u8 state;
+ bool trunk;
+ u16 portvec;
+ u8 mac[ETH_ALEN];
+};
+
+struct mv88e6xxx_vtu_entry {
+ u16 vid;
+ u16 fid;
+ u8 sid;
+ bool valid;
+ u8 member[DSA_MAX_PORTS];
+ u8 state[DSA_MAX_PORTS];
+};
+
+struct mv88e6xxx_bus_ops;
+struct mv88e6xxx_irq_ops;
+
+struct mv88e6xxx_irq {
+ u16 masked;
+ struct irq_chip chip;
+ struct irq_domain *domain;
+ unsigned int nirqs;
+};
+
+struct mv88e6xxx_chip {
+ const struct mv88e6xxx_info *info;
+
+ /* The dsa_switch this private structure is related to */
+ struct dsa_switch *ds;
+
+ /* The device this structure is associated to */
+ struct device *dev;
+
+ /* This mutex protects the access to the switch registers */
+ struct mutex reg_lock;
+
+ /* The MII bus and the address on the bus that is used to
+ * communicate with the switch
+ */
+ const struct mv88e6xxx_bus_ops *smi_ops;
+ struct mii_bus *bus;
+ int sw_addr;
+
+ /* Handles automatic disabling and re-enabling of the PHY
+ * polling unit.
+ */
+ const struct mv88e6xxx_bus_ops *phy_ops;
+ struct mutex ppu_mutex;
+ int ppu_disabled;
+ struct work_struct ppu_work;
+ struct timer_list ppu_timer;
+
+ /* This mutex serialises access to the statistics unit.
+ * Hold this mutex over snapshot + dump sequences.
+ */
+ struct mutex stats_mutex;
+
+ /* A switch may have a GPIO line tied to its reset pin. Parse
+ * this from the device tree, and use it before performing
+ * a switch soft reset.
+ */
+ struct gpio_desc *reset;
+
+ /* Set to the size of the EEPROM if supported by the switch */
+ int eeprom_len;
+
+ /* List of MDIO buses */
+ struct list_head mdios;
+
+ /* There can be two interrupt controllers, which are chained
+ * off a GPIO as the interrupt source
+ */
+ struct mv88e6xxx_irq g1_irq;
+ struct mv88e6xxx_irq g2_irq;
+ int irq;
+ int device_irq;
+ int watchdog_irq;
+};
+
+struct mv88e6xxx_bus_ops {
+ int (*read)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
+ int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
+};
+
+struct mv88e6xxx_mdio_bus {
+ struct mii_bus *bus;
+ struct mv88e6xxx_chip *chip;
+ struct list_head list;
+ bool external;
+};
+
+struct mv88e6xxx_ops {
+ /* Ingress Rate Limit unit (IRL) operations */
+ int (*irl_init_all)(struct mv88e6xxx_chip *chip, int port);
+
+ int (*get_eeprom)(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data);
+ int (*set_eeprom)(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data);
+
+ int (*set_switch_mac)(struct mv88e6xxx_chip *chip, u8 *addr);
+
+ int (*phy_read)(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 *val);
+ int (*phy_write)(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 val);
+
+ /* PHY Polling Unit (PPU) operations */
+ int (*ppu_enable)(struct mv88e6xxx_chip *chip);
+ int (*ppu_disable)(struct mv88e6xxx_chip *chip);
+
+ /* Switch Software Reset */
+ int (*reset)(struct mv88e6xxx_chip *chip);
+
+ /* RGMII Receive/Transmit Timing Control
+ * Add delay on PHY_INTERFACE_MODE_RGMII_*ID, no delay otherwise.
+ */
+ int (*port_set_rgmii_delay)(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
+
+#define LINK_FORCED_DOWN 0
+#define LINK_FORCED_UP 1
+#define LINK_UNFORCED -2
+
+ /* Port's MAC link state
+ * Use LINK_FORCED_UP or LINK_FORCED_DOWN to force link up or down,
+ * or LINK_UNFORCED for normal link detection.
+ */
+ int (*port_set_link)(struct mv88e6xxx_chip *chip, int port, int link);
+
+#define DUPLEX_UNFORCED -2
+
+ /* Port's MAC duplex mode
+ *
+ * Use DUPLEX_HALF or DUPLEX_FULL to force half or full duplex,
+ * or DUPLEX_UNFORCED for normal duplex detection.
+ */
+ int (*port_set_duplex)(struct mv88e6xxx_chip *chip, int port, int dup);
+
+#define SPEED_MAX INT_MAX
+#define SPEED_UNFORCED -2
+
+ /* Port's MAC speed (in Mbps)
+ *
+ * Depending on the chip, 10, 100, 200, 1000, 2500, 10000 are valid.
+ * Use SPEED_UNFORCED for normal detection, SPEED_MAX for max value.
+ */
+ int (*port_set_speed)(struct mv88e6xxx_chip *chip, int port, int speed);
+
+ int (*port_tag_remap)(struct mv88e6xxx_chip *chip, int port);
+
+ int (*port_set_frame_mode)(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_frame_mode mode);
+ int (*port_set_egress_floods)(struct mv88e6xxx_chip *chip, int port,
+ bool unicast, bool multicast);
+ int (*port_set_ether_type)(struct mv88e6xxx_chip *chip, int port,
+ u16 etype);
+ int (*port_set_jumbo_size)(struct mv88e6xxx_chip *chip, int port,
+ size_t size);
+
+ int (*port_egress_rate_limiting)(struct mv88e6xxx_chip *chip, int port);
+ int (*port_pause_limit)(struct mv88e6xxx_chip *chip, int port, u8 in,
+ u8 out);
+ int (*port_disable_learn_limit)(struct mv88e6xxx_chip *chip, int port);
+ int (*port_disable_pri_override)(struct mv88e6xxx_chip *chip, int port);
+
+ /* CMODE controls what PHY mode the MAC will use, e.g. SGMII or RGMII.
+ * Some chips allow this to be configured on specific ports.
+ */
+ int (*port_set_cmode)(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
+
+ /* Some devices have a per-port register indicating the upstream
+ * port this port should forward to.
+ */
+ int (*port_set_upstream_port)(struct mv88e6xxx_chip *chip, int port,
+ int upstream_port);
+
+ /* Snapshot the statistics for a port. The statistics can then
+ * be read back at leisure but still with a consistent view.
+ */
+ int (*stats_snapshot)(struct mv88e6xxx_chip *chip, int port);
+
+ /* Set the histogram mode for statistics, when the control registers
+ * are separated out of the STATS_OP register.
+ */
+ int (*stats_set_histogram)(struct mv88e6xxx_chip *chip);
+
+ /* Return the number of strings describing statistics */
+ int (*stats_get_sset_count)(struct mv88e6xxx_chip *chip);
+ void (*stats_get_strings)(struct mv88e6xxx_chip *chip, uint8_t *data);
+ void (*stats_get_stats)(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data);
+ int (*set_cpu_port)(struct mv88e6xxx_chip *chip, int port);
+ int (*set_egress_port)(struct mv88e6xxx_chip *chip, int port);
+ const struct mv88e6xxx_irq_ops *watchdog_ops;
+
+ /* Can be either in g1 or g2, so don't use a prefix */
+ int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip);
+
+ /* Power on/off a SERDES interface */
+ int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, bool on);
+
+ /* VLAN Translation Unit operations */
+ int (*vtu_getnext)(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
+ int (*vtu_loadpurge)(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
+};
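+
+/* Each mv88e6xxx_table entry selects one of the per-model ops structures
+ * above through mv88e6xxx_info.ops, keeping per-family register layout
+ * differences out of the common code paths.
+ */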
+
+struct mv88e6xxx_irq_ops {
+ /* Action to be performed when the interrupt happens */
+ int (*irq_action)(struct mv88e6xxx_chip *chip, int irq);
+ /* Setup the hardware to generate the interrupt */
+ int (*irq_setup)(struct mv88e6xxx_chip *chip);
+ /* Reset the hardware to stop generating the interrupt */
+ void (*irq_free)(struct mv88e6xxx_chip *chip);
+};
+
+#define STATS_TYPE_PORT BIT(0)
+#define STATS_TYPE_BANK0 BIT(1)
+#define STATS_TYPE_BANK1 BIT(2)
+
+struct mv88e6xxx_hw_stat {
+ char string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int reg;
+ int type;
+};
+
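+/* True only if every bit of 'flags' is set: e.g. checking
+ * MV88E6XXX_FLAGS_MULTI_CHIP requires both MV88E6XXX_FLAG_SMI_CMD and
+ * MV88E6XXX_FLAG_SMI_DATA to be present in chip->info->flags.
+ */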
+static inline bool mv88e6xxx_has(struct mv88e6xxx_chip *chip,
+ unsigned long flags)
+{
+ return (chip->info->flags & flags) == flags;
+}
+
+static inline bool mv88e6xxx_has_pvt(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->pvt;
+}
+
+static inline unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->num_databases;
+}
+
+static inline unsigned int mv88e6xxx_num_ports(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->num_ports;
+}
+
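+/* Bitmask of all ports, e.g. GENMASK(10, 0) == 0x07ff for an 11-port chip. */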
+static inline u16 mv88e6xxx_port_mask(struct mv88e6xxx_chip *chip)
+{
+ return GENMASK(mv88e6xxx_num_ports(chip) - 1, 0);
+}
+
+int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
+int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
+int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg,
+ u16 update);
+int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask);
+struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip);
+
+#endif /* _MV88E6XXX_CHIP_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 3982583..d76d7c7 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -12,7 +12,9 @@
* (at your option) any later version.
*/
-#include "mv88e6xxx.h"
+#include <linux/bitfield.h>
+
+#include "chip.h"
#include "global1.h"
int mv88e6xxx_g1_read(struct mv88e6xxx_chip *chip, int reg, u16 *val)
@@ -42,13 +44,13 @@
int i, err;
for (i = 0; i < 16; i++) {
- err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &state);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &state);
if (err)
return err;
/* Check the value of the PPUState bits 15:14 */
- state &= GLOBAL_STATUS_PPU_STATE_MASK;
- if (state != GLOBAL_STATUS_PPU_STATE_POLLING)
+ state &= MV88E6185_G1_STS_PPU_STATE_MASK;
+ if (state != MV88E6185_G1_STS_PPU_STATE_POLLING)
return 0;
usleep_range(1000, 2000);
@@ -63,13 +65,13 @@
int i, err;
for (i = 0; i < 16; ++i) {
- err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &state);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &state);
if (err)
return err;
/* Check the value of the PPUState bits 15:14 */
- state &= GLOBAL_STATUS_PPU_STATE_MASK;
- if (state == GLOBAL_STATUS_PPU_STATE_POLLING)
+ state &= MV88E6185_G1_STS_PPU_STATE_MASK;
+ if (state == MV88E6185_G1_STS_PPU_STATE_POLLING)
return 0;
usleep_range(1000, 2000);
@@ -84,12 +86,12 @@
int i, err;
for (i = 0; i < 16; ++i) {
- err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &state);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &state);
if (err)
return err;
/* Check the value of the PPUState (or InitState) bit 15 */
- if (state & GLOBAL_STATUS_PPU_STATE)
+ if (state & MV88E6352_G1_STS_PPU_STATE)
return 0;
usleep_range(1000, 2000);
@@ -109,11 +111,11 @@
* have finished their initialization and are ready to accept frames.
*/
while (time_before(jiffies, timeout)) {
- err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &val);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
if (err)
return err;
- if (val & GLOBAL_STATUS_INIT_READY)
+ if (val & MV88E6XXX_G1_STS_INIT_READY)
break;
usleep_range(1000, 2000);
@@ -125,6 +127,33 @@
return 0;
}
+/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
+ * Offset 0x02: Switch MAC Address Register Bytes 2 & 3
+ * Offset 0x03: Switch MAC Address Register Bytes 4 & 5
+ */
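+/* The 6-byte switch MAC is packed big-endian, two bytes per 16-bit
+ * register, as the three writes below show.
+ */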
+int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
+{
+ u16 reg;
+ int err;
+
+ reg = (addr[0] << 8) | addr[1];
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_MAC_01, reg);
+ if (err)
+ return err;
+
+ reg = (addr[2] << 8) | addr[3];
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_MAC_23, reg);
+ if (err)
+ return err;
+
+ reg = (addr[4] << 8) | addr[5];
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_MAC_45, reg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
/* Offset 0x04: Switch Global Control Register */
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip)
@@ -135,14 +164,14 @@
/* Set the SWReset bit 15 along with the PPUEn bit 14, to also restart
* the PPU, including re-doing PHY detection and initialization
*/
- err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
if (err)
return err;
- val |= GLOBAL_CONTROL_SW_RESET;
- val |= GLOBAL_CONTROL_PPU_ENABLE;
+ val |= MV88E6XXX_G1_CTL1_SW_RESET;
+ val |= MV88E6XXX_G1_CTL1_PPU_ENABLE;
- err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, val);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
if (err)
return err;
@@ -159,13 +188,13 @@
int err;
/* Set the SWReset bit 15 */
- err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
if (err)
return err;
- val |= GLOBAL_CONTROL_SW_RESET;
+ val |= MV88E6XXX_G1_CTL1_SW_RESET;
- err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, val);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
if (err)
return err;
@@ -181,13 +210,13 @@
u16 val;
int err;
- err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
if (err)
return err;
- val |= GLOBAL_CONTROL_PPU_ENABLE;
+ val |= MV88E6XXX_G1_CTL1_PPU_ENABLE;
- err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, val);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
if (err)
return err;
@@ -199,13 +228,13 @@
u16 val;
int err;
- err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
if (err)
return err;
- val &= ~GLOBAL_CONTROL_PPU_ENABLE;
+ val &= ~MV88E6XXX_G1_CTL1_PPU_ENABLE;
- err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, val);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
if (err)
return err;
@@ -220,17 +249,17 @@
u16 reg;
int err;
- err = mv88e6xxx_g1_read(chip, GLOBAL_MONITOR_CONTROL, &reg);
+ err = mv88e6xxx_g1_read(chip, MV88E6185_G1_MONITOR_CTL, &reg);
if (err)
return err;
- reg &= ~(GLOBAL_MONITOR_CONTROL_INGRESS_MASK |
- GLOBAL_MONITOR_CONTROL_EGRESS_MASK);
+ reg &= ~(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK |
+ MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK);
- reg |= port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
- port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT;
+ reg |= port << __bf_shf(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK) |
+ port << __bf_shf(MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK);
- return mv88e6xxx_g1_write(chip, GLOBAL_MONITOR_CONTROL, reg);
+ return mv88e6xxx_g1_write(chip, MV88E6185_G1_MONITOR_CTL, reg);
}
/* Older generations also call this the ARP destination. It has been
@@ -242,14 +271,14 @@
u16 reg;
int err;
- err = mv88e6xxx_g1_read(chip, GLOBAL_MONITOR_CONTROL, &reg);
+ err = mv88e6xxx_g1_read(chip, MV88E6185_G1_MONITOR_CTL, &reg);
if (err)
return err;
- reg &= ~GLOBAL_MONITOR_CONTROL_ARP_MASK;
- reg |= port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
+ reg &= ~MV88E6185_G1_MONITOR_CTL_ARP_DEST_MASK;
+ reg |= port << __bf_shf(MV88E6185_G1_MONITOR_CTL_ARP_DEST_MASK);
- return mv88e6xxx_g1_write(chip, GLOBAL_MONITOR_CONTROL, reg);
+ return mv88e6xxx_g1_write(chip, MV88E6185_G1_MONITOR_CTL, reg);
}
static int mv88e6390_g1_monitor_write(struct mv88e6xxx_chip *chip,
@@ -257,55 +286,66 @@
{
u16 reg;
- reg = GLOBAL_MONITOR_CONTROL_UPDATE | pointer | data;
+ reg = MV88E6390_G1_MONITOR_MGMT_CTL_UPDATE | pointer | data;
- return mv88e6xxx_g1_write(chip, GLOBAL_MONITOR_CONTROL, reg);
+ return mv88e6xxx_g1_write(chip, MV88E6390_G1_MONITOR_MGMT_CTL, reg);
}
int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port)
{
+ u16 ptr;
int err;
- err = mv88e6390_g1_monitor_write(chip, GLOBAL_MONITOR_CONTROL_INGRESS,
- port);
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST;
+ err = mv88e6390_g1_monitor_write(chip, ptr, port);
if (err)
return err;
- return mv88e6390_g1_monitor_write(chip, GLOBAL_MONITOR_CONTROL_EGRESS,
- port);
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST;
+ err = mv88e6390_g1_monitor_write(chip, ptr, port);
+ if (err)
+ return err;
+
+ return 0;
}
int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
{
- return mv88e6390_g1_monitor_write(chip, GLOBAL_MONITOR_CONTROL_CPU_DEST,
- port);
+ u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST;
+
+ return mv88e6390_g1_monitor_write(chip, ptr, port);
}
int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
{
+ u16 ptr;
int err;
/* 01:c2:80:00:00:00:00-01:c2:80:00:00:00:07 are Management */
- err = mv88e6390_g1_monitor_write(
- chip, GLOBAL_MONITOR_CONTROL_0180C280000000XLO, 0xff);
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000000XLO;
+ err = mv88e6390_g1_monitor_write(chip, ptr, 0xff);
if (err)
return err;
/* 01:c2:80:00:00:00:08-01:c2:80:00:00:00:0f are Management */
- err = mv88e6390_g1_monitor_write(
- chip, GLOBAL_MONITOR_CONTROL_0180C280000000XHI, 0xff);
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000000XHI;
+ err = mv88e6390_g1_monitor_write(chip, ptr, 0xff);
if (err)
return err;
/* 01:c2:80:00:00:00:20-01:c2:80:00:00:00:27 are Management */
- err = mv88e6390_g1_monitor_write(
- chip, GLOBAL_MONITOR_CONTROL_0180C280000002XLO, 0xff);
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000002XLO;
+ err = mv88e6390_g1_monitor_write(chip, ptr, 0xff);
if (err)
return err;
/* 01:c2:80:00:00:00:28-01:c2:80:00:00:00:2f are Management */
- return mv88e6390_g1_monitor_write(
- chip, GLOBAL_MONITOR_CONTROL_0180C280000002XHI, 0xff);
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000002XHI;
+ err = mv88e6390_g1_monitor_write(chip, ptr, 0xff);
+ if (err)
+ return err;
+
+ return 0;
}
/* Offset 0x1c: Global Control 2 */
@@ -315,13 +355,13 @@
u16 val;
int err;
- err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL_2, &val);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL2, &val);
if (err)
return err;
- val |= GLOBAL_CONTROL_2_HIST_RX_TX;
+ val |= MV88E6XXX_G1_CTL2_HIST_RX_TX;
- err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL_2, val);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2, val);
return err;
}
@@ -330,7 +370,8 @@
int mv88e6xxx_g1_stats_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g1_wait(chip, GLOBAL_STATS_OP, GLOBAL_STATS_OP_BUSY);
+ return mv88e6xxx_g1_wait(chip, MV88E6XXX_G1_STATS_OP,
+ MV88E6XXX_G1_STATS_OP_BUSY);
}
int mv88e6xxx_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
@@ -338,9 +379,10 @@
int err;
/* Snapshot the hardware statistics counters for this port. */
- err = mv88e6xxx_g1_write(chip, GLOBAL_STATS_OP,
- GLOBAL_STATS_OP_CAPTURE_PORT |
- GLOBAL_STATS_OP_HIST_RX_TX | port);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP,
+ MV88E6XXX_G1_STATS_OP_BUSY |
+ MV88E6XXX_G1_STATS_OP_CAPTURE_PORT |
+ MV88E6XXX_G1_STATS_OP_HIST_RX_TX | port);
if (err)
return err;
@@ -362,8 +404,9 @@
port = (port + 1) << 5;
/* Snapshot the hardware statistics counters for this port. */
- err = mv88e6xxx_g1_write(chip, GLOBAL_STATS_OP,
- GLOBAL_STATS_OP_CAPTURE_PORT | port);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP,
+ MV88E6XXX_G1_STATS_OP_BUSY |
+ MV88E6XXX_G1_STATS_OP_CAPTURE_PORT | port);
if (err)
return err;
@@ -379,8 +422,9 @@
*val = 0;
- err = mv88e6xxx_g1_write(chip, GLOBAL_STATS_OP,
- GLOBAL_STATS_OP_READ_CAPTURED | stat);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP,
+ MV88E6XXX_G1_STATS_OP_BUSY |
+ MV88E6XXX_G1_STATS_OP_READ_CAPTURED | stat);
if (err)
return;
@@ -388,13 +432,13 @@
if (err)
return;
- err = mv88e6xxx_g1_read(chip, GLOBAL_STATS_COUNTER_32, &reg);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STATS_COUNTER_32, &reg);
if (err)
return;
value = reg << 16;
- err = mv88e6xxx_g1_read(chip, GLOBAL_STATS_COUNTER_01, &reg);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STATS_COUNTER_01, &reg);
if (err)
return;
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 46a4ea0..950b914 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -15,12 +15,216 @@
#ifndef _MV88E6XXX_GLOBAL1_H
#define _MV88E6XXX_GLOBAL1_H
-#include "mv88e6xxx.h"
+#include "chip.h"
+
+/* Offset 0x00: Switch Global Status Register */
+#define MV88E6XXX_G1_STS 0x00
+#define MV88E6352_G1_STS_PPU_STATE 0x8000
+#define MV88E6185_G1_STS_PPU_STATE_MASK 0xc000
+#define MV88E6185_G1_STS_PPU_STATE_DISABLED_RST 0x0000
+#define MV88E6185_G1_STS_PPU_STATE_INITIALIZING 0x4000
+#define MV88E6185_G1_STS_PPU_STATE_DISABLED 0x8000
+#define MV88E6185_G1_STS_PPU_STATE_POLLING 0xc000
+#define MV88E6XXX_G1_STS_INIT_READY 0x0800
+#define MV88E6XXX_G1_STS_IRQ_AVB 8
+#define MV88E6XXX_G1_STS_IRQ_DEVICE 7
+#define MV88E6XXX_G1_STS_IRQ_STATS 6
+#define MV88E6XXX_G1_STS_IRQ_VTU_PROBLEM 5
+#define MV88E6XXX_G1_STS_IRQ_VTU_DONE 4
+#define MV88E6XXX_G1_STS_IRQ_ATU_PROBLEM 3
+#define MV88E6XXX_G1_STS_IRQ_ATU_DONE 2
+#define MV88E6XXX_G1_STS_IRQ_TCAM_DONE 1
+#define MV88E6XXX_G1_STS_IRQ_EEPROM_DONE 0
+
+/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
+ * Offset 0x02: Switch MAC Address Register Bytes 2 & 3
+ * Offset 0x03: Switch MAC Address Register Bytes 4 & 5
+ */
+#define MV88E6XXX_G1_MAC_01 0x01
+#define MV88E6XXX_G1_MAC_23 0x02
+#define MV88E6XXX_G1_MAC_45 0x03
+
+/* Offset 0x01: ATU FID Register */
+#define MV88E6352_G1_ATU_FID 0x01
+
+/* Offset 0x02: VTU FID Register */
+#define MV88E6352_G1_VTU_FID 0x02
+#define MV88E6352_G1_VTU_FID_MASK 0x0fff
+
+/* Offset 0x03: VTU SID Register */
+#define MV88E6352_G1_VTU_SID 0x03
+#define MV88E6352_G1_VTU_SID_MASK 0x3f
+
+/* Offset 0x04: Switch Global Control Register */
+#define MV88E6XXX_G1_CTL1 0x04
+#define MV88E6XXX_G1_CTL1_SW_RESET 0x8000
+#define MV88E6XXX_G1_CTL1_PPU_ENABLE 0x4000
+#define MV88E6352_G1_CTL1_DISCARD_EXCESS 0x2000
+#define MV88E6185_G1_CTL1_SCHED_PRIO 0x0800
+#define MV88E6185_G1_CTL1_MAX_FRAME_1632 0x0400
+#define MV88E6185_G1_CTL1_RELOAD_EEPROM 0x0200
+#define MV88E6XXX_G1_CTL1_DEVICE_EN 0x0080
+#define MV88E6XXX_G1_CTL1_STATS_DONE_EN 0x0040
+#define MV88E6XXX_G1_CTL1_VTU_PROBLEM_EN 0x0020
+#define MV88E6XXX_G1_CTL1_VTU_DONE_EN 0x0010
+#define MV88E6XXX_G1_CTL1_ATU_PROBLEM_EN 0x0008
+#define MV88E6XXX_G1_CTL1_ATU_DONE_EN 0x0004
+#define MV88E6XXX_G1_CTL1_TCAM_EN 0x0002
+#define MV88E6XXX_G1_CTL1_EEPROM_DONE_EN 0x0001
+
+/* Offset 0x05: VTU Operation Register */
+#define MV88E6XXX_G1_VTU_OP 0x05
+#define MV88E6XXX_G1_VTU_OP_BUSY 0x8000
+#define MV88E6XXX_G1_VTU_OP_MASK 0x7000
+#define MV88E6XXX_G1_VTU_OP_FLUSH_ALL 0x1000
+#define MV88E6XXX_G1_VTU_OP_NOOP 0x2000
+#define MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE 0x3000
+#define MV88E6XXX_G1_VTU_OP_VTU_GET_NEXT 0x4000
+#define MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE 0x5000
+#define MV88E6XXX_G1_VTU_OP_STU_GET_NEXT 0x6000
+
+/* Offset 0x06: VTU VID Register */
+#define MV88E6XXX_G1_VTU_VID 0x06
+#define MV88E6XXX_G1_VTU_VID_MASK 0x0fff
+#define MV88E6390_G1_VTU_VID_PAGE 0x2000
+#define MV88E6XXX_G1_VTU_VID_VALID 0x1000
+
+/* Offset 0x07: VTU/STU Data Register 1
+ * Offset 0x08: VTU/STU Data Register 2
+ * Offset 0x09: VTU/STU Data Register 3
+ */
+#define MV88E6XXX_G1_VTU_DATA1 0x07
+#define MV88E6XXX_G1_VTU_DATA2 0x08
+#define MV88E6XXX_G1_VTU_DATA3 0x09
+#define MV88E6XXX_G1_VTU_STU_DATA_MASK 0x0003
+#define MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED 0x0000
+#define MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNTAGGED 0x0001
+#define MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_TAGGED 0x0002
+#define MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER 0x0003
+#define MV88E6XXX_G1_STU_DATA_PORT_STATE_DISABLED 0x0000
+#define MV88E6XXX_G1_STU_DATA_PORT_STATE_BLOCKING 0x0001
+#define MV88E6XXX_G1_STU_DATA_PORT_STATE_LEARNING 0x0002
+#define MV88E6XXX_G1_STU_DATA_PORT_STATE_FORWARDING 0x0003
+
+/* Offset 0x0A: ATU Control Register */
+#define MV88E6XXX_G1_ATU_CTL 0x0a
+#define MV88E6XXX_G1_ATU_CTL_LEARN2ALL 0x0008
+
+/* Offset 0x0B: ATU Operation Register */
+#define MV88E6XXX_G1_ATU_OP 0x0b
+#define MV88E6XXX_G1_ATU_OP_BUSY 0x8000
+#define MV88E6XXX_G1_ATU_OP_MASK 0x7000
+#define MV88E6XXX_G1_ATU_OP_NOOP 0x0000
+#define MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_ALL 0x1000
+#define MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_NON_STATIC 0x2000
+#define MV88E6XXX_G1_ATU_OP_LOAD_DB 0x3000
+#define MV88E6XXX_G1_ATU_OP_GET_NEXT_DB 0x4000
+#define MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_ALL_DB 0x5000
+#define MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_NON_STATIC_DB 0x6000
+#define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000
+
+/* Offset 0x0C: ATU Data Register */
+#define MV88E6XXX_G1_ATU_DATA 0x0c
+#define MV88E6XXX_G1_ATU_DATA_TRUNK 0x8000
+#define MV88E6XXX_G1_ATU_DATA_TRUNK_ID_MASK 0x00f0
+#define MV88E6XXX_G1_ATU_DATA_PORT_VECTOR_MASK 0x3ff0
+#define MV88E6XXX_G1_ATU_DATA_STATE_MASK 0x000f
+#define MV88E6XXX_G1_ATU_DATA_STATE_UNUSED 0x0000
+#define MV88E6XXX_G1_ATU_DATA_STATE_UC_MGMT 0x000d
+#define MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC 0x000e
+#define MV88E6XXX_G1_ATU_DATA_STATE_UC_PRIO_OVER 0x000f
+#define MV88E6XXX_G1_ATU_DATA_STATE_MC_NONE_RATE 0x0005
+#define MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC 0x0007
+#define MV88E6XXX_G1_ATU_DATA_STATE_MC_MGMT 0x000e
+#define MV88E6XXX_G1_ATU_DATA_STATE_MC_PRIO_OVER 0x000f
+
+/* Offset 0x0D: ATU MAC Address Register Bytes 0 & 1
+ * Offset 0x0E: ATU MAC Address Register Bytes 2 & 3
+ * Offset 0x0F: ATU MAC Address Register Bytes 4 & 5
+ */
+#define MV88E6XXX_G1_ATU_MAC01 0x0d
+#define MV88E6XXX_G1_ATU_MAC23 0x0e
+#define MV88E6XXX_G1_ATU_MAC45 0x0f
+
+/* Offset 0x10: IP-PRI Mapping Register 0
+ * Offset 0x11: IP-PRI Mapping Register 1
+ * Offset 0x12: IP-PRI Mapping Register 2
+ * Offset 0x13: IP-PRI Mapping Register 3
+ * Offset 0x14: IP-PRI Mapping Register 4
+ * Offset 0x15: IP-PRI Mapping Register 5
+ * Offset 0x16: IP-PRI Mapping Register 6
+ * Offset 0x17: IP-PRI Mapping Register 7
+ */
+#define MV88E6XXX_G1_IP_PRI_0 0x10
+#define MV88E6XXX_G1_IP_PRI_1 0x11
+#define MV88E6XXX_G1_IP_PRI_2 0x12
+#define MV88E6XXX_G1_IP_PRI_3 0x13
+#define MV88E6XXX_G1_IP_PRI_4 0x14
+#define MV88E6XXX_G1_IP_PRI_5 0x15
+#define MV88E6XXX_G1_IP_PRI_6 0x16
+#define MV88E6XXX_G1_IP_PRI_7 0x17
+
+/* Offset 0x18: IEEE-PRI Register */
+#define MV88E6XXX_G1_IEEE_PRI 0x18
+
+/* Offset 0x19: Core Tag Type */
+#define MV88E6185_G1_CORE_TAG_TYPE 0x19
+
+/* Offset 0x1A: Monitor Control */
+#define MV88E6185_G1_MONITOR_CTL 0x1a
+#define MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK 0xf000
+#define MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK 0x0f00
+#define MV88E6185_G1_MONITOR_CTL_ARP_DEST_MASK 0x00f0
+#define MV88E6352_G1_MONITOR_CTL_CPU_DEST_MASK 0x00f0
+#define MV88E6352_G1_MONITOR_CTL_MIRROR_DEST_MASK 0x000f
+
+/* Offset 0x1A: Monitor & MGMT Control Register */
+#define MV88E6390_G1_MONITOR_MGMT_CTL 0x1a
+#define MV88E6390_G1_MONITOR_MGMT_CTL_UPDATE 0x8000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_MASK 0x3f00
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000000XLO 0x0000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000000XHI 0x0100
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000002XLO 0x0200
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000002XHI 0x0300
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK 0x00ff
+
+/* Offset 0x1C: Global Control 2 */
+#define MV88E6XXX_G1_CTL2 0x1c
+#define MV88E6XXX_G1_CTL2_NO_CASCADE 0xe000
+#define MV88E6XXX_G1_CTL2_MULTIPLE_CASCADE 0xf000
+#define MV88E6XXX_G1_CTL2_HIST_RX 0x0040
+#define MV88E6XXX_G1_CTL2_HIST_TX 0x0080
+#define MV88E6XXX_G1_CTL2_HIST_RX_TX 0x00c0
+
+/* Offset 0x1D: Stats Operation Register */
+#define MV88E6XXX_G1_STATS_OP 0x1d
+#define MV88E6XXX_G1_STATS_OP_BUSY 0x8000
+#define MV88E6XXX_G1_STATS_OP_NOP 0x0000
+#define MV88E6XXX_G1_STATS_OP_FLUSH_ALL 0x1000
+#define MV88E6XXX_G1_STATS_OP_FLUSH_PORT 0x2000
+#define MV88E6XXX_G1_STATS_OP_READ_CAPTURED 0x4000
+#define MV88E6XXX_G1_STATS_OP_CAPTURE_PORT 0x5000
+#define MV88E6XXX_G1_STATS_OP_HIST_RX 0x0400
+#define MV88E6XXX_G1_STATS_OP_HIST_TX 0x0800
+#define MV88E6XXX_G1_STATS_OP_HIST_RX_TX 0x0c00
+#define MV88E6XXX_G1_STATS_OP_BANK_1_BIT_9 0x0200
+#define MV88E6XXX_G1_STATS_OP_BANK_1_BIT_10 0x0400
+
+/* Offset 0x1E: Stats Counter Register Bytes 3 & 2
+ * Offset 0x1F: Stats Counter Register Bytes 1 & 0
+ */
+#define MV88E6XXX_G1_STATS_COUNTER_32 0x1e
+#define MV88E6XXX_G1_STATS_COUNTER_01 0x1f
int mv88e6xxx_g1_read(struct mv88e6xxx_chip *chip, int reg, u16 *val);
int mv88e6xxx_g1_write(struct mv88e6xxx_chip *chip, int reg, u16 val);
int mv88e6xxx_g1_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask);
+int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
+
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
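The MV88E6390 Monitor & MGMT Control register defined above is a pointer/data window: a single 16-bit write carries the Update bit, a pointer selecting the entry, and 8 bits of data. A standalone sketch of how such a word could be assembled from these macros (the monitor_mgmt_word() helper is illustrative, not part of the driver):

#include <stdint.h>
#include <stdio.h>

#define MV88E6390_G1_MONITOR_MGMT_CTL_UPDATE			0x8000
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_MASK			0x3f00
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000002XLO	0x0200
#define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK			0x00ff

/* Illustrative helper: build the one-shot pointer/data word for a
 * Monitor & MGMT Control update (sketch, not the driver code).
 */
static uint16_t monitor_mgmt_word(uint16_t ptr, uint8_t data)
{
	return MV88E6390_G1_MONITOR_MGMT_CTL_UPDATE |
	       (ptr & MV88E6390_G1_MONITOR_MGMT_CTL_PTR_MASK) |
	       (data & MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK);
}

int main(void)
{
	/* Mark 01:c2:80:00:00:00:20-27 as management, as in the patch */
	uint16_t w = monitor_mgmt_word(
		MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000002XLO, 0xff);

	printf("0x%04x\n", w);	/* 0x82ff */
	return 0;
}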
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index fa7e7db..efeef4b 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -10,14 +10,14 @@
* (at your option) any later version.
*/
-#include "mv88e6xxx.h"
+#include "chip.h"
#include "global1.h"
/* Offset 0x01: ATU FID Register */
static int mv88e6xxx_g1_atu_fid_write(struct mv88e6xxx_chip *chip, u16 fid)
{
- return mv88e6xxx_g1_write(chip, GLOBAL_ATU_FID, fid & 0xfff);
+ return mv88e6xxx_g1_write(chip, MV88E6352_G1_ATU_FID, fid & 0xfff);
}
/* Offset 0x0A: ATU Control Register */
@@ -27,16 +27,16 @@
u16 val;
int err;
- err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL, &val);
if (err)
return err;
if (learn2all)
- val |= GLOBAL_ATU_CONTROL_LEARN2ALL;
+ val |= MV88E6XXX_G1_ATU_CTL_LEARN2ALL;
else
- val &= ~GLOBAL_ATU_CONTROL_LEARN2ALL;
+ val &= ~MV88E6XXX_G1_ATU_CTL_LEARN2ALL;
- return mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
+ return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_CTL, val);
}
int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip,
@@ -55,7 +55,7 @@
/* Round to nearest multiple of coeff */
age_time = (msecs + coeff / 2) / coeff;
- err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL, &val);
if (err)
return err;
@@ -63,7 +63,7 @@
val &= ~0xff0;
val |= age_time << 4;
- err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_CTL, val);
if (err)
return err;
@@ -77,7 +77,8 @@
static int mv88e6xxx_g1_atu_op_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g1_wait(chip, GLOBAL_ATU_OP, GLOBAL_ATU_OP_BUSY);
+ return mv88e6xxx_g1_wait(chip, MV88E6XXX_G1_ATU_OP,
+ MV88E6XXX_G1_ATU_OP_BUSY);
}
static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
@@ -93,12 +94,14 @@
} else {
if (mv88e6xxx_num_databases(chip) > 16) {
/* ATU DBNum[7:4] are located in ATU Control 15:12 */
- err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL,
+ &val);
if (err)
return err;
val = (val & 0x0fff) | ((fid << 8) & 0xf000);
- err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_CTL,
+ val);
if (err)
return err;
}
@@ -107,7 +110,8 @@
op |= fid & 0xf;
}
- err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_OP, op);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_OP,
+ MV88E6XXX_G1_ATU_OP_BUSY | op);
if (err)
return err;
@@ -122,13 +126,13 @@
u16 val;
int err;
- err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_DATA, &val);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_DATA, &val);
if (err)
return err;
entry->state = val & 0xf;
- if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
- entry->trunk = !!(val & GLOBAL_ATU_DATA_TRUNK);
+ if (entry->state != MV88E6XXX_G1_ATU_DATA_STATE_UNUSED) {
+ entry->trunk = !!(val & MV88E6XXX_G1_ATU_DATA_TRUNK);
entry->portvec = (val >> 4) & mv88e6xxx_port_mask(chip);
}
@@ -140,14 +144,14 @@
{
u16 data = entry->state & 0xf;
- if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+ if (entry->state != MV88E6XXX_G1_ATU_DATA_STATE_UNUSED) {
if (entry->trunk)
- data |= GLOBAL_ATU_DATA_TRUNK;
+ data |= MV88E6XXX_G1_ATU_DATA_TRUNK;
data |= (entry->portvec & mv88e6xxx_port_mask(chip)) << 4;
}
- return mv88e6xxx_g1_write(chip, GLOBAL_ATU_DATA, data);
+ return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_DATA, data);
}
/* Offset 0x0D: ATU MAC Address Register Bytes 0 & 1
@@ -162,7 +166,7 @@
int i, err;
for (i = 0; i < 3; i++) {
- err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_MAC_01 + i, &val);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_MAC01 + i, &val);
if (err)
return err;
@@ -181,7 +185,7 @@
for (i = 0; i < 3; i++) {
val = (entry->mac[i * 2] << 8) | entry->mac[i * 2 + 1];
- err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_MAC_01 + i, val);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_MAC01 + i, val);
if (err)
return err;
}
@@ -201,13 +205,13 @@
return err;
/* Write the MAC address to iterate from only once */
- if (entry->state == GLOBAL_ATU_DATA_STATE_UNUSED) {
+ if (entry->state == MV88E6XXX_G1_ATU_DATA_STATE_UNUSED) {
err = mv88e6xxx_g1_atu_mac_write(chip, entry);
if (err)
return err;
}
- err = mv88e6xxx_g1_atu_op(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
+ err = mv88e6xxx_g1_atu_op(chip, fid, MV88E6XXX_G1_ATU_OP_GET_NEXT_DB);
if (err)
return err;
@@ -235,7 +239,7 @@
if (err)
return err;
- return mv88e6xxx_g1_atu_op(chip, fid, GLOBAL_ATU_OP_LOAD_DB);
+ return mv88e6xxx_g1_atu_op(chip, fid, MV88E6XXX_G1_ATU_OP_LOAD_DB);
}
static int mv88e6xxx_g1_atu_flushmove(struct mv88e6xxx_chip *chip, u16 fid,
@@ -255,13 +259,13 @@
/* Flush/Move all or non-static entries from all or a given database */
if (all && fid)
- op = GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB;
+ op = MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_ALL_DB;
else if (fid)
- op = GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
+ op = MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
else if (all)
- op = GLOBAL_ATU_OP_FLUSH_MOVE_ALL;
+ op = MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_ALL;
else
- op = GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
+ op = MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_NON_STATIC;
return mv88e6xxx_g1_atu_op(chip, fid, op);
}
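On chips with more than 16 databases and no dedicated FID register, mv88e6xxx_g1_atu_op() above splits the 8-bit DBNum: bits 7:4 go into ATU Control 15:12 and bits 3:0 into the operation word. A standalone sketch of that split, using a hypothetical FID and register contents:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t fid = 0xa5;		/* hypothetical 8-bit DBNum */
	uint16_t atu_ctl = 0x0123;	/* hypothetical current ATU Control value */
	uint16_t op = 0x3000;		/* MV88E6XXX_G1_ATU_OP_LOAD_DB */

	/* DBNum[7:4] into ATU Control bits 15:12, as in mv88e6xxx_g1_atu_op() */
	atu_ctl = (atu_ctl & 0x0fff) | ((fid << 8) & 0xf000);
	/* DBNum[3:0] into the operation word */
	op |= fid & 0xf;

	printf("ctl=0x%04x op=0x%04x\n", atu_ctl, op);	/* ctl=0xa123 op=0x3005 */
	return 0;
}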
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index 9aea22d..8c8a0ec3 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -11,7 +11,7 @@
* (at your option) any later version.
*/
-#include "mv88e6xxx.h"
+#include "chip.h"
#include "global1.h"
/* Offset 0x02: VTU FID Register */
@@ -22,11 +22,11 @@
u16 val;
int err;
- err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_FID, &val);
+ err = mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_FID, &val);
if (err)
return err;
- entry->fid = val & GLOBAL_VTU_FID_MASK;
+ entry->fid = val & MV88E6352_G1_VTU_FID_MASK;
return 0;
}
@@ -34,9 +34,9 @@
static int mv88e6xxx_g1_vtu_fid_write(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
- u16 val = entry->fid & GLOBAL_VTU_FID_MASK;
+ u16 val = entry->fid & MV88E6352_G1_VTU_FID_MASK;
- return mv88e6xxx_g1_write(chip, GLOBAL_VTU_FID, val);
+ return mv88e6xxx_g1_write(chip, MV88E6352_G1_VTU_FID, val);
}
/* Offset 0x03: VTU SID Register */
@@ -47,11 +47,11 @@
u16 val;
int err;
- err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_SID, &val);
+ err = mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_SID, &val);
if (err)
return err;
- entry->sid = val & GLOBAL_VTU_SID_MASK;
+ entry->sid = val & MV88E6352_G1_VTU_SID_MASK;
return 0;
}
@@ -59,23 +59,25 @@
static int mv88e6xxx_g1_vtu_sid_write(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
- u16 val = entry->sid & GLOBAL_VTU_SID_MASK;
+ u16 val = entry->sid & MV88E6352_G1_VTU_SID_MASK;
- return mv88e6xxx_g1_write(chip, GLOBAL_VTU_SID, val);
+ return mv88e6xxx_g1_write(chip, MV88E6352_G1_VTU_SID, val);
}
/* Offset 0x05: VTU Operation Register */
static int mv88e6xxx_g1_vtu_op_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g1_wait(chip, GLOBAL_VTU_OP, GLOBAL_VTU_OP_BUSY);
+ return mv88e6xxx_g1_wait(chip, MV88E6XXX_G1_VTU_OP,
+ MV88E6XXX_G1_VTU_OP_BUSY);
}
static int mv88e6xxx_g1_vtu_op(struct mv88e6xxx_chip *chip, u16 op)
{
int err;
- err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_OP, op);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_VTU_OP,
+ MV88E6XXX_G1_VTU_OP_BUSY | op);
if (err)
return err;
@@ -90,16 +92,16 @@
u16 val;
int err;
- err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_VID, &val);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_VID, &val);
if (err)
return err;
entry->vid = val & 0xfff;
- if (val & GLOBAL_VTU_VID_PAGE)
+ if (val & MV88E6390_G1_VTU_VID_PAGE)
entry->vid |= 0x1000;
- entry->valid = !!(val & GLOBAL_VTU_VID_VALID);
+ entry->valid = !!(val & MV88E6XXX_G1_VTU_VID_VALID);
return 0;
}
@@ -110,12 +112,12 @@
u16 val = entry->vid & 0xfff;
if (entry->vid & 0x1000)
- val |= GLOBAL_VTU_VID_PAGE;
+ val |= MV88E6390_G1_VTU_VID_PAGE;
if (entry->valid)
- val |= GLOBAL_VTU_VID_VALID;
+ val |= MV88E6XXX_G1_VTU_VID_VALID;
- return mv88e6xxx_g1_write(chip, GLOBAL_VTU_VID, val);
+ return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_VTU_VID, val);
}
/* Offset 0x07: VTU/STU Data Register 1
@@ -134,7 +136,7 @@
u16 *reg = &regs[i];
int err;
- err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_DATA_0_3 + i, reg);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA1 + i, reg);
if (err)
return err;
}
@@ -171,7 +173,7 @@
u16 reg = regs[i];
int err;
- err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_DATA_0_3 + i, reg);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_VTU_DATA1 + i, reg);
if (err)
return err;
}
@@ -189,7 +191,7 @@
u16 *reg = &regs[i];
int err;
- err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_DATA_0_3 + i, reg);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA1 + i, reg);
if (err)
return err;
}
@@ -221,7 +223,7 @@
u16 reg = regs[i];
int err;
- err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_DATA_0_3 + i, reg);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_VTU_DATA1 + i, reg);
if (err)
return err;
}
@@ -240,7 +242,7 @@
if (err)
return err;
- err = mv88e6xxx_g1_vtu_op(chip, GLOBAL_VTU_OP_STU_GET_NEXT);
+ err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_GET_NEXT);
if (err)
return err;
@@ -295,7 +297,7 @@
return err;
}
- err = mv88e6xxx_g1_vtu_op(chip, GLOBAL_VTU_OP_VTU_GET_NEXT);
+ err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_VTU_GET_NEXT);
if (err)
return err;
@@ -320,7 +322,7 @@
/* VTU DBNum[3:0] are located in VTU Operation 3:0
* VTU DBNum[7:4] are located in VTU Operation 11:8
*/
- err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_OP, &val);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_OP, &val);
if (err)
return err;
@@ -394,7 +396,7 @@
int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
- u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE;
+ u16 op = MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE;
int err;
err = mv88e6xxx_g1_vtu_op_wait(chip);
@@ -444,7 +446,8 @@
return err;
/* Load STU entry */
- err = mv88e6xxx_g1_vtu_op(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE);
+ err = mv88e6xxx_g1_vtu_op(chip,
+ MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
if (err)
return err;
@@ -454,7 +457,7 @@
}
/* Load/Purge VTU entry */
- return mv88e6xxx_g1_vtu_op(chip, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE);
}
int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
@@ -481,7 +484,8 @@
return err;
/* Load STU entry */
- err = mv88e6xxx_g1_vtu_op(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE);
+ err = mv88e6xxx_g1_vtu_op(chip,
+ MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
if (err)
return err;
@@ -496,7 +500,7 @@
}
/* Load/Purge VTU entry */
- return mv88e6xxx_g1_vtu_op(chip, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE);
}
int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
@@ -507,5 +511,5 @@
if (err)
return err;
- return mv88e6xxx_g1_vtu_op(chip, GLOBAL_VTU_OP_FLUSH_ALL);
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_FLUSH_ALL);
}
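The VID handling above extends the VID space to 13 bits on the 6390 family: VID[11:0] live in the low bits, VID[12] rides in the Page bit, and 0x1000 is the Valid flag. A quick sketch of the encode/decode pair, with an assumed example VID:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MV88E6XXX_G1_VTU_VID_MASK	0x0fff
#define MV88E6390_G1_VTU_VID_PAGE	0x2000
#define MV88E6XXX_G1_VTU_VID_VALID	0x1000

int main(void)
{
	uint16_t vid = 0x1abc;	/* hypothetical 13-bit VID */
	bool valid = true;

	/* Encode, as in mv88e6xxx_g1_vtu_vid_write() */
	uint16_t val = vid & MV88E6XXX_G1_VTU_VID_MASK;
	if (vid & 0x1000)
		val |= MV88E6390_G1_VTU_VID_PAGE;
	if (valid)
		val |= MV88E6XXX_G1_VTU_VID_VALID;

	/* Decode, as in mv88e6xxx_g1_vtu_vid_read() */
	uint16_t back = val & MV88E6XXX_G1_VTU_VID_MASK;
	if (val & MV88E6390_G1_VTU_VID_PAGE)
		back |= 0x1000;

	printf("val=0x%04x vid=0x%04x valid=%d\n", val, back,
	       !!(val & MV88E6XXX_G1_VTU_VID_VALID));	/* val=0x3abc vid=0x1abc valid=1 */
	return 0;
}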
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index b3fea55..158d0f4 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -1,6 +1,5 @@
/*
- * Marvell 88E6xxx Switch Global 2 Registers support (device address
- * 0x1C)
+ * Marvell 88E6xxx Switch Global 2 Registers support
*
* Copyright (c) 2008 Marvell Semiconductor
*
@@ -13,31 +12,32 @@
* (at your option) any later version.
*/
+#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
-#include "mv88e6xxx.h"
-#include "global2.h"
-#define ADDR_GLOBAL2 0x1c
+#include "chip.h"
+#include "global1.h" /* for MV88E6XXX_G1_STS_IRQ_DEVICE */
+#include "global2.h"
static int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val)
{
- return mv88e6xxx_read(chip, ADDR_GLOBAL2, reg, val);
+ return mv88e6xxx_read(chip, MV88E6XXX_G2, reg, val);
}
static int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
{
- return mv88e6xxx_write(chip, ADDR_GLOBAL2, reg, val);
+ return mv88e6xxx_write(chip, MV88E6XXX_G2, reg, val);
}
static int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update)
{
- return mv88e6xxx_update(chip, ADDR_GLOBAL2, reg, update);
+ return mv88e6xxx_update(chip, MV88E6XXX_G2, reg, update);
}
static int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask)
{
- return mv88e6xxx_wait(chip, ADDR_GLOBAL2, reg, mask);
+ return mv88e6xxx_wait(chip, MV88E6XXX_G2, reg, mask);
}
/* Offset 0x02: Management Enable 2x */
@@ -51,7 +51,7 @@
* addresses matching 01:80:c2:00:00:2x as MGMT.
*/
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X)) {
- err = mv88e6xxx_g2_write(chip, GLOBAL2_MGMT_EN_2X, 0xffff);
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MGMT_EN_2X, 0xffff);
if (err)
return err;
}
@@ -60,7 +60,8 @@
* addresses matching 01:80:c2:00:00:0x as MGMT.
*/
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X))
- return mv88e6xxx_g2_write(chip, GLOBAL2_MGMT_EN_0X, 0xffff);
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MGMT_EN_0X,
+ 0xffff);
return 0;
}
@@ -72,7 +73,7 @@
{
u16 val = (target << 8) | (port & 0xf);
- return mv88e6xxx_g2_update(chip, GLOBAL2_DEVICE_MAPPING, val);
+ return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_DEVICE_MAPPING, val);
}
static int mv88e6xxx_g2_set_device_mapping(struct mv88e6xxx_chip *chip)
@@ -101,15 +102,14 @@
/* Offset 0x07: Trunk Mask Table register */
static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
- bool hask, u16 mask)
+ bool hash, u16 mask)
{
- const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
- u16 val = (num << 12) | (mask & port_mask);
+ u16 val = (num << 12) | (mask & mv88e6xxx_port_mask(chip));
- if (hask)
- val |= GLOBAL2_TRUNK_MASK_HASK;
+ if (hash)
+ val |= MV88E6XXX_G2_TRUNK_MASK_HASH;
- return mv88e6xxx_g2_update(chip, GLOBAL2_TRUNK_MASK, val);
+ return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_TRUNK_MASK, val);
}
/* Offset 0x08: Trunk Mapping Table register */
@@ -120,7 +120,7 @@
const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
u16 val = (id << 11) | (map & port_mask);
- return mv88e6xxx_g2_update(chip, GLOBAL2_TRUNK_MAPPING, val);
+ return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_TRUNK_MAPPING, val);
}
static int mv88e6xxx_g2_clear_trunk(struct mv88e6xxx_chip *chip)
@@ -149,27 +149,36 @@
* Offset 0x0A: Ingress Rate Data register
*/
-static int mv88e6xxx_g2_clear_irl(struct mv88e6xxx_chip *chip)
+static int mv88e6xxx_g2_irl_wait(struct mv88e6xxx_chip *chip)
{
- int port, err;
+ return mv88e6xxx_g2_wait(chip, MV88E6XXX_G2_IRL_CMD,
+ MV88E6XXX_G2_IRL_CMD_BUSY);
+}
- /* Init all Ingress Rate Limit resources of all ports */
- for (port = 0; port < mv88e6xxx_num_ports(chip); ++port) {
- /* XXX newer chips (like 88E6390) have different 2-bit ops */
- err = mv88e6xxx_g2_write(chip, GLOBAL2_IRL_CMD,
- GLOBAL2_IRL_CMD_OP_INIT_ALL |
- (port << 8));
- if (err)
- break;
+static int mv88e6xxx_g2_irl_op(struct mv88e6xxx_chip *chip, u16 op, int port,
+ int res, int reg)
+{
+ int err;
- /* Wait for the operation to complete */
- err = mv88e6xxx_g2_wait(chip, GLOBAL2_IRL_CMD,
- GLOBAL2_IRL_CMD_BUSY);
- if (err)
- break;
- }
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_IRL_CMD,
+ MV88E6XXX_G2_IRL_CMD_BUSY | op | (port << 8) |
+ (res << 5) | reg);
+ if (err)
+ return err;
- return err;
+ return mv88e6xxx_g2_irl_wait(chip);
+}
+
+int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_g2_irl_op(chip, MV88E6352_G2_IRL_CMD_OP_INIT_ALL, port,
+ 0, 0);
+}
+
+int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_g2_irl_op(chip, MV88E6390_G2_IRL_CMD_OP_INIT_ALL, port,
+ 0, 0);
}
/* Offset 0x0B: Cross-chip Port VLAN (Addr) Register
@@ -178,7 +187,8 @@
static int mv88e6xxx_g2_pvt_op_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g2_wait(chip, GLOBAL2_PVT_ADDR, GLOBAL2_PVT_ADDR_BUSY);
+ return mv88e6xxx_g2_wait(chip, MV88E6XXX_G2_PVT_ADDR,
+ MV88E6XXX_G2_PVT_ADDR_BUSY);
}
static int mv88e6xxx_g2_pvt_op(struct mv88e6xxx_chip *chip, int src_dev,
@@ -186,13 +196,14 @@
{
int err;
- /* 9-bit Cross-chip PVT pointer: with GLOBAL2_MISC_5_BIT_PORT cleared,
- * source device is 5-bit, source port is 4-bit.
+ /* 9-bit Cross-chip PVT pointer: with MV88E6XXX_G2_MISC_5_BIT_PORT
+ * cleared, source device is 5-bit, source port is 4-bit.
*/
+ op |= MV88E6XXX_G2_PVT_ADDR_BUSY;
op |= (src_dev & 0x1f) << 4;
op |= (src_port & 0xf);
- err = mv88e6xxx_g2_write(chip, GLOBAL2_PVT_ADDR, op);
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_PVT_ADDR, op);
if (err)
return err;
@@ -208,12 +219,12 @@
if (err)
return err;
- err = mv88e6xxx_g2_write(chip, GLOBAL2_PVT_DATA, data);
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_PVT_DATA, data);
if (err)
return err;
return mv88e6xxx_g2_pvt_op(chip, src_dev, src_port,
- GLOBAL2_PVT_ADDR_OP_WRITE_PVLAN);
+ MV88E6XXX_G2_PVT_ADDR_OP_WRITE_PVLAN);
}
/* Offset 0x0D: Switch MAC/WoL/WoF register */
@@ -223,7 +234,7 @@
{
u16 val = (pointer << 8) | data;
- return mv88e6xxx_g2_update(chip, GLOBAL2_SWITCH_MAC, val);
+ return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_SWITCH_MAC, val);
}
int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
@@ -246,7 +257,7 @@
{
u16 val = (pointer << 8) | (data & 0x7);
- return mv88e6xxx_g2_update(chip, GLOBAL2_PRIO_OVERRIDE, val);
+ return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_PRIO_OVERRIDE, val);
}
static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip)
@@ -270,16 +281,17 @@
static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g2_wait(chip, GLOBAL2_EEPROM_CMD,
- GLOBAL2_EEPROM_CMD_BUSY |
- GLOBAL2_EEPROM_CMD_RUNNING);
+ return mv88e6xxx_g2_wait(chip, MV88E6XXX_G2_EEPROM_CMD,
+ MV88E6XXX_G2_EEPROM_CMD_BUSY |
+ MV88E6XXX_G2_EEPROM_CMD_RUNNING);
}
static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
{
int err;
- err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_CMD, cmd);
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_EEPROM_CMD,
+ MV88E6XXX_G2_EEPROM_CMD_BUSY | cmd);
if (err)
return err;
@@ -289,14 +301,14 @@
static int mv88e6xxx_g2_eeprom_read8(struct mv88e6xxx_chip *chip,
u16 addr, u8 *data)
{
- u16 cmd = GLOBAL2_EEPROM_CMD_OP_READ;
+ u16 cmd = MV88E6XXX_G2_EEPROM_CMD_OP_READ;
int err;
err = mv88e6xxx_g2_eeprom_wait(chip);
if (err)
return err;
- err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_ADDR, addr);
+ err = mv88e6xxx_g2_write(chip, MV88E6390_G2_EEPROM_ADDR, addr);
if (err)
return err;
@@ -304,7 +316,7 @@
if (err)
return err;
- err = mv88e6xxx_g2_read(chip, GLOBAL2_EEPROM_CMD, &cmd);
+ err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_EEPROM_CMD, &cmd);
if (err)
return err;
@@ -316,14 +328,15 @@
static int mv88e6xxx_g2_eeprom_write8(struct mv88e6xxx_chip *chip,
u16 addr, u8 data)
{
- u16 cmd = GLOBAL2_EEPROM_CMD_OP_WRITE | GLOBAL2_EEPROM_CMD_WRITE_EN;
+ u16 cmd = MV88E6XXX_G2_EEPROM_CMD_OP_WRITE |
+ MV88E6XXX_G2_EEPROM_CMD_WRITE_EN;
int err;
err = mv88e6xxx_g2_eeprom_wait(chip);
if (err)
return err;
- err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_ADDR, addr);
+ err = mv88e6xxx_g2_write(chip, MV88E6390_G2_EEPROM_ADDR, addr);
if (err)
return err;
@@ -333,7 +346,7 @@
static int mv88e6xxx_g2_eeprom_read16(struct mv88e6xxx_chip *chip,
u8 addr, u16 *data)
{
- u16 cmd = GLOBAL2_EEPROM_CMD_OP_READ | addr;
+ u16 cmd = MV88E6XXX_G2_EEPROM_CMD_OP_READ | addr;
int err;
err = mv88e6xxx_g2_eeprom_wait(chip);
@@ -344,20 +357,20 @@
if (err)
return err;
- return mv88e6xxx_g2_read(chip, GLOBAL2_EEPROM_DATA, data);
+ return mv88e6xxx_g2_read(chip, MV88E6352_G2_EEPROM_DATA, data);
}
static int mv88e6xxx_g2_eeprom_write16(struct mv88e6xxx_chip *chip,
u8 addr, u16 data)
{
- u16 cmd = GLOBAL2_EEPROM_CMD_OP_WRITE | addr;
+ u16 cmd = MV88E6XXX_G2_EEPROM_CMD_OP_WRITE | addr;
int err;
err = mv88e6xxx_g2_eeprom_wait(chip);
if (err)
return err;
- err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_DATA, data);
+ err = mv88e6xxx_g2_write(chip, MV88E6352_G2_EEPROM_DATA, data);
if (err)
return err;
@@ -469,11 +482,11 @@
int err;
/* Ensure the RO WriteEn bit is set */
- err = mv88e6xxx_g2_read(chip, GLOBAL2_EEPROM_CMD, &val);
+ err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_EEPROM_CMD, &val);
if (err)
return err;
- if (!(val & GLOBAL2_EEPROM_CMD_WRITE_EN))
+ if (!(val & MV88E6XXX_G2_EEPROM_CMD_WRITE_EN))
return -EROFS;
eeprom->len = 0;
@@ -532,178 +545,213 @@
static int mv88e6xxx_g2_smi_phy_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g2_wait(chip, GLOBAL2_SMI_PHY_CMD,
- GLOBAL2_SMI_PHY_CMD_BUSY);
+ return mv88e6xxx_g2_wait(chip, MV88E6XXX_G2_SMI_PHY_CMD,
+ MV88E6XXX_G2_SMI_PHY_CMD_BUSY);
}
static int mv88e6xxx_g2_smi_phy_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
{
int err;
- err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_CMD, cmd);
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SMI_PHY_CMD,
+ MV88E6XXX_G2_SMI_PHY_CMD_BUSY | cmd);
if (err)
return err;
return mv88e6xxx_g2_smi_phy_wait(chip);
}
-static int mv88e6xxx_g2_smi_phy_write_addr(struct mv88e6xxx_chip *chip,
- int addr, int device, int reg,
- bool external)
+static int mv88e6xxx_g2_smi_phy_access(struct mv88e6xxx_chip *chip,
+ bool external, bool c45, u16 op, int dev,
+ int reg)
{
- int cmd = SMI_CMD_OP_45_WRITE_ADDR | (addr << 5) | device;
- int err;
+ u16 cmd = op;
if (external)
- cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
+ cmd |= MV88E6390_G2_SMI_PHY_CMD_FUNC_EXTERNAL;
+ else
+ cmd |= MV88E6390_G2_SMI_PHY_CMD_FUNC_INTERNAL; /* empty mask */
- err = mv88e6xxx_g2_smi_phy_wait(chip);
- if (err)
- return err;
+ if (c45)
+ cmd |= MV88E6XXX_G2_SMI_PHY_CMD_MODE_45; /* empty mask */
+ else
+ cmd |= MV88E6XXX_G2_SMI_PHY_CMD_MODE_22;
- err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_DATA, reg);
- if (err)
- return err;
+ dev <<= __bf_shf(MV88E6XXX_G2_SMI_PHY_CMD_DEV_ADDR_MASK);
+ cmd |= dev & MV88E6XXX_G2_SMI_PHY_CMD_DEV_ADDR_MASK;
+ cmd |= reg & MV88E6XXX_G2_SMI_PHY_CMD_REG_ADDR_MASK;
return mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
}
-static int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
- int addr, int reg_c45, u16 *val,
- bool external)
+static int mv88e6xxx_g2_smi_phy_access_c22(struct mv88e6xxx_chip *chip,
+ bool external, u16 op, int dev,
+ int reg)
{
- int device = (reg_c45 >> 16) & 0x1f;
- int reg = reg_c45 & 0xffff;
- int err;
- u16 cmd;
-
- err = mv88e6xxx_g2_smi_phy_write_addr(chip, addr, device, reg,
- external);
- if (err)
- return err;
-
- cmd = GLOBAL2_SMI_PHY_CMD_OP_45_READ_DATA | (addr << 5) | device;
-
- if (external)
- cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
-
- err = mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
- if (err)
- return err;
-
- err = mv88e6xxx_g2_read(chip, GLOBAL2_SMI_PHY_DATA, val);
- if (err)
- return err;
-
- err = *val;
-
- return 0;
+ return mv88e6xxx_g2_smi_phy_access(chip, external, false, op, dev, reg);
}
-static int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip,
- int addr, int reg, u16 *val,
- bool external)
+/* IEEE 802.3 Clause 22 Read Data Register */
+static int mv88e6xxx_g2_smi_phy_read_data_c22(struct mv88e6xxx_chip *chip,
+ bool external, int dev, int reg,
+ u16 *data)
{
- u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA | (addr << 5) | reg;
+ u16 op = MV88E6XXX_G2_SMI_PHY_CMD_OP_22_READ_DATA;
int err;
- if (external)
- cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
-
err = mv88e6xxx_g2_smi_phy_wait(chip);
if (err)
return err;
- err = mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
+ err = mv88e6xxx_g2_smi_phy_access_c22(chip, external, op, dev, reg);
if (err)
return err;
- return mv88e6xxx_g2_read(chip, GLOBAL2_SMI_PHY_DATA, val);
+ return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SMI_PHY_DATA, data);
}
-int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
+/* IEEE 802.3 Clause 22 Write Data Register */
+static int mv88e6xxx_g2_smi_phy_write_data_c22(struct mv88e6xxx_chip *chip,
+ bool external, int dev, int reg,
+ u16 data)
+{
+ u16 op = MV88E6XXX_G2_SMI_PHY_CMD_OP_22_WRITE_DATA;
+ int err;
+
+ err = mv88e6xxx_g2_smi_phy_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SMI_PHY_DATA, data);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_smi_phy_access_c22(chip, external, op, dev, reg);
+}
+
+static int mv88e6xxx_g2_smi_phy_access_c45(struct mv88e6xxx_chip *chip,
+ bool external, u16 op, int port,
+ int dev)
+{
+ return mv88e6xxx_g2_smi_phy_access(chip, external, true, op, port, dev);
+}
+
+/* IEEE 802.3 Clause 45 Write Address Register */
+static int mv88e6xxx_g2_smi_phy_write_addr_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int dev,
+ int addr)
+{
+ u16 op = MV88E6XXX_G2_SMI_PHY_CMD_OP_45_WRITE_ADDR;
+ int err;
+
+ err = mv88e6xxx_g2_smi_phy_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SMI_PHY_DATA, addr);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_smi_phy_access_c45(chip, external, op, port, dev);
+}
+
+/* IEEE 802.3 Clause 45 Read Data Register */
+static int mv88e6xxx_g2_smi_phy_read_data_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int dev,
+ u16 *data)
+{
+ u16 op = MV88E6XXX_G2_SMI_PHY_CMD_OP_45_READ_DATA;
+ int err;
+
+ err = mv88e6xxx_g2_smi_phy_access_c45(chip, external, op, port, dev);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SMI_PHY_DATA, data);
+}
+
+static int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int reg,
+ u16 *data)
+{
+ int dev = (reg >> 16) & 0x1f;
+ int addr = reg & 0xffff;
+ int err;
+
+ err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, dev,
+ addr);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_smi_phy_read_data_c45(chip, external, port, dev,
+ data);
+}
+
+/* IEEE 802.3 Clause 45 Write Data Register */
+static int mv88e6xxx_g2_smi_phy_write_data_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int dev,
+ u16 data)
+{
+ u16 op = MV88E6XXX_G2_SMI_PHY_CMD_OP_45_WRITE_DATA;
+ int err;
+
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SMI_PHY_DATA, data);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_smi_phy_access_c45(chip, external, op, port, dev);
+}
+
+static int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int reg,
+ u16 data)
+{
+ int dev = (reg >> 16) & 0x1f;
+ int addr = reg & 0xffff;
+ int err;
+
+ err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, dev,
+ addr);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_smi_phy_write_data_c45(chip, external, port, dev,
+ data);
+}
+
+int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
int addr, int reg, u16 *val)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
bool external = mdio_bus->external;
if (reg & MII_ADDR_C45)
- return mv88e6xxx_g2_smi_phy_read_c45(chip, addr, reg, val,
- external);
- return mv88e6xxx_g2_smi_phy_read_c22(chip, addr, reg, val, external);
+ return mv88e6xxx_g2_smi_phy_read_c45(chip, external, addr, reg,
+ val);
+
+ return mv88e6xxx_g2_smi_phy_read_data_c22(chip, external, addr, reg,
+ val);
}
-static int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
- int addr, int reg_c45, u16 val,
- bool external)
-{
- int device = (reg_c45 >> 16) & 0x1f;
- int reg = reg_c45 & 0xffff;
- int err;
- u16 cmd;
-
- err = mv88e6xxx_g2_smi_phy_write_addr(chip, addr, device, reg,
- external);
- if (err)
- return err;
-
- cmd = GLOBAL2_SMI_PHY_CMD_OP_45_WRITE_DATA | (addr << 5) | device;
-
- if (external)
- cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
-
- err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_DATA, val);
- if (err)
- return err;
-
- err = mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
- if (err)
- return err;
-
- return 0;
-}
-
-static int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip,
- int addr, int reg, u16 val,
- bool external)
-{
- u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA | (addr << 5) | reg;
- int err;
-
- if (external)
- cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
-
- err = mv88e6xxx_g2_smi_phy_wait(chip);
- if (err)
- return err;
-
- err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_DATA, val);
- if (err)
- return err;
-
- return mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
-}
-
-int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
+int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
int addr, int reg, u16 val)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
bool external = mdio_bus->external;
if (reg & MII_ADDR_C45)
- return mv88e6xxx_g2_smi_phy_write_c45(chip, addr, reg, val,
- external);
+ return mv88e6xxx_g2_smi_phy_write_c45(chip, external, addr, reg,
+ val);
- return mv88e6xxx_g2_smi_phy_write_c22(chip, addr, reg, val, external);
+ return mv88e6xxx_g2_smi_phy_write_data_c22(chip, external, addr, reg,
+ val);
}
static int mv88e6097_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
{
u16 reg;
- mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, &reg);
+ mv88e6xxx_g2_read(chip, MV88E6352_G2_WDOG_CTL, &reg);
dev_info(chip->dev, "Watchdog event: 0x%04x", reg);
@@ -714,20 +762,20 @@
{
u16 reg;
- mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, &reg);
+ mv88e6xxx_g2_read(chip, MV88E6352_G2_WDOG_CTL, &reg);
- reg &= ~(GLOBAL2_WDOG_CONTROL_EGRESS_ENABLE |
- GLOBAL2_WDOG_CONTROL_QC_ENABLE);
+ reg &= ~(MV88E6352_G2_WDOG_CTL_EGRESS_ENABLE |
+ MV88E6352_G2_WDOG_CTL_QC_ENABLE);
- mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL, reg);
+ mv88e6xxx_g2_write(chip, MV88E6352_G2_WDOG_CTL, reg);
}
static int mv88e6097_watchdog_setup(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL,
- GLOBAL2_WDOG_CONTROL_EGRESS_ENABLE |
- GLOBAL2_WDOG_CONTROL_QC_ENABLE |
- GLOBAL2_WDOG_CONTROL_SWRESET);
+ return mv88e6xxx_g2_write(chip, MV88E6352_G2_WDOG_CTL,
+ MV88E6352_G2_WDOG_CTL_EGRESS_ENABLE |
+ MV88E6352_G2_WDOG_CTL_QC_ENABLE |
+ MV88E6352_G2_WDOG_CTL_SWRESET);
}
const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {
@@ -738,12 +786,12 @@
static int mv88e6390_watchdog_setup(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g2_update(chip, GLOBAL2_WDOG_CONTROL,
- GLOBAL2_WDOG_INT_ENABLE |
- GLOBAL2_WDOG_CUT_THROUGH |
- GLOBAL2_WDOG_QUEUE_CONTROLLER |
- GLOBAL2_WDOG_EGRESS |
- GLOBAL2_WDOG_FORCE_IRQ);
+ return mv88e6xxx_g2_update(chip, MV88E6390_G2_WDOG_CTL,
+ MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE |
+ MV88E6390_G2_WDOG_CTL_CUT_THROUGH |
+ MV88E6390_G2_WDOG_CTL_QUEUE_CONTROLLER |
+ MV88E6390_G2_WDOG_CTL_EGRESS |
+ MV88E6390_G2_WDOG_CTL_FORCE_IRQ);
}
static int mv88e6390_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
@@ -751,17 +799,19 @@
int err;
u16 reg;
- mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL, GLOBAL2_WDOG_EVENT);
- err = mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, &reg);
+ mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
+ MV88E6390_G2_WDOG_CTL_PTR_EVENT);
+ err = mv88e6xxx_g2_read(chip, MV88E6390_G2_WDOG_CTL, &reg);
dev_info(chip->dev, "Watchdog event: 0x%04x",
- reg & GLOBAL2_WDOG_DATA_MASK);
+ reg & MV88E6390_G2_WDOG_CTL_DATA_MASK);
- mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL, GLOBAL2_WDOG_HISTORY);
- err = mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, &reg);
+ mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
+ MV88E6390_G2_WDOG_CTL_PTR_HISTORY);
+ err = mv88e6xxx_g2_read(chip, MV88E6390_G2_WDOG_CTL, &reg);
dev_info(chip->dev, "Watchdog history: 0x%04x",
- reg & GLOBAL2_WDOG_DATA_MASK);
+ reg & MV88E6390_G2_WDOG_CTL_DATA_MASK);
/* Trigger a software reset to try to recover the switch */
if (chip->info->ops->reset)
@@ -774,8 +824,8 @@
static void mv88e6390_watchdog_free(struct mv88e6xxx_chip *chip)
{
- mv88e6xxx_g2_update(chip, GLOBAL2_WDOG_CONTROL,
- GLOBAL2_WDOG_INT_ENABLE);
+ mv88e6xxx_g2_update(chip, MV88E6390_G2_WDOG_CTL,
+ MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE);
}
const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {
@@ -813,7 +863,7 @@
int err;
chip->watchdog_irq = irq_find_mapping(chip->g2_irq.domain,
- GLOBAL2_INT_SOURCE_WATCHDOG);
+ MV88E6XXX_G2_INT_SOURCE_WATCHDOG);
if (chip->watchdog_irq < 0)
return chip->watchdog_irq;
@@ -840,16 +890,16 @@
u16 val;
int err;
- err = mv88e6xxx_g2_read(chip, GLOBAL2_MISC, &val);
+ err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_MISC, &val);
if (err)
return err;
if (port_5_bit)
- val |= GLOBAL2_MISC_5_BIT_PORT;
+ val |= MV88E6XXX_G2_MISC_5_BIT_PORT;
else
- val &= ~GLOBAL2_MISC_5_BIT_PORT;
+ val &= ~MV88E6XXX_G2_MISC_5_BIT_PORT;
- return mv88e6xxx_g2_write(chip, GLOBAL2_MISC, val);
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MISC, val);
}
int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
@@ -883,7 +933,7 @@
u16 reg;
mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_g2_read(chip, GLOBAL2_INT_SOURCE, ®);
+ err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_INT_SOURCE, ®);
mutex_unlock(&chip->reg_lock);
if (err)
goto out;
@@ -910,7 +960,7 @@
{
struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
- mv88e6xxx_g2_write(chip, GLOBAL2_INT_MASK, ~chip->g2_irq.masked);
+ mv88e6xxx_g2_write(chip, MV88E6XXX_G2_INT_MASK, ~chip->g2_irq.masked);
mutex_unlock(&chip->reg_lock);
}
@@ -977,7 +1027,7 @@
chip->g2_irq.masked = ~0;
chip->device_irq = irq_find_mapping(chip->g1_irq.domain,
- GLOBAL_STATUS_IRQ_DEVICE);
+ MV88E6XXX_G1_STS_IRQ_DEVICE);
if (chip->device_irq < 0) {
err = chip->device_irq;
goto out;
@@ -1012,11 +1062,11 @@
* highest, and send all special multicast frames to the CPU
* port at the highest priority.
*/
- reg = GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI | (0x7 << 4);
+ reg = MV88E6XXX_G2_SWITCH_MGMT_FORCE_FLOW_CTL_PRI | (0x7 << 4);
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X) ||
mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X))
- reg |= GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x7;
- err = mv88e6xxx_g2_write(chip, GLOBAL2_SWITCH_MGMT, reg);
+ reg |= MV88E6XXX_G2_SWITCH_MGMT_RSVD2CPU | 0x7;
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SWITCH_MGMT, reg);
if (err)
return err;
@@ -1030,15 +1080,6 @@
if (err)
return err;
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_IRL)) {
- /* Disable ingress rate limiting by resetting all per port
- * ingress rate limit resources to their initial state.
- */
- err = mv88e6xxx_g2_clear_irl(chip);
- if (err)
- return err;
- }
-
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_POT)) {
/* Clear the priority override table. */
err = mv88e6xxx_g2_clear_pot(chip);
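The Clause 45 paths above take a composite reg argument: the kernel MDIO convention flags it with MII_ADDR_C45 and packs the MMD device in bits 20:16 and the register address in bits 15:0, which is exactly the split mv88e6xxx_g2_smi_phy_read_c45() performs. A standalone sketch of that encoding (the MII_ADDR_C45 value mirrors the kernel's mdio header; device and address are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define MII_ADDR_C45	(1 << 30)	/* as in the kernel's mdio header */

int main(void)
{
	int dev = 3;		/* hypothetical MMD, e.g. PCS */
	int addr = 0x8000;	/* hypothetical device register */
	uint32_t reg = MII_ADDR_C45 | (dev << 16) | addr;

	/* The split performed by mv88e6xxx_g2_smi_phy_read_c45() */
	int dev_out = (reg >> 16) & 0x1f;
	int addr_out = reg & 0xffff;

	printf("reg=0x%08x dev=%d addr=0x%04x\n", reg, dev_out, addr_out);
	return 0;
}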
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 14c0be9..317ffd8 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -1,5 +1,5 @@
/*
- * Marvell 88E6xxx Switch Global 2 Registers support (device address 0x1C)
+ * Marvell 88E6xxx Switch Global 2 Registers support
*
* Copyright (c) 2008 Marvell Semiconductor
*
@@ -15,7 +15,200 @@
#ifndef _MV88E6XXX_GLOBAL2_H
#define _MV88E6XXX_GLOBAL2_H
-#include "mv88e6xxx.h"
+#include "chip.h"
+
+#define MV88E6XXX_G2 0x1c
+
+/* Offset 0x00: Interrupt Source Register */
+#define MV88E6XXX_G2_INT_SOURCE 0x00
+#define MV88E6XXX_G2_INT_SOURCE_WATCHDOG 15
+
+/* Offset 0x01: Interrupt Mask Register */
+#define MV88E6XXX_G2_INT_MASK 0x01
+
+/* Offset 0x02: MGMT Enable Register 2x */
+#define MV88E6XXX_G2_MGMT_EN_2X 0x02
+
+/* Offset 0x03: MGMT Enable Register 0x */
+#define MV88E6XXX_G2_MGMT_EN_0X 0x03
+
+/* Offset 0x04: Flow Control Delay Register */
+#define MV88E6XXX_G2_FLOW_CTL 0x04
+
+/* Offset 0x05: Switch Management Register */
+#define MV88E6XXX_G2_SWITCH_MGMT 0x05
+#define MV88E6XXX_G2_SWITCH_MGMT_USE_DOUBLE_TAG_DATA 0x8000
+#define MV88E6XXX_G2_SWITCH_MGMT_PREVENT_LOOPS 0x4000
+#define MV88E6XXX_G2_SWITCH_MGMT_FLOW_CTL_MSG 0x2000
+#define MV88E6XXX_G2_SWITCH_MGMT_FORCE_FLOW_CTL_PRI 0x0080
+#define MV88E6XXX_G2_SWITCH_MGMT_RSVD2CPU 0x0008
+
+/* Offset 0x06: Device Mapping Table Register */
+#define MV88E6XXX_G2_DEVICE_MAPPING 0x06
+#define MV88E6XXX_G2_DEVICE_MAPPING_UPDATE 0x8000
+#define MV88E6XXX_G2_DEVICE_MAPPING_DEV_MASK 0x1f00
+#define MV88E6XXX_G2_DEVICE_MAPPING_PORT_MASK 0x000f
+
+/* Offset 0x07: Trunk Mask Table Register */
+#define MV88E6XXX_G2_TRUNK_MASK 0x07
+#define MV88E6XXX_G2_TRUNK_MASK_UPDATE 0x8000
+#define MV88E6XXX_G2_TRUNK_MASK_NUM_MASK 0x7000
+#define MV88E6XXX_G2_TRUNK_MASK_HASH 0x0800
+
+/* Offset 0x08: Trunk Mapping Table Register */
+#define MV88E6XXX_G2_TRUNK_MAPPING 0x08
+#define MV88E6XXX_G2_TRUNK_MAPPING_UPDATE 0x8000
+#define MV88E6XXX_G2_TRUNK_MAPPING_ID_MASK 0x7800
+
+/* Offset 0x09: Ingress Rate Command Register */
+#define MV88E6XXX_G2_IRL_CMD 0x09
+#define MV88E6XXX_G2_IRL_CMD_BUSY 0x8000
+#define MV88E6352_G2_IRL_CMD_OP_MASK 0x7000
+#define MV88E6352_G2_IRL_CMD_OP_NOOP 0x0000
+#define MV88E6352_G2_IRL_CMD_OP_INIT_ALL 0x1000
+#define MV88E6352_G2_IRL_CMD_OP_INIT_RES 0x2000
+#define MV88E6352_G2_IRL_CMD_OP_WRITE_REG 0x3000
+#define MV88E6352_G2_IRL_CMD_OP_READ_REG 0x4000
+#define MV88E6390_G2_IRL_CMD_OP_MASK 0x6000
+#define MV88E6390_G2_IRL_CMD_OP_READ_REG 0x0000
+#define MV88E6390_G2_IRL_CMD_OP_INIT_ALL 0x2000
+#define MV88E6390_G2_IRL_CMD_OP_INIT_RES 0x4000
+#define MV88E6390_G2_IRL_CMD_OP_WRITE_REG 0x6000
+#define MV88E6352_G2_IRL_CMD_PORT_MASK 0x0f00
+#define MV88E6390_G2_IRL_CMD_PORT_MASK 0x1f00
+#define MV88E6XXX_G2_IRL_CMD_RES_MASK 0x00e0
+#define MV88E6XXX_G2_IRL_CMD_REG_MASK 0x000f
+
+/* Offset 0x0A: Ingress Rate Data Register */
+#define MV88E6XXX_G2_IRL_DATA 0x0a
+#define MV88E6XXX_G2_IRL_DATA_MASK 0xffff
+
+/* Offset 0x0B: Cross-chip Port VLAN Register */
+#define MV88E6XXX_G2_PVT_ADDR 0x0b
+#define MV88E6XXX_G2_PVT_ADDR_BUSY 0x8000
+#define MV88E6XXX_G2_PVT_ADDR_OP_MASK 0x7000
+#define MV88E6XXX_G2_PVT_ADDR_OP_INIT_ONES 0x1000
+#define MV88E6XXX_G2_PVT_ADDR_OP_WRITE_PVLAN 0x3000
+#define MV88E6XXX_G2_PVT_ADDR_OP_READ 0x4000
+#define MV88E6XXX_G2_PVT_ADDR_PTR_MASK 0x01ff
+
+/* Offset 0x0C: Cross-chip Port VLAN Data Register */
+#define MV88E6XXX_G2_PVT_DATA 0x0c
+#define MV88E6XXX_G2_PVT_DATA_MASK 0x7f
+
+/* Offset 0x0D: Switch MAC/WoL/WoF Register */
+#define MV88E6XXX_G2_SWITCH_MAC 0x0d
+#define MV88E6XXX_G2_SWITCH_MAC_UPDATE 0x8000
+#define MV88E6XXX_G2_SWITCH_MAC_PTR_MASK 0x1f00
+#define MV88E6XXX_G2_SWITCH_MAC_DATA_MASK 0x00ff
+
+/* Offset 0x0E: ATU Stats Register */
+#define MV88E6XXX_G2_ATU_STATS 0x0e
+
+/* Offset 0x0F: Priority Override Table */
+#define MV88E6XXX_G2_PRIO_OVERRIDE 0x0f
+#define MV88E6XXX_G2_PRIO_OVERRIDE_UPDATE 0x8000
+#define MV88E6XXX_G2_PRIO_OVERRIDE_FPRISET 0x1000
+#define MV88E6XXX_G2_PRIO_OVERRIDE_PTR_MASK 0x0f00
+#define MV88E6352_G2_PRIO_OVERRIDE_QPRIAVBEN 0x0080
+#define MV88E6352_G2_PRIO_OVERRIDE_DATAAVB_MASK 0x0030
+#define MV88E6XXX_G2_PRIO_OVERRIDE_QFPRIEN 0x0008
+#define MV88E6XXX_G2_PRIO_OVERRIDE_DATA_MASK 0x0007
+
+/* Offset 0x14: EEPROM Command */
+#define MV88E6XXX_G2_EEPROM_CMD 0x14
+#define MV88E6XXX_G2_EEPROM_CMD_BUSY 0x8000
+#define MV88E6XXX_G2_EEPROM_CMD_OP_MASK 0x7000
+#define MV88E6XXX_G2_EEPROM_CMD_OP_WRITE 0x3000
+#define MV88E6XXX_G2_EEPROM_CMD_OP_READ 0x4000
+#define MV88E6XXX_G2_EEPROM_CMD_OP_LOAD 0x6000
+#define MV88E6XXX_G2_EEPROM_CMD_RUNNING 0x0800
+#define MV88E6XXX_G2_EEPROM_CMD_WRITE_EN 0x0400
+#define MV88E6352_G2_EEPROM_CMD_ADDR_MASK 0x00ff
+#define MV88E6390_G2_EEPROM_CMD_DATA_MASK 0x00ff
+
+/* Offset 0x15: EEPROM Data */
+#define MV88E6352_G2_EEPROM_DATA 0x15
+#define MV88E6352_G2_EEPROM_DATA_MASK 0xffff
+
+/* Offset 0x15: EEPROM Addr */
+#define MV88E6390_G2_EEPROM_ADDR 0x15
+#define MV88E6390_G2_EEPROM_ADDR_MASK 0xffff
+
+/* Offset 0x16: AVB Command Register */
+#define MV88E6352_G2_AVB_CMD 0x16
+
+/* Offset 0x17: AVB Data Register */
+#define MV88E6352_G2_AVB_DATA 0x17
+
+/* Offset 0x18: SMI PHY Command Register */
+#define MV88E6XXX_G2_SMI_PHY_CMD 0x18
+#define MV88E6XXX_G2_SMI_PHY_CMD_BUSY 0x8000
+#define MV88E6390_G2_SMI_PHY_CMD_FUNC_MASK 0x6000
+#define MV88E6390_G2_SMI_PHY_CMD_FUNC_INTERNAL 0x0000
+#define MV88E6390_G2_SMI_PHY_CMD_FUNC_EXTERNAL 0x2000
+#define MV88E6390_G2_SMI_PHY_CMD_FUNC_SETUP 0x4000
+#define MV88E6XXX_G2_SMI_PHY_CMD_MODE_MASK 0x1000
+#define MV88E6XXX_G2_SMI_PHY_CMD_MODE_45 0x0000
+#define MV88E6XXX_G2_SMI_PHY_CMD_MODE_22 0x1000
+#define MV88E6XXX_G2_SMI_PHY_CMD_OP_MASK 0x0c00
+#define MV88E6XXX_G2_SMI_PHY_CMD_OP_22_WRITE_DATA 0x0400
+#define MV88E6XXX_G2_SMI_PHY_CMD_OP_22_READ_DATA 0x0800
+#define MV88E6XXX_G2_SMI_PHY_CMD_OP_45_WRITE_ADDR 0x0000
+#define MV88E6XXX_G2_SMI_PHY_CMD_OP_45_WRITE_DATA 0x0400
+#define MV88E6XXX_G2_SMI_PHY_CMD_OP_45_READ_DATA_INC 0x0800
+#define MV88E6XXX_G2_SMI_PHY_CMD_OP_45_READ_DATA 0x0c00
+#define MV88E6XXX_G2_SMI_PHY_CMD_DEV_ADDR_MASK 0x03e0
+#define MV88E6XXX_G2_SMI_PHY_CMD_REG_ADDR_MASK 0x001f
+#define MV88E6XXX_G2_SMI_PHY_CMD_SETUP_PTR_MASK 0x03ff
+
+/* Offset 0x19: SMI PHY Data Register */
+#define MV88E6XXX_G2_SMI_PHY_DATA 0x19
+
+/* Offset 0x1A: Scratch and Misc. Register */
+#define MV88E6XXX_G2_SCRATCH_MISC_MISC 0x1a
+#define MV88E6XXX_G2_SCRATCH_MISC_UPDATE 0x8000
+#define MV88E6XXX_G2_SCRATCH_MISC_PTR_MASK 0x7f00
+#define MV88E6XXX_G2_SCRATCH_MISC_DATA_MASK 0x00ff
+
+/* Offset 0x1B: Watch Dog Control Register */
+#define MV88E6352_G2_WDOG_CTL 0x1b
+#define MV88E6352_G2_WDOG_CTL_EGRESS_EVENT 0x0080
+#define MV88E6352_G2_WDOG_CTL_RMU_TIMEOUT 0x0040
+#define MV88E6352_G2_WDOG_CTL_QC_ENABLE 0x0020
+#define MV88E6352_G2_WDOG_CTL_EGRESS_HISTORY 0x0010
+#define MV88E6352_G2_WDOG_CTL_EGRESS_ENABLE 0x0008
+#define MV88E6352_G2_WDOG_CTL_FORCE_IRQ 0x0004
+#define MV88E6352_G2_WDOG_CTL_HISTORY 0x0002
+#define MV88E6352_G2_WDOG_CTL_SWRESET 0x0001
+
+/* Offset 0x1B: Watch Dog Control Register */
+#define MV88E6390_G2_WDOG_CTL 0x1b
+#define MV88E6390_G2_WDOG_CTL_UPDATE 0x8000
+#define MV88E6390_G2_WDOG_CTL_PTR_MASK 0x7f00
+#define MV88E6390_G2_WDOG_CTL_PTR_INT_SOURCE 0x0000
+#define MV88E6390_G2_WDOG_CTL_PTR_INT_STS 0x1000
+#define MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE 0x1100
+#define MV88E6390_G2_WDOG_CTL_PTR_EVENT 0x1200
+#define MV88E6390_G2_WDOG_CTL_PTR_HISTORY 0x1300
+#define MV88E6390_G2_WDOG_CTL_DATA_MASK 0x00ff
+#define MV88E6390_G2_WDOG_CTL_CUT_THROUGH 0x0008
+#define MV88E6390_G2_WDOG_CTL_QUEUE_CONTROLLER 0x0004
+#define MV88E6390_G2_WDOG_CTL_EGRESS 0x0002
+#define MV88E6390_G2_WDOG_CTL_FORCE_IRQ 0x0001
+
+/* Offset 0x1C: QoS Weights Register */
+#define MV88E6XXX_G2_QOS_WEIGHTS 0x1c
+#define MV88E6XXX_G2_QOS_WEIGHTS_UPDATE 0x8000
+#define MV88E6352_G2_QOS_WEIGHTS_PTR_MASK 0x3f00
+#define MV88E6390_G2_QOS_WEIGHTS_PTR_MASK 0x7f00
+#define MV88E6XXX_G2_QOS_WEIGHTS_DATA_MASK 0x00ff
+
+/* Offset 0x1D: Misc Register */
+#define MV88E6XXX_G2_MISC 0x1d
+#define MV88E6XXX_G2_MISC_5_BIT_PORT 0x4000
+#define MV88E6352_G2_NOEGR_POLICY 0x2000
+#define MV88E6390_G2_LAG_ID_4 0x2000
#ifdef CONFIG_NET_DSA_MV88E6XXX_GLOBAL2
@@ -24,6 +217,9 @@
return 0;
}
+int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port);
+
int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
struct mii_bus *bus,
int addr, int reg, u16 *val);
@@ -66,6 +262,18 @@
return 0;
}
+static inline int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip,
+ int port)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip,
+ int port)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
struct mii_bus *bus,
int addr, int reg, u16 *val)
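The new SMI PHY access path shifts the device address into place with __bf_shf(), which evaluates to the bit index of a mask's lowest set bit. A user-space sketch of the same placement (the __bf_shf() definition below mirrors linux/bitfield.h; the device address is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define MV88E6XXX_G2_SMI_PHY_CMD_DEV_ADDR_MASK	0x03e0
#define __bf_shf(x)	(__builtin_ffsll(x) - 1)	/* as in linux/bitfield.h */

int main(void)
{
	uint16_t cmd = 0;
	int dev = 0x11;		/* hypothetical SMI device address */

	/* Same placement as mv88e6xxx_g2_smi_phy_access() */
	dev <<= __bf_shf(MV88E6XXX_G2_SMI_PHY_CMD_DEV_ADDR_MASK);
	cmd |= dev & MV88E6XXX_G2_SMI_PHY_CMD_DEV_ADDR_MASK;

	printf("cmd=0x%04x\n", cmd);	/* 0x0220 */
	return 0;
}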
diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
deleted file mode 100644
index 77236cd..0000000
--- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
+++ /dev/null
@@ -1,946 +0,0 @@
-/*
- * Marvell 88e6xxx common definitions
- *
- * Copyright (c) 2008 Marvell Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __MV88E6XXX_H
-#define __MV88E6XXX_H
-
-#include <linux/if_vlan.h>
-#include <linux/irq.h>
-#include <linux/gpio/consumer.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-
-#ifndef UINT64_MAX
-#define UINT64_MAX (u64)(~((u64)0))
-#endif
-
-#define SMI_CMD 0x00
-#define SMI_CMD_BUSY BIT(15)
-#define SMI_CMD_CLAUSE_22 BIT(12)
-#define SMI_CMD_OP_22_WRITE ((1 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22)
-#define SMI_CMD_OP_22_READ ((2 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22)
-#define SMI_CMD_OP_45_WRITE_ADDR ((0 << 10) | SMI_CMD_BUSY)
-#define SMI_CMD_OP_45_WRITE_DATA ((1 << 10) | SMI_CMD_BUSY)
-#define SMI_CMD_OP_45_READ_DATA ((2 << 10) | SMI_CMD_BUSY)
-#define SMI_CMD_OP_45_READ_DATA_INC ((3 << 10) | SMI_CMD_BUSY)
-#define SMI_DATA 0x01
-
-/* PHY Registers */
-#define PHY_PAGE 0x16
-#define PHY_PAGE_COPPER 0x00
-
-#define ADDR_SERDES 0x0f
-#define SERDES_PAGE_FIBER 0x01
-
-#define PORT_STATUS 0x00
-#define PORT_STATUS_PAUSE_EN BIT(15)
-#define PORT_STATUS_MY_PAUSE BIT(14)
-#define PORT_STATUS_HD_FLOW BIT(13)
-#define PORT_STATUS_PHY_DETECT BIT(12)
-#define PORT_STATUS_LINK BIT(11)
-#define PORT_STATUS_DUPLEX BIT(10)
-#define PORT_STATUS_SPEED_MASK 0x0300
-#define PORT_STATUS_SPEED_10 0x0000
-#define PORT_STATUS_SPEED_100 0x0100
-#define PORT_STATUS_SPEED_1000 0x0200
-#define PORT_STATUS_EEE BIT(6) /* 6352 */
-#define PORT_STATUS_AM_DIS BIT(6) /* 6165 */
-#define PORT_STATUS_MGMII BIT(6) /* 6185 */
-#define PORT_STATUS_TX_PAUSED BIT(5)
-#define PORT_STATUS_FLOW_CTRL BIT(4)
-#define PORT_STATUS_CMODE_MASK 0x0f
-#define PORT_STATUS_CMODE_100BASE_X 0x8
-#define PORT_STATUS_CMODE_1000BASE_X 0x9
-#define PORT_STATUS_CMODE_SGMII 0xa
-#define PORT_STATUS_CMODE_2500BASEX 0xb
-#define PORT_STATUS_CMODE_XAUI 0xc
-#define PORT_STATUS_CMODE_RXAUI 0xd
-#define PORT_PCS_CTRL 0x01
-#define PORT_PCS_CTRL_RGMII_DELAY_RXCLK BIT(15)
-#define PORT_PCS_CTRL_RGMII_DELAY_TXCLK BIT(14)
-#define PORT_PCS_CTRL_FORCE_SPEED BIT(13) /* 6390 */
-#define PORT_PCS_CTRL_ALTSPEED BIT(12) /* 6390 */
-#define PORT_PCS_CTRL_200BASE BIT(12) /* 6352 */
-#define PORT_PCS_CTRL_FC BIT(7)
-#define PORT_PCS_CTRL_FORCE_FC BIT(6)
-#define PORT_PCS_CTRL_LINK_UP BIT(5)
-#define PORT_PCS_CTRL_FORCE_LINK BIT(4)
-#define PORT_PCS_CTRL_DUPLEX_FULL BIT(3)
-#define PORT_PCS_CTRL_FORCE_DUPLEX BIT(2)
-#define PORT_PCS_CTRL_SPEED_MASK (0x03)
-#define PORT_PCS_CTRL_SPEED_10 (0x00)
-#define PORT_PCS_CTRL_SPEED_100 (0x01)
-#define PORT_PCS_CTRL_SPEED_200 (0x02) /* 6065 and non Gb chips */
-#define PORT_PCS_CTRL_SPEED_1000 (0x02)
-#define PORT_PCS_CTRL_SPEED_10000 (0x03) /* 6390X */
-#define PORT_PCS_CTRL_SPEED_UNFORCED (0x03)
-#define PORT_PAUSE_CTRL 0x02
-#define PORT_FLOW_CTRL_LIMIT_IN ((0x00 << 8) | BIT(15))
-#define PORT_FLOW_CTRL_LIMIT_OUT ((0x01 << 8) | BIT(15))
-#define PORT_SWITCH_ID 0x03
-#define PORT_SWITCH_ID_PROD_NUM_6085 0x04a
-#define PORT_SWITCH_ID_PROD_NUM_6095 0x095
-#define PORT_SWITCH_ID_PROD_NUM_6097 0x099
-#define PORT_SWITCH_ID_PROD_NUM_6131 0x106
-#define PORT_SWITCH_ID_PROD_NUM_6320 0x115
-#define PORT_SWITCH_ID_PROD_NUM_6123 0x121
-#define PORT_SWITCH_ID_PROD_NUM_6141 0x340
-#define PORT_SWITCH_ID_PROD_NUM_6161 0x161
-#define PORT_SWITCH_ID_PROD_NUM_6165 0x165
-#define PORT_SWITCH_ID_PROD_NUM_6171 0x171
-#define PORT_SWITCH_ID_PROD_NUM_6172 0x172
-#define PORT_SWITCH_ID_PROD_NUM_6175 0x175
-#define PORT_SWITCH_ID_PROD_NUM_6176 0x176
-#define PORT_SWITCH_ID_PROD_NUM_6185 0x1a7
-#define PORT_SWITCH_ID_PROD_NUM_6190 0x190
-#define PORT_SWITCH_ID_PROD_NUM_6190X 0x0a0
-#define PORT_SWITCH_ID_PROD_NUM_6191 0x191
-#define PORT_SWITCH_ID_PROD_NUM_6240 0x240
-#define PORT_SWITCH_ID_PROD_NUM_6290 0x290
-#define PORT_SWITCH_ID_PROD_NUM_6321 0x310
-#define PORT_SWITCH_ID_PROD_NUM_6341 0x341
-#define PORT_SWITCH_ID_PROD_NUM_6352 0x352
-#define PORT_SWITCH_ID_PROD_NUM_6350 0x371
-#define PORT_SWITCH_ID_PROD_NUM_6351 0x375
-#define PORT_SWITCH_ID_PROD_NUM_6390 0x390
-#define PORT_SWITCH_ID_PROD_NUM_6390X 0x0a1
-#define PORT_CONTROL 0x04
-#define PORT_CONTROL_USE_CORE_TAG BIT(15)
-#define PORT_CONTROL_DROP_ON_LOCK BIT(14)
-#define PORT_CONTROL_EGRESS_UNMODIFIED (0x0 << 12)
-#define PORT_CONTROL_EGRESS_UNTAGGED (0x1 << 12)
-#define PORT_CONTROL_EGRESS_TAGGED (0x2 << 12)
-#define PORT_CONTROL_EGRESS_ADD_TAG (0x3 << 12)
-#define PORT_CONTROL_EGRESS_MASK (0x3 << 12)
-#define PORT_CONTROL_HEADER BIT(11)
-#define PORT_CONTROL_IGMP_MLD_SNOOP BIT(10)
-#define PORT_CONTROL_DOUBLE_TAG BIT(9)
-#define PORT_CONTROL_FRAME_MODE_NORMAL (0x0 << 8)
-#define PORT_CONTROL_FRAME_MODE_DSA (0x1 << 8)
-#define PORT_CONTROL_FRAME_MODE_PROVIDER (0x2 << 8)
-#define PORT_CONTROL_FRAME_ETHER_TYPE_DSA (0x3 << 8)
-#define PORT_CONTROL_FRAME_MASK (0x3 << 8)
-#define PORT_CONTROL_DSA_TAG BIT(8)
-#define PORT_CONTROL_VLAN_TUNNEL BIT(7)
-#define PORT_CONTROL_TAG_IF_BOTH BIT(6)
-#define PORT_CONTROL_USE_IP BIT(5)
-#define PORT_CONTROL_USE_TAG BIT(4)
-#define PORT_CONTROL_FORWARD_UNKNOWN BIT(2)
-#define PORT_CONTROL_EGRESS_FLOODS_MASK (0x3 << 2)
-#define PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_DA (0x0 << 2)
-#define PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_MC_DA (0x1 << 2)
-#define PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_UC_DA (0x2 << 2)
-#define PORT_CONTROL_EGRESS_FLOODS_ALL_UNKNOWN_DA (0x3 << 2)
-#define PORT_CONTROL_STATE_MASK 0x03
-#define PORT_CONTROL_STATE_DISABLED 0x00
-#define PORT_CONTROL_STATE_BLOCKING 0x01
-#define PORT_CONTROL_STATE_LEARNING 0x02
-#define PORT_CONTROL_STATE_FORWARDING 0x03
-#define PORT_CONTROL_1 0x05
-#define PORT_CONTROL_1_MESSAGE_PORT BIT(15)
-#define PORT_CONTROL_1_FID_11_4_MASK (0xff << 0)
-#define PORT_BASE_VLAN 0x06
-#define PORT_BASE_VLAN_FID_3_0_MASK (0xf << 12)
-#define PORT_DEFAULT_VLAN 0x07
-#define PORT_DEFAULT_VLAN_MASK 0xfff
-#define PORT_CONTROL_2 0x08
-#define PORT_CONTROL_2_IGNORE_FCS BIT(15)
-#define PORT_CONTROL_2_VTU_PRI_OVERRIDE BIT(14)
-#define PORT_CONTROL_2_SA_PRIO_OVERRIDE BIT(13)
-#define PORT_CONTROL_2_DA_PRIO_OVERRIDE BIT(12)
-#define PORT_CONTROL_2_JUMBO_1522 (0x00 << 12)
-#define PORT_CONTROL_2_JUMBO_2048 (0x01 << 12)
-#define PORT_CONTROL_2_JUMBO_10240 (0x02 << 12)
-#define PORT_CONTROL_2_8021Q_MASK (0x03 << 10)
-#define PORT_CONTROL_2_8021Q_DISABLED (0x00 << 10)
-#define PORT_CONTROL_2_8021Q_FALLBACK (0x01 << 10)
-#define PORT_CONTROL_2_8021Q_CHECK (0x02 << 10)
-#define PORT_CONTROL_2_8021Q_SECURE (0x03 << 10)
-#define PORT_CONTROL_2_DISCARD_TAGGED BIT(9)
-#define PORT_CONTROL_2_DISCARD_UNTAGGED BIT(8)
-#define PORT_CONTROL_2_MAP_DA BIT(7)
-#define PORT_CONTROL_2_DEFAULT_FORWARD BIT(6)
-#define PORT_CONTROL_2_EGRESS_MONITOR BIT(5)
-#define PORT_CONTROL_2_INGRESS_MONITOR BIT(4)
-#define PORT_CONTROL_2_UPSTREAM_MASK 0x0f
-#define PORT_RATE_CONTROL 0x09
-#define PORT_RATE_CONTROL_2 0x0a
-#define PORT_ASSOC_VECTOR 0x0b
-#define PORT_ASSOC_VECTOR_HOLD_AT_1 BIT(15)
-#define PORT_ASSOC_VECTOR_INT_AGE_OUT BIT(14)
-#define PORT_ASSOC_VECTOR_LOCKED_PORT BIT(13)
-#define PORT_ASSOC_VECTOR_IGNORE_WRONG BIT(12)
-#define PORT_ASSOC_VECTOR_REFRESH_LOCKED BIT(11)
-#define PORT_ATU_CONTROL 0x0c
-#define PORT_PRI_OVERRIDE 0x0d
-#define PORT_ETH_TYPE 0x0f
-#define PORT_ETH_TYPE_DEFAULT 0x9100
-#define PORT_IN_DISCARD_LO 0x10
-#define PORT_IN_DISCARD_HI 0x11
-#define PORT_IN_FILTERED 0x12
-#define PORT_OUT_FILTERED 0x13
-#define PORT_TAG_REGMAP_0123 0x18
-#define PORT_TAG_REGMAP_4567 0x19
-#define PORT_IEEE_PRIO_MAP_TABLE 0x18 /* 6390 */
-#define PORT_IEEE_PRIO_MAP_TABLE_UPDATE BIT(15)
-#define PORT_IEEE_PRIO_MAP_TABLE_INGRESS_PCP (0x0 << 12)
-#define PORT_IEEE_PRIO_MAP_TABLE_EGRESS_GREEN_PCP (0x1 << 12)
-#define PORT_IEEE_PRIO_MAP_TABLE_EGRESS_YELLOW_PCP (0x2 << 12)
-#define PORT_IEEE_PRIO_MAP_TABLE_EGRESS_AVB_PCP (0x3 << 12)
-#define PORT_IEEE_PRIO_MAP_TABLE_EGRESS_GREEN_DSCP (0x5 << 12)
-#define PORT_IEEE_PRIO_MAP_TABLE_EGRESS_YELLOW_DSCP (0x6 << 12)
-#define PORT_IEEE_PRIO_MAP_TABLE_EGRESS_AVB_DSCP (0x7 << 12)
-#define PORT_IEEE_PRIO_MAP_TABLE_POINTER_SHIFT 9
-
-#define GLOBAL_STATUS 0x00
-#define GLOBAL_STATUS_PPU_STATE BIT(15) /* 6351 and 6171 */
-#define GLOBAL_STATUS_PPU_STATE_MASK (0x3 << 14) /* 6165 6185 */
-#define GLOBAL_STATUS_PPU_STATE_DISABLED_RST (0x0 << 14)
-#define GLOBAL_STATUS_PPU_STATE_INITIALIZING (0x1 << 14)
-#define GLOBAL_STATUS_PPU_STATE_DISABLED (0x2 << 14)
-#define GLOBAL_STATUS_PPU_STATE_POLLING (0x3 << 14)
-#define GLOBAL_STATUS_INIT_READY BIT(11)
-#define GLOBAL_STATUS_IRQ_AVB 8
-#define GLOBAL_STATUS_IRQ_DEVICE 7
-#define GLOBAL_STATUS_IRQ_STATS 6
-#define GLOBAL_STATUS_IRQ_VTU_PROBLEM 5
-#define GLOBAL_STATUS_IRQ_VTU_DONE 4
-#define GLOBAL_STATUS_IRQ_ATU_PROBLEM 3
-#define GLOBAL_STATUS_IRQ_ATU_DONE 2
-#define GLOBAL_STATUS_IRQ_TCAM_DONE 1
-#define GLOBAL_STATUS_IRQ_EEPROM_DONE 0
-#define GLOBAL_MAC_01 0x01
-#define GLOBAL_MAC_23 0x02
-#define GLOBAL_MAC_45 0x03
-#define GLOBAL_ATU_FID 0x01
-#define GLOBAL_VTU_FID 0x02
-#define GLOBAL_VTU_FID_MASK 0xfff
-#define GLOBAL_VTU_SID 0x03 /* 6097 6165 6351 6352 */
-#define GLOBAL_VTU_SID_MASK 0x3f
-#define GLOBAL_CONTROL 0x04
-#define GLOBAL_CONTROL_SW_RESET BIT(15)
-#define GLOBAL_CONTROL_PPU_ENABLE BIT(14)
-#define GLOBAL_CONTROL_DISCARD_EXCESS BIT(13) /* 6352 */
-#define GLOBAL_CONTROL_SCHED_PRIO BIT(11) /* 6152 */
-#define GLOBAL_CONTROL_MAX_FRAME_1632 BIT(10) /* 6152 */
-#define GLOBAL_CONTROL_RELOAD_EEPROM BIT(9) /* 6152 */
-#define GLOBAL_CONTROL_DEVICE_EN BIT(7)
-#define GLOBAL_CONTROL_STATS_DONE_EN BIT(6)
-#define GLOBAL_CONTROL_VTU_PROBLEM_EN BIT(5)
-#define GLOBAL_CONTROL_VTU_DONE_EN BIT(4)
-#define GLOBAL_CONTROL_ATU_PROBLEM_EN BIT(3)
-#define GLOBAL_CONTROL_ATU_DONE_EN BIT(2)
-#define GLOBAL_CONTROL_TCAM_EN BIT(1)
-#define GLOBAL_CONTROL_EEPROM_DONE_EN BIT(0)
-#define GLOBAL_VTU_OP 0x05
-#define GLOBAL_VTU_OP_BUSY BIT(15)
-#define GLOBAL_VTU_OP_FLUSH_ALL ((0x01 << 12) | GLOBAL_VTU_OP_BUSY)
-#define GLOBAL_VTU_OP_VTU_LOAD_PURGE ((0x03 << 12) | GLOBAL_VTU_OP_BUSY)
-#define GLOBAL_VTU_OP_VTU_GET_NEXT ((0x04 << 12) | GLOBAL_VTU_OP_BUSY)
-#define GLOBAL_VTU_OP_STU_LOAD_PURGE ((0x05 << 12) | GLOBAL_VTU_OP_BUSY)
-#define GLOBAL_VTU_OP_STU_GET_NEXT ((0x06 << 12) | GLOBAL_VTU_OP_BUSY)
-#define GLOBAL_VTU_VID 0x06
-#define GLOBAL_VTU_VID_MASK 0xfff
-#define GLOBAL_VTU_VID_PAGE BIT(13)
-#define GLOBAL_VTU_VID_VALID BIT(12)
-#define GLOBAL_VTU_DATA_0_3 0x07
-#define GLOBAL_VTU_DATA_4_7 0x08
-#define GLOBAL_VTU_DATA_8_11 0x09
-#define GLOBAL_VTU_STU_DATA_MASK 0x03
-#define GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED 0x00
-#define GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED 0x01
-#define GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED 0x02
-#define GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER 0x03
-#define GLOBAL_STU_DATA_PORT_STATE_DISABLED 0x00
-#define GLOBAL_STU_DATA_PORT_STATE_BLOCKING 0x01
-#define GLOBAL_STU_DATA_PORT_STATE_LEARNING 0x02
-#define GLOBAL_STU_DATA_PORT_STATE_FORWARDING 0x03
-#define GLOBAL_ATU_CONTROL 0x0a
-#define GLOBAL_ATU_CONTROL_LEARN2ALL BIT(3)
-#define GLOBAL_ATU_OP 0x0b
-#define GLOBAL_ATU_OP_BUSY BIT(15)
-#define GLOBAL_ATU_OP_NOP (0 << 12)
-#define GLOBAL_ATU_OP_FLUSH_MOVE_ALL ((1 << 12) | GLOBAL_ATU_OP_BUSY)
-#define GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC ((2 << 12) | GLOBAL_ATU_OP_BUSY)
-#define GLOBAL_ATU_OP_LOAD_DB ((3 << 12) | GLOBAL_ATU_OP_BUSY)
-#define GLOBAL_ATU_OP_GET_NEXT_DB ((4 << 12) | GLOBAL_ATU_OP_BUSY)
-#define GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB ((5 << 12) | GLOBAL_ATU_OP_BUSY)
-#define GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB ((6 << 12) | GLOBAL_ATU_OP_BUSY)
-#define GLOBAL_ATU_OP_GET_CLR_VIOLATION ((7 << 12) | GLOBAL_ATU_OP_BUSY)
-#define GLOBAL_ATU_DATA 0x0c
-#define GLOBAL_ATU_DATA_TRUNK BIT(15)
-#define GLOBAL_ATU_DATA_TRUNK_ID_MASK 0x00f0
-#define GLOBAL_ATU_DATA_TRUNK_ID_SHIFT 4
-#define GLOBAL_ATU_DATA_PORT_VECTOR_MASK 0x3ff0
-#define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT 4
-#define GLOBAL_ATU_DATA_STATE_MASK 0x0f
-#define GLOBAL_ATU_DATA_STATE_UNUSED 0x00
-#define GLOBAL_ATU_DATA_STATE_UC_MGMT 0x0d
-#define GLOBAL_ATU_DATA_STATE_UC_STATIC 0x0e
-#define GLOBAL_ATU_DATA_STATE_UC_PRIO_OVER 0x0f
-#define GLOBAL_ATU_DATA_STATE_MC_NONE_RATE 0x05
-#define GLOBAL_ATU_DATA_STATE_MC_STATIC 0x07
-#define GLOBAL_ATU_DATA_STATE_MC_MGMT 0x0e
-#define GLOBAL_ATU_DATA_STATE_MC_PRIO_OVER 0x0f
-#define GLOBAL_ATU_MAC_01 0x0d
-#define GLOBAL_ATU_MAC_23 0x0e
-#define GLOBAL_ATU_MAC_45 0x0f
-#define GLOBAL_IP_PRI_0 0x10
-#define GLOBAL_IP_PRI_1 0x11
-#define GLOBAL_IP_PRI_2 0x12
-#define GLOBAL_IP_PRI_3 0x13
-#define GLOBAL_IP_PRI_4 0x14
-#define GLOBAL_IP_PRI_5 0x15
-#define GLOBAL_IP_PRI_6 0x16
-#define GLOBAL_IP_PRI_7 0x17
-#define GLOBAL_IEEE_PRI 0x18
-#define GLOBAL_CORE_TAG_TYPE 0x19
-#define GLOBAL_MONITOR_CONTROL 0x1a
-#define GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT 12
-#define GLOBAL_MONITOR_CONTROL_INGRESS_MASK (0xf << 12)
-#define GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT 8
-#define GLOBAL_MONITOR_CONTROL_EGRESS_MASK (0xf << 8)
-#define GLOBAL_MONITOR_CONTROL_ARP_SHIFT 4
-#define GLOBAL_MONITOR_CONTROL_ARP_MASK (0xf << 4)
-#define GLOBAL_MONITOR_CONTROL_MIRROR_SHIFT 0
-#define GLOBAL_MONITOR_CONTROL_ARP_DISABLED (0xf0)
-#define GLOBAL_MONITOR_CONTROL_UPDATE BIT(15)
-#define GLOBAL_MONITOR_CONTROL_0180C280000000XLO (0x00 << 8)
-#define GLOBAL_MONITOR_CONTROL_0180C280000000XHI (0x01 << 8)
-#define GLOBAL_MONITOR_CONTROL_0180C280000002XLO (0x02 << 8)
-#define GLOBAL_MONITOR_CONTROL_0180C280000002XHI (0x03 << 8)
-#define GLOBAL_MONITOR_CONTROL_INGRESS (0x20 << 8)
-#define GLOBAL_MONITOR_CONTROL_EGRESS (0x21 << 8)
-#define GLOBAL_MONITOR_CONTROL_CPU_DEST (0x30 << 8)
-#define GLOBAL_CONTROL_2 0x1c
-#define GLOBAL_CONTROL_2_NO_CASCADE 0xe000
-#define GLOBAL_CONTROL_2_MULTIPLE_CASCADE 0xf000
-#define GLOBAL_CONTROL_2_HIST_RX (0x1 << 6)
-#define GLOBAL_CONTROL_2_HIST_TX (0x2 << 6)
-#define GLOBAL_CONTROL_2_HIST_RX_TX (0x3 << 6)
-#define GLOBAL_STATS_OP 0x1d
-#define GLOBAL_STATS_OP_BUSY BIT(15)
-#define GLOBAL_STATS_OP_NOP (0 << 12)
-#define GLOBAL_STATS_OP_FLUSH_ALL ((1 << 12) | GLOBAL_STATS_OP_BUSY)
-#define GLOBAL_STATS_OP_FLUSH_PORT ((2 << 12) | GLOBAL_STATS_OP_BUSY)
-#define GLOBAL_STATS_OP_READ_CAPTURED ((4 << 12) | GLOBAL_STATS_OP_BUSY)
-#define GLOBAL_STATS_OP_CAPTURE_PORT ((5 << 12) | GLOBAL_STATS_OP_BUSY)
-#define GLOBAL_STATS_OP_HIST_RX ((1 << 10) | GLOBAL_STATS_OP_BUSY)
-#define GLOBAL_STATS_OP_HIST_TX ((2 << 10) | GLOBAL_STATS_OP_BUSY)
-#define GLOBAL_STATS_OP_HIST_RX_TX ((3 << 10) | GLOBAL_STATS_OP_BUSY)
-#define GLOBAL_STATS_OP_BANK_1_BIT_9 BIT(9)
-#define GLOBAL_STATS_OP_BANK_1_BIT_10 BIT(10)
-#define GLOBAL_STATS_COUNTER_32 0x1e
-#define GLOBAL_STATS_COUNTER_01 0x1f
-
-#define GLOBAL2_INT_SOURCE 0x00
-#define GLOBAL2_INT_SOURCE_WATCHDOG 15
-#define GLOBAL2_INT_MASK 0x01
-#define GLOBAL2_MGMT_EN_2X 0x02
-#define GLOBAL2_MGMT_EN_0X 0x03
-#define GLOBAL2_FLOW_CONTROL 0x04
-#define GLOBAL2_SWITCH_MGMT 0x05
-#define GLOBAL2_SWITCH_MGMT_USE_DOUBLE_TAG_DATA BIT(15)
-#define GLOBAL2_SWITCH_MGMT_PREVENT_LOOPS BIT(14)
-#define GLOBAL2_SWITCH_MGMT_FLOW_CONTROL_MSG BIT(13)
-#define GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI BIT(7)
-#define GLOBAL2_SWITCH_MGMT_RSVD2CPU BIT(3)
-#define GLOBAL2_DEVICE_MAPPING 0x06
-#define GLOBAL2_DEVICE_MAPPING_UPDATE BIT(15)
-#define GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT 8
-#define GLOBAL2_DEVICE_MAPPING_PORT_MASK 0x0f
-#define GLOBAL2_TRUNK_MASK 0x07
-#define GLOBAL2_TRUNK_MASK_UPDATE BIT(15)
-#define GLOBAL2_TRUNK_MASK_NUM_SHIFT 12
-#define GLOBAL2_TRUNK_MASK_HASK BIT(11)
-#define GLOBAL2_TRUNK_MAPPING 0x08
-#define GLOBAL2_TRUNK_MAPPING_UPDATE BIT(15)
-#define GLOBAL2_TRUNK_MAPPING_ID_SHIFT 11
-#define GLOBAL2_IRL_CMD 0x09
-#define GLOBAL2_IRL_CMD_BUSY BIT(15)
-#define GLOBAL2_IRL_CMD_OP_INIT_ALL ((0x001 << 12) | GLOBAL2_IRL_CMD_BUSY)
-#define GLOBAL2_IRL_CMD_OP_INIT_SEL ((0x010 << 12) | GLOBAL2_IRL_CMD_BUSY)
-#define GLOBAL2_IRL_CMD_OP_WRITE_SEL ((0x011 << 12) | GLOBAL2_IRL_CMD_BUSY)
-#define GLOBAL2_IRL_CMD_OP_READ_SEL ((0x100 << 12) | GLOBAL2_IRL_CMD_BUSY)
-#define GLOBAL2_IRL_DATA 0x0a
-#define GLOBAL2_PVT_ADDR 0x0b
-#define GLOBAL2_PVT_ADDR_BUSY BIT(15)
-#define GLOBAL2_PVT_ADDR_OP_INIT_ONES ((0x01 << 12) | GLOBAL2_PVT_ADDR_BUSY)
-#define GLOBAL2_PVT_ADDR_OP_WRITE_PVLAN ((0x03 << 12) | GLOBAL2_PVT_ADDR_BUSY)
-#define GLOBAL2_PVT_ADDR_OP_READ ((0x04 << 12) | GLOBAL2_PVT_ADDR_BUSY)
-#define GLOBAL2_PVT_DATA 0x0c
-#define GLOBAL2_SWITCH_MAC 0x0d
-#define GLOBAL2_ATU_STATS 0x0e
-#define GLOBAL2_PRIO_OVERRIDE 0x0f
-#define GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP BIT(7)
-#define GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT 4
-#define GLOBAL2_PRIO_OVERRIDE_FORCE_ARP BIT(3)
-#define GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT 0
-#define GLOBAL2_EEPROM_CMD 0x14
-#define GLOBAL2_EEPROM_CMD_BUSY BIT(15)
-#define GLOBAL2_EEPROM_CMD_OP_WRITE ((0x3 << 12) | GLOBAL2_EEPROM_CMD_BUSY)
-#define GLOBAL2_EEPROM_CMD_OP_READ ((0x4 << 12) | GLOBAL2_EEPROM_CMD_BUSY)
-#define GLOBAL2_EEPROM_CMD_OP_LOAD ((0x6 << 12) | GLOBAL2_EEPROM_CMD_BUSY)
-#define GLOBAL2_EEPROM_CMD_RUNNING BIT(11)
-#define GLOBAL2_EEPROM_CMD_WRITE_EN BIT(10)
-#define GLOBAL2_EEPROM_CMD_ADDR_MASK 0xff
-#define GLOBAL2_EEPROM_DATA 0x15
-#define GLOBAL2_EEPROM_ADDR 0x15 /* 6390, 6341 */
-#define GLOBAL2_PTP_AVB_OP 0x16
-#define GLOBAL2_PTP_AVB_DATA 0x17
-#define GLOBAL2_SMI_PHY_CMD 0x18
-#define GLOBAL2_SMI_PHY_CMD_BUSY BIT(15)
-#define GLOBAL2_SMI_PHY_CMD_EXTERNAL BIT(13)
-#define GLOBAL2_SMI_PHY_CMD_MODE_22 BIT(12)
-#define GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA ((0x1 << 10) | \
- GLOBAL2_SMI_PHY_CMD_MODE_22 | \
- GLOBAL2_SMI_PHY_CMD_BUSY)
-#define GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA ((0x2 << 10) | \
- GLOBAL2_SMI_PHY_CMD_MODE_22 | \
- GLOBAL2_SMI_PHY_CMD_BUSY)
-#define GLOBAL2_SMI_PHY_CMD_OP_45_WRITE_ADDR ((0x0 << 10) | \
- GLOBAL2_SMI_PHY_CMD_BUSY)
-#define GLOBAL2_SMI_PHY_CMD_OP_45_WRITE_DATA ((0x1 << 10) | \
- GLOBAL2_SMI_PHY_CMD_BUSY)
-#define GLOBAL2_SMI_PHY_CMD_OP_45_READ_DATA ((0x3 << 10) | \
- GLOBAL2_SMI_PHY_CMD_BUSY)
-
-#define GLOBAL2_SMI_PHY_DATA 0x19
-#define GLOBAL2_SCRATCH_MISC 0x1a
-#define GLOBAL2_SCRATCH_BUSY BIT(15)
-#define GLOBAL2_SCRATCH_REGISTER_SHIFT 8
-#define GLOBAL2_SCRATCH_VALUE_MASK 0xff
-#define GLOBAL2_WDOG_CONTROL 0x1b
-#define GLOBAL2_WDOG_CONTROL_EGRESS_EVENT BIT(7)
-#define GLOBAL2_WDOG_CONTROL_RMU_TIMEOUT BIT(6)
-#define GLOBAL2_WDOG_CONTROL_QC_ENABLE BIT(5)
-#define GLOBAL2_WDOG_CONTROL_EGRESS_HISTORY BIT(4)
-#define GLOBAL2_WDOG_CONTROL_EGRESS_ENABLE BIT(3)
-#define GLOBAL2_WDOG_CONTROL_FORCE_IRQ BIT(2)
-#define GLOBAL2_WDOG_CONTROL_HISTORY BIT(1)
-#define GLOBAL2_WDOG_CONTROL_SWRESET BIT(0)
-#define GLOBAL2_WDOG_UPDATE BIT(15)
-#define GLOBAL2_WDOG_INT_SOURCE (0x00 << 8)
-#define GLOBAL2_WDOG_INT_STATUS (0x10 << 8)
-#define GLOBAL2_WDOG_INT_ENABLE (0x11 << 8)
-#define GLOBAL2_WDOG_EVENT (0x12 << 8)
-#define GLOBAL2_WDOG_HISTORY (0x13 << 8)
-#define GLOBAL2_WDOG_DATA_MASK 0xff
-#define GLOBAL2_WDOG_CUT_THROUGH BIT(3)
-#define GLOBAL2_WDOG_QUEUE_CONTROLLER BIT(2)
-#define GLOBAL2_WDOG_EGRESS BIT(1)
-#define GLOBAL2_WDOG_FORCE_IRQ BIT(0)
-#define GLOBAL2_QOS_WEIGHT 0x1c
-#define GLOBAL2_MISC 0x1d
-#define GLOBAL2_MISC_5_BIT_PORT BIT(14)
-
-#define MV88E6XXX_N_FID 4096
-
-/* PVT limits for 4-bit port and 5-bit switch */
-#define MV88E6XXX_MAX_PVT_SWITCHES 32
-#define MV88E6XXX_MAX_PVT_PORTS 16
-
-enum mv88e6xxx_frame_mode {
- MV88E6XXX_FRAME_MODE_NORMAL,
- MV88E6XXX_FRAME_MODE_DSA,
- MV88E6XXX_FRAME_MODE_PROVIDER,
- MV88E6XXX_FRAME_MODE_ETHERTYPE,
-};
-
-/* List of supported models */
-enum mv88e6xxx_model {
- MV88E6085,
- MV88E6095,
- MV88E6097,
- MV88E6123,
- MV88E6131,
- MV88E6141,
- MV88E6161,
- MV88E6165,
- MV88E6171,
- MV88E6172,
- MV88E6175,
- MV88E6176,
- MV88E6185,
- MV88E6190,
- MV88E6190X,
- MV88E6191,
- MV88E6240,
- MV88E6290,
- MV88E6320,
- MV88E6321,
- MV88E6341,
- MV88E6350,
- MV88E6351,
- MV88E6352,
- MV88E6390,
- MV88E6390X,
-};
-
-enum mv88e6xxx_family {
- MV88E6XXX_FAMILY_NONE,
- MV88E6XXX_FAMILY_6065, /* 6031 6035 6061 6065 */
- MV88E6XXX_FAMILY_6095, /* 6092 6095 */
- MV88E6XXX_FAMILY_6097, /* 6046 6085 6096 6097 */
- MV88E6XXX_FAMILY_6165, /* 6123 6161 6165 */
- MV88E6XXX_FAMILY_6185, /* 6108 6121 6122 6131 6152 6155 6182 6185 */
- MV88E6XXX_FAMILY_6320, /* 6320 6321 */
- MV88E6XXX_FAMILY_6341, /* 6141 6341 */
- MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */
- MV88E6XXX_FAMILY_6352, /* 6172 6176 6240 6352 */
- MV88E6XXX_FAMILY_6390, /* 6190 6190X 6191 6290 6390 6390X */
-};
-
-enum mv88e6xxx_cap {
- /* Energy Efficient Ethernet.
- */
- MV88E6XXX_CAP_EEE,
-
- /* Multi-chip Addressing Mode.
- * Some chips respond to only 2 registers at their own SMI device
- * address when that address is non-zero, and use indirect access to
- * internal registers.
- */
- MV88E6XXX_CAP_SMI_CMD, /* (0x00) SMI Command */
- MV88E6XXX_CAP_SMI_DATA, /* (0x01) SMI Data */
-
- /* PHY Registers.
- */
- MV88E6XXX_CAP_PHY_PAGE, /* (0x16) Page Register */
-
- /* Fiber/SERDES Registers (SMI address F).
- */
- MV88E6XXX_CAP_SERDES,
-
- /* Switch Global (1) Registers.
- */
- MV88E6XXX_CAP_G1_ATU_FID, /* (0x01) ATU FID Register */
- MV88E6XXX_CAP_G1_VTU_FID, /* (0x02) VTU FID Register */
-
- /* Switch Global 2 Registers.
- * The device contains a second set of global 16-bit registers.
- */
- MV88E6XXX_CAP_GLOBAL2,
- MV88E6XXX_CAP_G2_INT, /* (0x00) Interrupt Status */
- MV88E6XXX_CAP_G2_MGMT_EN_2X, /* (0x02) MGMT Enable Register 2x */
- MV88E6XXX_CAP_G2_MGMT_EN_0X, /* (0x03) MGMT Enable Register 0x */
- MV88E6XXX_CAP_G2_IRL_CMD, /* (0x09) Ingress Rate Command */
- MV88E6XXX_CAP_G2_IRL_DATA, /* (0x0a) Ingress Rate Data */
- MV88E6XXX_CAP_G2_POT, /* (0x0f) Priority Override Table */
-
- /* Per VLAN Spanning Tree Unit (STU).
- * The Port State database, if present, is accessed through VTU
- * operations and dedicated SID registers. See GLOBAL_VTU_SID.
- */
- MV88E6XXX_CAP_STU,
-
- /* VLAN Table Unit.
- * The VTU is used to program 802.1Q VLANs. See GLOBAL_VTU_OP.
- */
- MV88E6XXX_CAP_VTU,
-};
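
The multi-chip addressing note above is terse. As a hedged illustration only (the 0x00/0x01 offsets match the SMI Command/Data capabilities listed, but the EX_* bit encodings and helper name below are assumptions, not taken from this header), an indirect read of an internal register looks roughly like this:

/* Hypothetical sketch of indirect (multi-chip) SMI access; the op
 * encoding is an assumption for illustration, not this driver's code.
 */
#include <linux/bits.h>
#include <linux/mdio.h>

#define EX_SMI_CMD		0x00	/* per MV88E6XXX_CAP_SMI_CMD */
#define EX_SMI_CMD_BUSY		BIT(15)
#define EX_SMI_CMD_OP_22_READ	(BIT(12) | (0x2 << 10))	/* assumed */
#define EX_SMI_DATA		0x01	/* per MV88E6XXX_CAP_SMI_DATA */

static int ex_smi_indirect_read(struct mii_bus *bus, int sw_addr,
				int dev, int reg, u16 *val)
{
	int ret;

	/* Issue the read command at the switch's own (non-zero) address */
	ret = mdiobus_write(bus, sw_addr, EX_SMI_CMD,
			    EX_SMI_CMD_BUSY | EX_SMI_CMD_OP_22_READ |
			    (dev << 5) | reg);
	if (ret)
		return ret;

	/* A real implementation polls here until EX_SMI_CMD_BUSY clears */

	ret = mdiobus_read(bus, sw_addr, EX_SMI_DATA);
	if (ret < 0)
		return ret;

	*val = ret;
	return 0;
}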
-
-/* Bitmask of capabilities */
-#define MV88E6XXX_FLAG_EEE BIT_ULL(MV88E6XXX_CAP_EEE)
-
-#define MV88E6XXX_FLAG_SMI_CMD BIT_ULL(MV88E6XXX_CAP_SMI_CMD)
-#define MV88E6XXX_FLAG_SMI_DATA BIT_ULL(MV88E6XXX_CAP_SMI_DATA)
-
-#define MV88E6XXX_FLAG_PHY_PAGE BIT_ULL(MV88E6XXX_CAP_PHY_PAGE)
-
-#define MV88E6XXX_FLAG_SERDES BIT_ULL(MV88E6XXX_CAP_SERDES)
-
-#define MV88E6XXX_FLAG_G1_VTU_FID BIT_ULL(MV88E6XXX_CAP_G1_VTU_FID)
-
-#define MV88E6XXX_FLAG_GLOBAL2 BIT_ULL(MV88E6XXX_CAP_GLOBAL2)
-#define MV88E6XXX_FLAG_G2_INT BIT_ULL(MV88E6XXX_CAP_G2_INT)
-#define MV88E6XXX_FLAG_G2_MGMT_EN_2X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_2X)
-#define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_0X)
-#define MV88E6XXX_FLAG_G2_IRL_CMD BIT_ULL(MV88E6XXX_CAP_G2_IRL_CMD)
-#define MV88E6XXX_FLAG_G2_IRL_DATA BIT_ULL(MV88E6XXX_CAP_G2_IRL_DATA)
-#define MV88E6XXX_FLAG_G2_POT BIT_ULL(MV88E6XXX_CAP_G2_POT)
-
-/* Ingress Rate Limit unit */
-#define MV88E6XXX_FLAGS_IRL \
- (MV88E6XXX_FLAG_G2_IRL_CMD | \
- MV88E6XXX_FLAG_G2_IRL_DATA)
-
-/* Multi-chip Addressing Mode */
-#define MV88E6XXX_FLAGS_MULTI_CHIP \
- (MV88E6XXX_FLAG_SMI_CMD | \
- MV88E6XXX_FLAG_SMI_DATA)
-
-/* Fiber/SERDES Registers at SMI address F, page 1 */
-#define MV88E6XXX_FLAGS_SERDES \
- (MV88E6XXX_FLAG_PHY_PAGE | \
- MV88E6XXX_FLAG_SERDES)
-
-#define MV88E6XXX_FLAGS_FAMILY_6095 \
- (MV88E6XXX_FLAG_GLOBAL2 | \
- MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAGS_MULTI_CHIP)
-
-#define MV88E6XXX_FLAGS_FAMILY_6097 \
- (MV88E6XXX_FLAG_G1_VTU_FID | \
- MV88E6XXX_FLAG_GLOBAL2 | \
- MV88E6XXX_FLAG_G2_INT | \
- MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
- MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAGS_IRL | \
- MV88E6XXX_FLAGS_MULTI_CHIP)
-
-#define MV88E6XXX_FLAGS_FAMILY_6165 \
- (MV88E6XXX_FLAG_G1_VTU_FID | \
- MV88E6XXX_FLAG_GLOBAL2 | \
- MV88E6XXX_FLAG_G2_INT | \
- MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
- MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAGS_IRL | \
- MV88E6XXX_FLAGS_MULTI_CHIP)
-
-#define MV88E6XXX_FLAGS_FAMILY_6185 \
- (MV88E6XXX_FLAG_GLOBAL2 | \
- MV88E6XXX_FLAG_G2_INT | \
- MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAGS_MULTI_CHIP)
-
-#define MV88E6XXX_FLAGS_FAMILY_6320 \
- (MV88E6XXX_FLAG_EEE | \
- MV88E6XXX_FLAG_GLOBAL2 | \
- MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
- MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAGS_IRL | \
- MV88E6XXX_FLAGS_MULTI_CHIP)
-
-#define MV88E6XXX_FLAGS_FAMILY_6341 \
- (MV88E6XXX_FLAG_EEE | \
- MV88E6XXX_FLAG_G1_VTU_FID | \
- MV88E6XXX_FLAG_GLOBAL2 | \
- MV88E6XXX_FLAG_G2_INT | \
- MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAGS_IRL | \
- MV88E6XXX_FLAGS_MULTI_CHIP | \
- MV88E6XXX_FLAGS_SERDES)
-
-#define MV88E6XXX_FLAGS_FAMILY_6351 \
- (MV88E6XXX_FLAG_G1_VTU_FID | \
- MV88E6XXX_FLAG_GLOBAL2 | \
- MV88E6XXX_FLAG_G2_INT | \
- MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
- MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAGS_IRL | \
- MV88E6XXX_FLAGS_MULTI_CHIP)
-
-#define MV88E6XXX_FLAGS_FAMILY_6352 \
- (MV88E6XXX_FLAG_EEE | \
- MV88E6XXX_FLAG_G1_VTU_FID | \
- MV88E6XXX_FLAG_GLOBAL2 | \
- MV88E6XXX_FLAG_G2_INT | \
- MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
- MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAGS_IRL | \
- MV88E6XXX_FLAGS_MULTI_CHIP | \
- MV88E6XXX_FLAGS_SERDES)
-
-#define MV88E6XXX_FLAGS_FAMILY_6390 \
- (MV88E6XXX_FLAG_EEE | \
- MV88E6XXX_FLAG_GLOBAL2 | \
- MV88E6XXX_FLAG_G2_INT | \
- MV88E6XXX_FLAGS_IRL | \
- MV88E6XXX_FLAGS_MULTI_CHIP)
-
-struct mv88e6xxx_ops;
-
-struct mv88e6xxx_info {
- enum mv88e6xxx_family family;
- u16 prod_num;
- const char *name;
- unsigned int num_databases;
- unsigned int num_ports;
- unsigned int max_vid;
- unsigned int port_base_addr;
- unsigned int global1_addr;
- unsigned int age_time_coeff;
- unsigned int g1_irqs;
- bool pvt;
- enum dsa_tag_protocol tag_protocol;
- unsigned long long flags;
-
- /* Mask for FromPort and ToPort value of PortVec used in ATU Move
- * operation. 0 means that the ATU Move operation is not supported.
- */
- u8 atu_move_port_mask;
- const struct mv88e6xxx_ops *ops;
-};
-
-struct mv88e6xxx_atu_entry {
- u8 state;
- bool trunk;
- u16 portvec;
- u8 mac[ETH_ALEN];
-};
-
-struct mv88e6xxx_vtu_entry {
- u16 vid;
- u16 fid;
- u8 sid;
- bool valid;
- u8 member[DSA_MAX_PORTS];
- u8 state[DSA_MAX_PORTS];
-};
-
-struct mv88e6xxx_bus_ops;
-struct mv88e6xxx_irq_ops;
-
-struct mv88e6xxx_irq {
- u16 masked;
- struct irq_chip chip;
- struct irq_domain *domain;
- unsigned int nirqs;
-};
-
-struct mv88e6xxx_chip {
- const struct mv88e6xxx_info *info;
-
- /* The dsa_switch this private structure is related to */
- struct dsa_switch *ds;
-
- /* The device this structure is associated to */
- struct device *dev;
-
- /* This mutex protects the access to the switch registers */
- struct mutex reg_lock;
-
- /* The MII bus and the address on the bus that is used to
- * communicate with the switch
- */
- const struct mv88e6xxx_bus_ops *smi_ops;
- struct mii_bus *bus;
- int sw_addr;
-
- /* Handles automatic disabling and re-enabling of the PHY
- * polling unit.
- */
- const struct mv88e6xxx_bus_ops *phy_ops;
- struct mutex ppu_mutex;
- int ppu_disabled;
- struct work_struct ppu_work;
- struct timer_list ppu_timer;
-
- /* This mutex serialises access to the statistics unit.
- * Hold this mutex over snapshot + dump sequences.
- */
- struct mutex stats_mutex;
-
- /* A switch may have a GPIO line tied to its reset pin. Parse
- * this from the device tree, and use it before performing
- * switch soft reset.
- */
- struct gpio_desc *reset;
-
- /* set to size of eeprom if supported by the switch */
- int eeprom_len;
-
- /* List of mdio busses */
- struct list_head mdios;
-
- /* There can be two interrupt controllers, which are chained
- * off a GPIO as interrupt source
- */
- struct mv88e6xxx_irq g1_irq;
- struct mv88e6xxx_irq g2_irq;
- int irq;
- int device_irq;
- int watchdog_irq;
-};
-
-struct mv88e6xxx_bus_ops {
- int (*read)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
- int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
-};
-
-struct mv88e6xxx_mdio_bus {
- struct mii_bus *bus;
- struct mv88e6xxx_chip *chip;
- struct list_head list;
- bool external;
-};
-
-struct mv88e6xxx_ops {
- int (*get_eeprom)(struct mv88e6xxx_chip *chip,
- struct ethtool_eeprom *eeprom, u8 *data);
- int (*set_eeprom)(struct mv88e6xxx_chip *chip,
- struct ethtool_eeprom *eeprom, u8 *data);
-
- int (*set_switch_mac)(struct mv88e6xxx_chip *chip, u8 *addr);
-
- int (*phy_read)(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 *val);
- int (*phy_write)(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 val);
-
- /* PHY Polling Unit (PPU) operations */
- int (*ppu_enable)(struct mv88e6xxx_chip *chip);
- int (*ppu_disable)(struct mv88e6xxx_chip *chip);
-
- /* Switch Software Reset */
- int (*reset)(struct mv88e6xxx_chip *chip);
-
- /* RGMII Receive/Transmit Timing Control
- * Add delay on PHY_INTERFACE_MODE_RGMII_*ID, no delay otherwise.
- */
- int (*port_set_rgmii_delay)(struct mv88e6xxx_chip *chip, int port,
- phy_interface_t mode);
-
-#define LINK_FORCED_DOWN 0
-#define LINK_FORCED_UP 1
-#define LINK_UNFORCED -2
-
- /* Port's MAC link state
- * Use LINK_FORCED_UP or LINK_FORCED_DOWN to force link up or down,
- * or LINK_UNFORCED for normal link detection.
- */
- int (*port_set_link)(struct mv88e6xxx_chip *chip, int port, int link);
-
-#define DUPLEX_UNFORCED -2
-
- /* Port's MAC duplex mode
- *
- * Use DUPLEX_HALF or DUPLEX_FULL to force half or full duplex,
- * or DUPLEX_UNFORCED for normal duplex detection.
- */
- int (*port_set_duplex)(struct mv88e6xxx_chip *chip, int port, int dup);
-
-#define SPEED_MAX INT_MAX
-#define SPEED_UNFORCED -2
-
- /* Port's MAC speed (in Mbps)
- *
- * Depending on the chip, 10, 100, 200, 1000, 2500, 10000 are valid.
- * Use SPEED_UNFORCED for normal detection, SPEED_MAX for max value.
- */
- int (*port_set_speed)(struct mv88e6xxx_chip *chip, int port, int speed);
-
- int (*port_tag_remap)(struct mv88e6xxx_chip *chip, int port);
-
- int (*port_set_frame_mode)(struct mv88e6xxx_chip *chip, int port,
- enum mv88e6xxx_frame_mode mode);
- int (*port_set_egress_floods)(struct mv88e6xxx_chip *chip, int port,
- bool unicast, bool multicast);
- int (*port_set_ether_type)(struct mv88e6xxx_chip *chip, int port,
- u16 etype);
- int (*port_jumbo_config)(struct mv88e6xxx_chip *chip, int port);
-
- int (*port_egress_rate_limiting)(struct mv88e6xxx_chip *chip, int port);
- int (*port_pause_config)(struct mv88e6xxx_chip *chip, int port);
- int (*port_disable_learn_limit)(struct mv88e6xxx_chip *chip, int port);
- int (*port_disable_pri_override)(struct mv88e6xxx_chip *chip, int port);
-
- /* CMODE controls which PHY mode the MAC will use, e.g. SGMII, RGMII, etc.
- * Some chips allow this to be configured on specific ports.
- */
- int (*port_set_cmode)(struct mv88e6xxx_chip *chip, int port,
- phy_interface_t mode);
-
- /* Some devices have a per-port register indicating the upstream
- * port this port should forward to.
- */
- int (*port_set_upstream_port)(struct mv88e6xxx_chip *chip, int port,
- int upstream_port);
-
- /* Snapshot the statistics for a port. The statistics can then
- * be read back at leisure but still with a consistent view.
- */
- int (*stats_snapshot)(struct mv88e6xxx_chip *chip, int port);
-
- /* Set the histogram mode for statistics, when the control registers
- * are separated out of the STATS_OP register.
- */
- int (*stats_set_histogram)(struct mv88e6xxx_chip *chip);
-
- /* Return the number of strings describing statistics */
- int (*stats_get_sset_count)(struct mv88e6xxx_chip *chip);
- void (*stats_get_strings)(struct mv88e6xxx_chip *chip, uint8_t *data);
- void (*stats_get_stats)(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data);
- int (*g1_set_cpu_port)(struct mv88e6xxx_chip *chip, int port);
- int (*g1_set_egress_port)(struct mv88e6xxx_chip *chip, int port);
- const struct mv88e6xxx_irq_ops *watchdog_ops;
-
- /* Can be either in g1 or g2, so don't use a prefix */
- int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip);
-
- /* VLAN Translation Unit operations */
- int (*vtu_getnext)(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry);
- int (*vtu_loadpurge)(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry);
-};
-
-struct mv88e6xxx_irq_ops {
- /* Action to be performed when the interrupt happens */
- int (*irq_action)(struct mv88e6xxx_chip *chip, int irq);
- /* Setup the hardware to generate the interrupt */
- int (*irq_setup)(struct mv88e6xxx_chip *chip);
- /* Reset the hardware to stop generating the interrupt */
- void (*irq_free)(struct mv88e6xxx_chip *chip);
-};
-
-#define STATS_TYPE_PORT BIT(0)
-#define STATS_TYPE_BANK0 BIT(1)
-#define STATS_TYPE_BANK1 BIT(2)
-
-struct mv88e6xxx_hw_stat {
- char string[ETH_GSTRING_LEN];
- int sizeof_stat;
- int reg;
- int type;
-};
-
-static inline bool mv88e6xxx_has(struct mv88e6xxx_chip *chip,
- unsigned long flags)
-{
- return (chip->info->flags & flags) == flags;
-}
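
Because mv88e6xxx_has() compares the masked value against the whole mask, composite masks test all-or-nothing. A purely illustrative caller:

/* Illustrative only: true iff both G2_IRL_CMD and G2_IRL_DATA are
 * present, since mv88e6xxx_has() requires every bit of the mask.
 */
static bool ex_chip_has_irl(struct mv88e6xxx_chip *chip)
{
	return mv88e6xxx_has(chip, MV88E6XXX_FLAGS_IRL);
}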
-
-static inline bool mv88e6xxx_has_pvt(struct mv88e6xxx_chip *chip)
-{
- return chip->info->pvt;
-}
-
-static inline unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip)
-{
- return chip->info->num_databases;
-}
-
-static inline unsigned int mv88e6xxx_num_ports(struct mv88e6xxx_chip *chip)
-{
- return chip->info->num_ports;
-}
-
-static inline u16 mv88e6xxx_port_mask(struct mv88e6xxx_chip *chip)
-{
- return GENMASK(mv88e6xxx_num_ports(chip) - 1, 0);
-}
-
-int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
-int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
-int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg,
- u16 update);
-int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask);
-
-#endif
diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c
new file mode 100644
index 0000000..3500ac0
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/phy.c
@@ -0,0 +1,249 @@
+/*
+ * Marvell 88e6xxx Ethernet switch PHY and PPU support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <net/dsa.h>
+
+#include "chip.h"
+#include "phy.h"
+
+int mv88e6165_phy_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 *val)
+{
+ return mv88e6xxx_read(chip, addr, reg, val);
+}
+
+int mv88e6165_phy_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 val)
+{
+ return mv88e6xxx_write(chip, addr, reg, val);
+}
+
+int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy, int reg, u16 *val)
+{
+ int addr = phy; /* PHY device addresses start at 0x0 */
+ struct mii_bus *bus;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ if (!bus)
+ return -EOPNOTSUPP;
+
+ if (!chip->info->ops->phy_read)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->phy_read(chip, bus, addr, reg, val);
+}
+
+int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy, int reg, u16 val)
+{
+ int addr = phy; /* PHY device addresses start at 0x0 */
+ struct mii_bus *bus;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ if (!bus)
+ return -EOPNOTSUPP;
+
+ if (!chip->info->ops->phy_write)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->phy_write(chip, bus, addr, reg, val);
+}
+
+static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page)
+{
+ return mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page);
+}
+
+static void mv88e6xxx_phy_page_put(struct mv88e6xxx_chip *chip, int phy)
+{
+ int err;
+
+ /* Restore PHY page Copper 0x0 for access via the registered
+ * MDIO bus
+ */
+ err = mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE,
+ MV88E6XXX_PHY_PAGE_COPPER);
+ if (unlikely(err)) {
+ dev_err(chip->dev,
+ "failed to restore PHY %d page Copper (%d)\n",
+ phy, err);
+ }
+}
+
+int mv88e6xxx_phy_page_read(struct mv88e6xxx_chip *chip, int phy,
+ u8 page, int reg, u16 *val)
+{
+ int err;
+
+ /* There is no paging for register 22 */
+ if (reg == MV88E6XXX_PHY_PAGE)
+ return -EINVAL;
+
+ err = mv88e6xxx_phy_page_get(chip, phy, page);
+ if (!err) {
+ err = mv88e6xxx_phy_read(chip, phy, reg, val);
+ mv88e6xxx_phy_page_put(chip, phy);
+ }
+
+ return err;
+}
+
+int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy,
+ u8 page, int reg, u16 val)
+{
+ int err;
+
+ /* There is no paging for register 22 */
+ if (reg == MV88E6XXX_PHY_PAGE)
+ return -EINVAL;
+
+ err = mv88e6xxx_phy_page_get(chip, phy, page);
+ if (!err) {
+ err = mv88e6xxx_phy_write(chip, phy, reg, val);
+ mv88e6xxx_phy_page_put(chip, phy);
+ }
+
+ return err;
+}
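
A usage sketch of the paged accessors, not part of the patch: the address 0xf and page 1 follow the Fiber/SERDES convention noted in the removed header, the register offset 0x01 is illustrative, and the caller is assumed to hold chip->reg_lock.

/* Read one fiber/SERDES register; page selection and the restore back
 * to the Copper page happen inside mv88e6xxx_phy_page_read().
 */
static int ex_read_serdes_reg(struct mv88e6xxx_chip *chip, u16 *val)
{
	return mv88e6xxx_phy_page_read(chip, 0xf, 1, 0x01, val);
}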
+
+static int mv88e6xxx_phy_ppu_disable(struct mv88e6xxx_chip *chip)
+{
+ if (!chip->info->ops->ppu_disable)
+ return 0;
+
+ return chip->info->ops->ppu_disable(chip);
+}
+
+static int mv88e6xxx_phy_ppu_enable(struct mv88e6xxx_chip *chip)
+{
+ if (!chip->info->ops->ppu_enable)
+ return 0;
+
+ return chip->info->ops->ppu_enable(chip);
+}
+
+static void mv88e6xxx_phy_ppu_reenable_work(struct work_struct *ugly)
+{
+ struct mv88e6xxx_chip *chip;
+
+ chip = container_of(ugly, struct mv88e6xxx_chip, ppu_work);
+
+ mutex_lock(&chip->reg_lock);
+
+ if (mutex_trylock(&chip->ppu_mutex)) {
+ if (mv88e6xxx_phy_ppu_enable(chip) == 0)
+ chip->ppu_disabled = 0;
+ mutex_unlock(&chip->ppu_mutex);
+ }
+
+ mutex_unlock(&chip->reg_lock);
+}
+
+static void mv88e6xxx_phy_ppu_reenable_timer(unsigned long _ps)
+{
+ struct mv88e6xxx_chip *chip = (void *)_ps;
+
+ schedule_work(&chip->ppu_work);
+}
+
+static int mv88e6xxx_phy_ppu_access_get(struct mv88e6xxx_chip *chip)
+{
+ int ret;
+
+ mutex_lock(&chip->ppu_mutex);
+
+ /* If the PHY polling unit is enabled, disable it so that
+ * we can access the PHY registers. If it was already
+ * disabled, cancel the timer that is going to re-enable
+ * it.
+ */
+ if (!chip->ppu_disabled) {
+ ret = mv88e6xxx_phy_ppu_disable(chip);
+ if (ret < 0) {
+ mutex_unlock(&chip->ppu_mutex);
+ return ret;
+ }
+ chip->ppu_disabled = 1;
+ } else {
+ del_timer(&chip->ppu_timer);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void mv88e6xxx_phy_ppu_access_put(struct mv88e6xxx_chip *chip)
+{
+ /* Schedule a timer to re-enable the PHY polling unit. */
+ mod_timer(&chip->ppu_timer, jiffies + msecs_to_jiffies(10));
+ mutex_unlock(&chip->ppu_mutex);
+}
+
+static void mv88e6xxx_phy_ppu_state_init(struct mv88e6xxx_chip *chip)
+{
+ mutex_init(&chip->ppu_mutex);
+ INIT_WORK(&chip->ppu_work, mv88e6xxx_phy_ppu_reenable_work);
+ setup_timer(&chip->ppu_timer, mv88e6xxx_phy_ppu_reenable_timer,
+ (unsigned long)chip);
+}
+
+static void mv88e6xxx_phy_ppu_state_destroy(struct mv88e6xxx_chip *chip)
+{
+ del_timer_sync(&chip->ppu_timer);
+}
+
+int mv88e6185_phy_ppu_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 *val)
+{
+ int err;
+
+ err = mv88e6xxx_phy_ppu_access_get(chip);
+ if (!err) {
+ err = mv88e6xxx_read(chip, addr, reg, val);
+ mv88e6xxx_phy_ppu_access_put(chip);
+ }
+
+ return err;
+}
+
+int mv88e6185_phy_ppu_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 val)
+{
+ int err;
+
+ err = mv88e6xxx_phy_ppu_access_get(chip);
+ if (!err) {
+ err = mv88e6xxx_write(chip, addr, reg, val);
+ mv88e6xxx_phy_ppu_access_put(chip);
+ }
+
+ return err;
+}
+
+void mv88e6xxx_phy_init(struct mv88e6xxx_chip *chip)
+{
+ if (chip->info->ops->ppu_enable && chip->info->ops->ppu_disable)
+ mv88e6xxx_phy_ppu_state_init(chip);
+}
+
+void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip)
+{
+ if (chip->info->ops->ppu_enable && chip->info->ops->ppu_disable)
+ mv88e6xxx_phy_ppu_state_destroy(chip);
+}
+
+int mv88e6xxx_phy_setup(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_phy_ppu_enable(chip);
+}
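
A hedged sketch of the expected call order from the chip core; the probe/remove paths are outside this hunk, so the caller shown is illustrative.

/* Illustrative lifecycle: set up PPU state early, re-enable polling
 * once registers are reachable, cancel the PPU timer on failure.
 */
static int ex_phy_bringup(struct mv88e6xxx_chip *chip)
{
	int err;

	mv88e6xxx_phy_init(chip);	/* PPU work + timer, if the chip has a PPU */

	err = mv88e6xxx_phy_setup(chip);	/* re-enable the PPU */
	if (err)
		mv88e6xxx_phy_destroy(chip);

	return err;
}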
diff --git a/drivers/net/dsa/mv88e6xxx/phy.h b/drivers/net/dsa/mv88e6xxx/phy.h
new file mode 100644
index 0000000..556b74a
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/phy.h
@@ -0,0 +1,43 @@
+/*
+ * Marvell 88E6xxx PHY access
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _MV88E6XXX_PHY_H
+#define _MV88E6XXX_PHY_H
+
+#define MV88E6XXX_PHY_PAGE 0x16
+#define MV88E6XXX_PHY_PAGE_COPPER 0x00
+
+/* PHY register access implementations */
+int mv88e6165_phy_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 *val);
+int mv88e6165_phy_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 val);
+int mv88e6185_phy_ppu_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 *val);
+int mv88e6185_phy_ppu_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 val);
+
+/* Generic PHY operations */
+int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy,
+ int reg, u16 *val);
+int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy,
+ int reg, u16 val);
+int mv88e6xxx_phy_page_read(struct mv88e6xxx_chip *chip, int phy,
+ u8 page, int reg, u16 *val);
+int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy,
+ u8 page, int reg, u16 val);
+void mv88e6xxx_phy_init(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_phy_setup(struct mv88e6xxx_chip *chip);
+
+#endif /* _MV88E6XXX_PHY_H */
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 548a956..a7801f6 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -12,8 +12,11 @@
* (at your option) any later version.
*/
+#include <linux/bitfield.h>
+#include <linux/if_bridge.h>
#include <linux/phy.h>
-#include "mv88e6xxx.h"
+
+#include "chip.h"
#include "port.h"
int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
@@ -47,23 +50,23 @@
u16 reg;
int err;
- err = mv88e6xxx_port_read(chip, port, PORT_PCS_CTRL, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
if (err)
return err;
- reg &= ~(PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
- PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
+ reg &= ~(MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK |
+ MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK);
switch (mode) {
case PHY_INTERFACE_MODE_RGMII_RXID:
- reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
+ reg |= MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK;
break;
case PHY_INTERFACE_MODE_RGMII_TXID:
- reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
+ reg |= MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK;
break;
case PHY_INTERFACE_MODE_RGMII_ID:
- reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
- PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
+ reg |= MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK |
+ MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK;
break;
case PHY_INTERFACE_MODE_RGMII:
break;
@@ -71,13 +74,13 @@
return 0;
}
- err = mv88e6xxx_port_write(chip, port, PORT_PCS_CTRL, reg);
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
if (err)
return err;
- netdev_dbg(chip->ds->ports[port].netdev, "delay RXCLK %s, TXCLK %s\n",
- reg & PORT_PCS_CTRL_RGMII_DELAY_RXCLK ? "yes" : "no",
- reg & PORT_PCS_CTRL_RGMII_DELAY_TXCLK ? "yes" : "no");
+ dev_dbg(chip->dev, "p%d: delay RXCLK %s, TXCLK %s\n", port,
+ reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK ? "yes" : "no",
+ reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK ? "yes" : "no");
return 0;
}
@@ -105,18 +108,20 @@
u16 reg;
int err;
- err = mv88e6xxx_port_read(chip, port, PORT_PCS_CTRL, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
if (err)
return err;
- reg &= ~(PORT_PCS_CTRL_FORCE_LINK | PORT_PCS_CTRL_LINK_UP);
+ reg &= ~(MV88E6XXX_PORT_MAC_CTL_FORCE_LINK |
+ MV88E6XXX_PORT_MAC_CTL_LINK_UP);
switch (link) {
case LINK_FORCED_DOWN:
- reg |= PORT_PCS_CTRL_FORCE_LINK;
+ reg |= MV88E6XXX_PORT_MAC_CTL_FORCE_LINK;
break;
case LINK_FORCED_UP:
- reg |= PORT_PCS_CTRL_FORCE_LINK | PORT_PCS_CTRL_LINK_UP;
+ reg |= MV88E6XXX_PORT_MAC_CTL_FORCE_LINK |
+ MV88E6XXX_PORT_MAC_CTL_LINK_UP;
break;
case LINK_UNFORCED:
/* normal link detection */
@@ -125,13 +130,13 @@
return -EINVAL;
}
- err = mv88e6xxx_port_write(chip, port, PORT_PCS_CTRL, reg);
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
if (err)
return err;
- netdev_dbg(chip->ds->ports[port].netdev, "%s link %s\n",
- reg & PORT_PCS_CTRL_FORCE_LINK ? "Force" : "Unforce",
- reg & PORT_PCS_CTRL_LINK_UP ? "up" : "down");
+ dev_dbg(chip->dev, "p%d: %s link %s\n", port,
+ reg & MV88E6XXX_PORT_MAC_CTL_FORCE_LINK ? "Force" : "Unforce",
+ reg & MV88E6XXX_PORT_MAC_CTL_LINK_UP ? "up" : "down");
return 0;
}
@@ -141,18 +146,20 @@
u16 reg;
int err;
- err = mv88e6xxx_port_read(chip, port, PORT_PCS_CTRL, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
if (err)
return err;
- reg &= ~(PORT_PCS_CTRL_FORCE_DUPLEX | PORT_PCS_CTRL_DUPLEX_FULL);
+ reg &= ~(MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX |
+ MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL);
switch (dup) {
case DUPLEX_HALF:
- reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
+ reg |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX;
break;
case DUPLEX_FULL:
- reg |= PORT_PCS_CTRL_FORCE_DUPLEX | PORT_PCS_CTRL_DUPLEX_FULL;
+ reg |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX |
+ MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL;
break;
case DUPLEX_UNFORCED:
/* normal duplex detection */
@@ -161,13 +168,13 @@
return -EINVAL;
}
- err = mv88e6xxx_port_write(chip, port, PORT_PCS_CTRL, reg);
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
if (err)
return err;
- netdev_dbg(chip->ds->ports[port].netdev, "%s %s duplex\n",
- reg & PORT_PCS_CTRL_FORCE_DUPLEX ? "Force" : "Unforce",
- reg & PORT_PCS_CTRL_DUPLEX_FULL ? "full" : "half");
+ dev_dbg(chip->dev, "p%d: %s %s duplex\n", port,
+ reg & MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX ? "Force" : "Unforce",
+ reg & MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL ? "full" : "half");
return 0;
}
@@ -180,55 +187,56 @@
switch (speed) {
case 10:
- ctrl = PORT_PCS_CTRL_SPEED_10;
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_10;
break;
case 100:
- ctrl = PORT_PCS_CTRL_SPEED_100;
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_100;
break;
case 200:
if (alt_bit)
- ctrl = PORT_PCS_CTRL_SPEED_100 | PORT_PCS_CTRL_ALTSPEED;
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_100 |
+ MV88E6390_PORT_MAC_CTL_ALTSPEED;
else
- ctrl = PORT_PCS_CTRL_SPEED_200;
+ ctrl = MV88E6065_PORT_MAC_CTL_SPEED_200;
break;
case 1000:
- ctrl = PORT_PCS_CTRL_SPEED_1000;
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_1000;
break;
case 2500:
- ctrl = PORT_PCS_CTRL_SPEED_10000 | PORT_PCS_CTRL_ALTSPEED;
+ ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000 |
+ MV88E6390_PORT_MAC_CTL_ALTSPEED;
break;
case 10000:
/* all bits set, fall through... */
case SPEED_UNFORCED:
- ctrl = PORT_PCS_CTRL_SPEED_UNFORCED;
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_UNFORCED;
break;
default:
return -EOPNOTSUPP;
}
- err = mv88e6xxx_port_read(chip, port, PORT_PCS_CTRL, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
if (err)
return err;
- reg &= ~PORT_PCS_CTRL_SPEED_MASK;
+ reg &= ~MV88E6XXX_PORT_MAC_CTL_SPEED_MASK;
if (alt_bit)
- reg &= ~PORT_PCS_CTRL_ALTSPEED;
+ reg &= ~MV88E6390_PORT_MAC_CTL_ALTSPEED;
if (force_bit) {
- reg &= ~PORT_PCS_CTRL_FORCE_SPEED;
+ reg &= ~MV88E6390_PORT_MAC_CTL_FORCE_SPEED;
if (speed != SPEED_UNFORCED)
- ctrl |= PORT_PCS_CTRL_FORCE_SPEED;
+ ctrl |= MV88E6390_PORT_MAC_CTL_FORCE_SPEED;
}
reg |= ctrl;
- err = mv88e6xxx_port_write(chip, port, PORT_PCS_CTRL, reg);
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
if (err)
return err;
if (speed)
- netdev_dbg(chip->ds->ports[port].netdev,
- "Speed set to %d Mbps\n", speed);
+ dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed);
else
- netdev_dbg(chip->ds->ports[port].netdev, "Speed unforced\n");
+ dev_dbg(chip->dev, "p%d: Speed unforced\n", port);
return 0;
}
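
A usage sketch; the enclosing helper's name and trailing alt_bit/force_bit parameters are assumed from the code above, not shown in this hunk.

/* Forcing 2500 Mbps on a 6390-family port: per the switch statement
 * above this selects the 10000 encoding plus ALTSPEED, and the force
 * bit is set because the speed is not SPEED_UNFORCED.
 */
static int ex_6390_force_2500(struct mv88e6xxx_chip *chip, int port)
{
	return mv88e6xxx_port_set_speed(chip, port, 2500, true, true);
}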
@@ -321,33 +329,33 @@
switch (mode) {
case PHY_INTERFACE_MODE_1000BASEX:
- cmode = PORT_STATUS_CMODE_1000BASE_X;
+ cmode = MV88E6XXX_PORT_STS_CMODE_1000BASE_X;
break;
case PHY_INTERFACE_MODE_SGMII:
- cmode = PORT_STATUS_CMODE_SGMII;
+ cmode = MV88E6XXX_PORT_STS_CMODE_SGMII;
break;
case PHY_INTERFACE_MODE_2500BASEX:
- cmode = PORT_STATUS_CMODE_2500BASEX;
+ cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX;
break;
case PHY_INTERFACE_MODE_XGMII:
- cmode = PORT_STATUS_CMODE_XAUI;
+ cmode = MV88E6XXX_PORT_STS_CMODE_XAUI;
break;
case PHY_INTERFACE_MODE_RXAUI:
- cmode = PORT_STATUS_CMODE_RXAUI;
+ cmode = MV88E6XXX_PORT_STS_CMODE_RXAUI;
break;
default:
cmode = 0;
}
if (cmode) {
- err = mv88e6xxx_port_read(chip, port, PORT_STATUS, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
if (err)
return err;
- reg &= ~PORT_STATUS_CMODE_MASK;
+ reg &= ~MV88E6XXX_PORT_STS_CMODE_MASK;
reg |= cmode;
- err = mv88e6xxx_port_write(chip, port, PORT_STATUS, reg);
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, reg);
if (err)
return err;
}
@@ -360,46 +368,51 @@
int err;
u16 reg;
- err = mv88e6xxx_port_read(chip, port, PORT_STATUS, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
if (err)
return err;
- *cmode = reg & PORT_STATUS_CMODE_MASK;
+ *cmode = reg & MV88E6XXX_PORT_STS_CMODE_MASK;
return 0;
}
-/* Offset 0x02: Pause Control
+/* Offset 0x02: Jamming Control
*
* Do not limit the period of time that this port can be paused for by
* the remote end or the period of time that this port can pause the
* remote end.
*/
-int mv88e6097_port_pause_config(struct mv88e6xxx_chip *chip, int port)
+int mv88e6097_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
+ u8 out)
{
- return mv88e6xxx_port_write(chip, port, PORT_PAUSE_CTRL, 0x0000);
+ return mv88e6xxx_port_write(chip, port, MV88E6097_PORT_JAM_CTL,
+ out << 8 | in);
}
-int mv88e6390_port_pause_config(struct mv88e6xxx_chip *chip, int port)
+int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
+ u8 out)
{
int err;
- err = mv88e6xxx_port_write(chip, port, PORT_PAUSE_CTRL,
- PORT_FLOW_CTRL_LIMIT_IN | 0);
+ err = mv88e6xxx_port_write(chip, port, MV88E6390_PORT_FLOW_CTL,
+ MV88E6390_PORT_FLOW_CTL_UPDATE |
+ MV88E6390_PORT_FLOW_CTL_LIMIT_IN | in);
if (err)
return err;
- return mv88e6xxx_port_write(chip, port, PORT_PAUSE_CTRL,
- PORT_FLOW_CTRL_LIMIT_OUT | 0);
+ return mv88e6xxx_port_write(chip, port, MV88E6390_PORT_FLOW_CTL,
+ MV88E6390_PORT_FLOW_CTL_UPDATE |
+ MV88E6390_PORT_FLOW_CTL_LIMIT_OUT | out);
}
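
A usage sketch: the old *_port_pause_config() behaviour (no limit in either direction) maps onto the new API by passing zero for both limits.

static int ex_disable_pause_limits(struct mv88e6xxx_chip *chip, int port)
{
	/* in = out = 0 keeps "do not limit" semantics on both families */
	return mv88e6097_port_pause_limit(chip, port, 0, 0);
}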
/* Offset 0x04: Port Control Register */
static const char * const mv88e6xxx_port_state_names[] = {
- [PORT_CONTROL_STATE_DISABLED] = "Disabled",
- [PORT_CONTROL_STATE_BLOCKING] = "Blocking/Listening",
- [PORT_CONTROL_STATE_LEARNING] = "Learning",
- [PORT_CONTROL_STATE_FORWARDING] = "Forwarding",
+ [MV88E6XXX_PORT_CTL0_STATE_DISABLED] = "Disabled",
+ [MV88E6XXX_PORT_CTL0_STATE_BLOCKING] = "Blocking/Listening",
+ [MV88E6XXX_PORT_CTL0_STATE_LEARNING] = "Learning",
+ [MV88E6XXX_PORT_CTL0_STATE_FORWARDING] = "Forwarding",
};
int mv88e6xxx_port_set_state(struct mv88e6xxx_chip *chip, int port, u8 state)
@@ -407,37 +420,72 @@
u16 reg;
int err;
- err = mv88e6xxx_port_read(chip, port, PORT_CONTROL, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
if (err)
return err;
- reg &= ~PORT_CONTROL_STATE_MASK;
+ reg &= ~MV88E6XXX_PORT_CTL0_STATE_MASK;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ state = MV88E6XXX_PORT_CTL0_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ state = MV88E6XXX_PORT_CTL0_STATE_BLOCKING;
+ break;
+ case BR_STATE_LEARNING:
+ state = MV88E6XXX_PORT_CTL0_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ state = MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
reg |= state;
- err = mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
if (err)
return err;
- netdev_dbg(chip->ds->ports[port].netdev, "PortState set to %s\n",
- mv88e6xxx_port_state_names[state]);
+ dev_dbg(chip->dev, "p%d: PortState set to %s\n", port,
+ mv88e6xxx_port_state_names[state]);
return 0;
}
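
An illustrative caller (function name hypothetical): a DSA .port_stp_state_set handler can now pass BR_STATE_* values straight through, since the helper above maps them to hardware states itself.

static void ex_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
	struct mv88e6xxx_chip *chip = ds->priv;

	mutex_lock(&chip->reg_lock);
	if (mv88e6xxx_port_set_state(chip, port, state))
		dev_err(chip->dev, "p%d: failed to update state\n", port);
	mutex_unlock(&chip->reg_lock);
}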
int mv88e6xxx_port_set_egress_mode(struct mv88e6xxx_chip *chip, int port,
- u16 mode)
+ enum mv88e6xxx_egress_mode mode)
{
int err;
u16 reg;
- err = mv88e6xxx_port_read(chip, port, PORT_CONTROL, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
if (err)
return err;
- reg &= ~PORT_CONTROL_EGRESS_MASK;
- reg |= mode;
+ reg &= ~MV88E6XXX_PORT_CTL0_EGRESS_MODE_MASK;
- return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
+ switch (mode) {
+ case MV88E6XXX_EGRESS_MODE_UNMODIFIED:
+ reg |= MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNMODIFIED;
+ break;
+ case MV88E6XXX_EGRESS_MODE_UNTAGGED:
+ reg |= MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNTAGGED;
+ break;
+ case MV88E6XXX_EGRESS_MODE_TAGGED:
+ reg |= MV88E6XXX_PORT_CTL0_EGRESS_MODE_TAGGED;
+ break;
+ case MV88E6XXX_EGRESS_MODE_ETHERTYPE:
+ reg |= MV88E6XXX_PORT_CTL0_EGRESS_MODE_ETHER_TYPE_DSA;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
}
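
A usage sketch: a CPU port carrying EtherType DSA frames is configured with the new enum rather than a raw register value.

static int ex_cpu_port_egress(struct mv88e6xxx_chip *chip, int port)
{
	return mv88e6xxx_port_set_egress_mode(chip, port,
					      MV88E6XXX_EGRESS_MODE_ETHERTYPE);
}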
int mv88e6085_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
@@ -446,24 +494,24 @@
int err;
u16 reg;
- err = mv88e6xxx_port_read(chip, port, PORT_CONTROL, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
if (err)
return err;
- reg &= ~PORT_CONTROL_FRAME_MODE_DSA;
+ reg &= ~MV88E6XXX_PORT_CTL0_FRAME_MODE_MASK;
switch (mode) {
case MV88E6XXX_FRAME_MODE_NORMAL:
- reg |= PORT_CONTROL_FRAME_MODE_NORMAL;
+ reg |= MV88E6XXX_PORT_CTL0_FRAME_MODE_NORMAL;
break;
case MV88E6XXX_FRAME_MODE_DSA:
- reg |= PORT_CONTROL_FRAME_MODE_DSA;
+ reg |= MV88E6XXX_PORT_CTL0_FRAME_MODE_DSA;
break;
default:
return -EINVAL;
}
- return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
}
int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
@@ -472,30 +520,30 @@
int err;
u16 reg;
- err = mv88e6xxx_port_read(chip, port, PORT_CONTROL, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
if (err)
return err;
- reg &= ~PORT_CONTROL_FRAME_MASK;
+ reg &= ~MV88E6XXX_PORT_CTL0_FRAME_MODE_MASK;
switch (mode) {
case MV88E6XXX_FRAME_MODE_NORMAL:
- reg |= PORT_CONTROL_FRAME_MODE_NORMAL;
+ reg |= MV88E6XXX_PORT_CTL0_FRAME_MODE_NORMAL;
break;
case MV88E6XXX_FRAME_MODE_DSA:
- reg |= PORT_CONTROL_FRAME_MODE_DSA;
+ reg |= MV88E6XXX_PORT_CTL0_FRAME_MODE_DSA;
break;
case MV88E6XXX_FRAME_MODE_PROVIDER:
- reg |= PORT_CONTROL_FRAME_MODE_PROVIDER;
+ reg |= MV88E6XXX_PORT_CTL0_FRAME_MODE_PROVIDER;
break;
case MV88E6XXX_FRAME_MODE_ETHERTYPE:
- reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
+ reg |= MV88E6XXX_PORT_CTL0_FRAME_MODE_ETHER_TYPE_DSA;
break;
default:
return -EINVAL;
}
- return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
}
static int mv88e6185_port_set_forward_unknown(struct mv88e6xxx_chip *chip,
@@ -504,16 +552,16 @@
int err;
u16 reg;
- err = mv88e6xxx_port_read(chip, port, PORT_CONTROL, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
if (err)
return err;
if (unicast)
- reg |= PORT_CONTROL_FORWARD_UNKNOWN;
+ reg |= MV88E6185_PORT_CTL0_FORWARD_UNKNOWN;
else
- reg &= ~PORT_CONTROL_FORWARD_UNKNOWN;
+ reg &= ~MV88E6185_PORT_CTL0_FORWARD_UNKNOWN;
- return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
}
int mv88e6352_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
@@ -522,22 +570,22 @@
int err;
u16 reg;
- err = mv88e6xxx_port_read(chip, port, PORT_CONTROL, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
if (err)
return err;
- reg &= ~PORT_CONTROL_EGRESS_FLOODS_MASK;
+ reg &= ~MV88E6352_PORT_CTL0_EGRESS_FLOODS_MASK;
if (unicast && multicast)
- reg |= PORT_CONTROL_EGRESS_FLOODS_ALL_UNKNOWN_DA;
+ reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_ALL_UNKNOWN_DA;
else if (unicast)
- reg |= PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_MC_DA;
+ reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_MC_DA;
else if (multicast)
- reg |= PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_UC_DA;
+ reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_UC_DA;
else
- reg |= PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_DA;
+ reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_DA;
- return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
}
/* Offset 0x05: Port Control 1 */
@@ -548,16 +596,16 @@
u16 val;
int err;
- err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_1, &val);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL1, &val);
if (err)
return err;
if (message_port)
- val |= PORT_CONTROL_1_MESSAGE_PORT;
+ val |= MV88E6XXX_PORT_CTL1_MESSAGE_PORT;
else
- val &= ~PORT_CONTROL_1_MESSAGE_PORT;
+ val &= ~MV88E6XXX_PORT_CTL1_MESSAGE_PORT;
- return mv88e6xxx_port_write(chip, port, PORT_CONTROL_1, val);
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL1, val);
}
/* Offset 0x06: Port Based VLAN Map */
@@ -568,19 +616,18 @@
u16 reg;
int err;
- err = mv88e6xxx_port_read(chip, port, PORT_BASE_VLAN, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_BASE_VLAN, &reg);
if (err)
return err;
reg &= ~mask;
reg |= map & mask;
- err = mv88e6xxx_port_write(chip, port, PORT_BASE_VLAN, reg);
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_BASE_VLAN, reg);
if (err)
return err;
- netdev_dbg(chip->ds->ports[port].netdev, "VLANTable set to %.3x\n",
- map);
+ dev_dbg(chip->dev, "p%d: VLANTable set to %.3x\n", port, map);
return 0;
}
@@ -592,7 +639,7 @@
int err;
/* Port's default FID lower 4 bits are located in reg 0x06, offset 12 */
- err = mv88e6xxx_port_read(chip, port, PORT_BASE_VLAN, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_BASE_VLAN, &reg);
if (err)
return err;
@@ -600,7 +647,8 @@
/* Port's default FID upper bits are located in reg 0x05, offset 0 */
if (upper_mask) {
- err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_1, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL1,
+ &reg);
if (err)
return err;
@@ -620,32 +668,34 @@
return -EINVAL;
/* Port's default FID lower 4 bits are located in reg 0x06, offset 12 */
- err = mv88e6xxx_port_read(chip, port, PORT_BASE_VLAN, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_BASE_VLAN, &reg);
if (err)
return err;
reg &= 0x0fff;
reg |= (fid & 0x000f) << 12;
- err = mv88e6xxx_port_write(chip, port, PORT_BASE_VLAN, reg);
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_BASE_VLAN, reg);
if (err)
return err;
/* Port's default FID upper bits are located in reg 0x05, offset 0 */
if (upper_mask) {
- err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_1, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL1,
+ &reg);
if (err)
return err;
reg &= ~upper_mask;
reg |= (fid >> 4) & upper_mask;
- err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_1, reg);
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL1,
+ reg);
if (err)
return err;
}
- netdev_dbg(chip->ds->ports[port].netdev, "FID set to %u\n", fid);
+ dev_dbg(chip->dev, "p%d: FID set to %u\n", port, fid);
return 0;
}
@@ -657,11 +707,12 @@
u16 reg;
int err;
- err = mv88e6xxx_port_read(chip, port, PORT_DEFAULT_VLAN, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_DEFAULT_VLAN,
+ &reg);
if (err)
return err;
- *pvid = reg & PORT_DEFAULT_VLAN_MASK;
+ *pvid = reg & MV88E6XXX_PORT_DEFAULT_VLAN_MASK;
return 0;
}
@@ -671,19 +722,20 @@
u16 reg;
int err;
- err = mv88e6xxx_port_read(chip, port, PORT_DEFAULT_VLAN, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_DEFAULT_VLAN,
+ &reg);
if (err)
return err;
- reg &= ~PORT_DEFAULT_VLAN_MASK;
- reg |= pvid & PORT_DEFAULT_VLAN_MASK;
+ reg &= ~MV88E6XXX_PORT_DEFAULT_VLAN_MASK;
+ reg |= pvid & MV88E6XXX_PORT_DEFAULT_VLAN_MASK;
- err = mv88e6xxx_port_write(chip, port, PORT_DEFAULT_VLAN, reg);
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_DEFAULT_VLAN,
+ reg);
if (err)
return err;
- netdev_dbg(chip->ds->ports[port].netdev, "DefaultVID set to %u\n",
- pvid);
+ dev_dbg(chip->dev, "p%d: DefaultVID set to %u\n", port, pvid);
return 0;
}
@@ -691,10 +743,10 @@
/* Offset 0x08: Port Control 2 Register */
static const char * const mv88e6xxx_port_8021q_mode_names[] = {
- [PORT_CONTROL_2_8021Q_DISABLED] = "Disabled",
- [PORT_CONTROL_2_8021Q_FALLBACK] = "Fallback",
- [PORT_CONTROL_2_8021Q_CHECK] = "Check",
- [PORT_CONTROL_2_8021Q_SECURE] = "Secure",
+ [MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED] = "Disabled",
+ [MV88E6XXX_PORT_CTL2_8021Q_MODE_FALLBACK] = "Fallback",
+ [MV88E6XXX_PORT_CTL2_8021Q_MODE_CHECK] = "Check",
+ [MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE] = "Secure",
};
static int mv88e6185_port_set_default_forward(struct mv88e6xxx_chip *chip,
@@ -703,16 +755,16 @@
int err;
u16 reg;
- err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_2, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
if (err)
return err;
if (multicast)
- reg |= PORT_CONTROL_2_DEFAULT_FORWARD;
+ reg |= MV88E6XXX_PORT_CTL2_DEFAULT_FORWARD;
else
- reg &= ~PORT_CONTROL_2_DEFAULT_FORWARD;
+ reg &= ~MV88E6XXX_PORT_CTL2_DEFAULT_FORWARD;
- return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
}
int mv88e6185_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
@@ -733,14 +785,14 @@
int err;
u16 reg;
- err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_2, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
if (err)
return err;
- reg &= ~PORT_CONTROL_2_UPSTREAM_MASK;
+ reg &= ~MV88E6095_PORT_CTL2_CPU_PORT_MASK;
reg |= upstream_port;
- return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
}
int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
@@ -749,19 +801,19 @@
u16 reg;
int err;
- err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_2, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
if (err)
return err;
- reg &= ~PORT_CONTROL_2_8021Q_MASK;
- reg |= mode & PORT_CONTROL_2_8021Q_MASK;
+ reg &= ~MV88E6XXX_PORT_CTL2_8021Q_MODE_MASK;
+ reg |= mode & MV88E6XXX_PORT_CTL2_8021Q_MODE_MASK;
- err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
if (err)
return err;
- netdev_dbg(chip->ds->ports[port].netdev, "802.1QMode set to %s\n",
- mv88e6xxx_port_8021q_mode_names[mode]);
+ dev_dbg(chip->dev, "p%d: 802.1QMode set to %s\n", port,
+ mv88e6xxx_port_8021q_mode_names[mode]);
return 0;
}
@@ -771,53 +823,65 @@
u16 reg;
int err;
- err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_2, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
if (err)
return err;
- reg |= PORT_CONTROL_2_MAP_DA;
+ reg |= MV88E6XXX_PORT_CTL2_MAP_DA;
- return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
}
-int mv88e6165_port_jumbo_config(struct mv88e6xxx_chip *chip, int port)
+int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port,
+ size_t size)
{
u16 reg;
int err;
- err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_2, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
if (err)
return err;
- reg |= PORT_CONTROL_2_JUMBO_10240;
+ reg &= ~MV88E6XXX_PORT_CTL2_JUMBO_MODE_MASK;
- return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
+ if (size <= 1522)
+ reg |= MV88E6XXX_PORT_CTL2_JUMBO_MODE_1522;
+ else if (size <= 2048)
+ reg |= MV88E6XXX_PORT_CTL2_JUMBO_MODE_2048;
+ else if (size <= 10240)
+ reg |= MV88E6XXX_PORT_CTL2_JUMBO_MODE_10240;
+ else
+ return -ERANGE;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
}
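The reworked helper takes the requested frame size directly and selects the smallest of the chip's three fixed jumbo modes that fits, rejecting anything above 10240 bytes. A minimal caller sketch, assuming a hypothetical MTU hook, the conventional 18 bytes of Ethernet header/FCS overhead, and an assumed worst-case 8-byte DSA tag:

/* Hypothetical caller: translate an MTU into an on-wire frame size.
 * ETH_HLEN + ETH_FCS_LEN cover the header and checksum; the extra
 * 8 bytes are an assumed DSA tag length, not a chip requirement.
 */
static int example_port_change_mtu(struct mv88e6xxx_chip *chip, int port,
				   int new_mtu)
{
	size_t size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 8;

	return mv88e6165_port_set_jumbo_size(chip, port, size);
}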
/* Offset 0x09: Port Rate Control */
int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port)
{
- return mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL, 0x0000);
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_EGRESS_RATE_CTL1,
+ 0x0000);
}
int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port)
{
- return mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL, 0x0001);
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_EGRESS_RATE_CTL1,
+ 0x0001);
}
/* Offset 0x0C: Port ATU Control */
int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port)
{
- return mv88e6xxx_port_write(chip, port, PORT_ATU_CONTROL, 0);
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ATU_CTL, 0);
}
/* Offset 0x0D: (Priority) Override Register */
int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port)
{
- return mv88e6xxx_port_write(chip, port, PORT_PRI_OVERRIDE, 0);
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_PRI_OVERRIDE, 0);
}
/* Offset 0x0f: Port Ether type */
@@ -825,7 +889,7 @@
int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
u16 etype)
{
- return mv88e6xxx_port_write(chip, port, PORT_ETH_TYPE, etype);
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ETH_TYPE, etype);
}
/* Offset 0x18: Port IEEE Priority Remapping Registers [0-3]
@@ -837,53 +901,54 @@
int err;
/* Use a direct priority mapping for all IEEE tagged frames */
- err = mv88e6xxx_port_write(chip, port, PORT_TAG_REGMAP_0123, 0x3210);
+ err = mv88e6xxx_port_write(chip, port,
+ MV88E6095_PORT_IEEE_PRIO_REMAP_0123,
+ 0x3210);
if (err)
return err;
- return mv88e6xxx_port_write(chip, port, PORT_TAG_REGMAP_4567, 0x7654);
+ return mv88e6xxx_port_write(chip, port,
+ MV88E6095_PORT_IEEE_PRIO_REMAP_4567,
+ 0x7654);
}
static int mv88e6xxx_port_ieeepmt_write(struct mv88e6xxx_chip *chip,
- int port, u16 table,
- u8 pointer, u16 data)
+ int port, u16 table, u8 ptr, u16 data)
{
u16 reg;
- reg = PORT_IEEE_PRIO_MAP_TABLE_UPDATE |
- table |
- (pointer << PORT_IEEE_PRIO_MAP_TABLE_POINTER_SHIFT) |
- data;
+ reg = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_UPDATE | table |
+ (ptr << __bf_shf(MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_PTR_MASK)) |
+ (data & MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_DATA_MASK);
- return mv88e6xxx_port_write(chip, port, PORT_IEEE_PRIO_MAP_TABLE, reg);
+ return mv88e6xxx_port_write(chip, port,
+ MV88E6390_PORT_IEEE_PRIO_MAP_TABLE, reg);
}
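The table-write helper packs four fields into one 16-bit register: the self-clearing update bit, a 3-bit table selector, a 3-bit pointer and 9 bits of data. A worked encoding for the ingress pass of the loop below, using the masks defined in port.h:

/* Ingress PCP entry for i = 5, where the loop writes data (i | i << 4):
 *   MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_UPDATE       = 0x8000
 * | MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_INGRESS_PCP  = 0x0000
 * | 5 << __bf_shf(0x0e00), i.e. 5 << 9              = 0x0a00
 * | (5 | 5 << 4) & 0x01ff                           = 0x0055
 *   reg                                             = 0x8a55
 */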
int mv88e6390_port_tag_remap(struct mv88e6xxx_chip *chip, int port)
{
int err, i;
+ u16 table;
for (i = 0; i <= 7; i++) {
- err = mv88e6xxx_port_ieeepmt_write(
- chip, port, PORT_IEEE_PRIO_MAP_TABLE_INGRESS_PCP,
- i, (i | i << 4));
+ table = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_INGRESS_PCP;
+ err = mv88e6xxx_port_ieeepmt_write(chip, port, table, i,
+ (i | i << 4));
if (err)
return err;
- err = mv88e6xxx_port_ieeepmt_write(
- chip, port, PORT_IEEE_PRIO_MAP_TABLE_EGRESS_GREEN_PCP,
- i, i);
+ table = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_GREEN_PCP;
+ err = mv88e6xxx_port_ieeepmt_write(chip, port, table, i, i);
if (err)
return err;
- err = mv88e6xxx_port_ieeepmt_write(
- chip, port, PORT_IEEE_PRIO_MAP_TABLE_EGRESS_YELLOW_PCP,
- i, i);
+ table = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_YELLOW_PCP;
+ err = mv88e6xxx_port_ieeepmt_write(chip, port, table, i, i);
if (err)
return err;
- err = mv88e6xxx_port_ieeepmt_write(
- chip, port, PORT_IEEE_PRIO_MAP_TABLE_EGRESS_AVB_PCP,
- i, i);
+ table = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_AVB_PCP;
+ err = mv88e6xxx_port_ieeepmt_write(chip, port, table, i, i);
if (err)
return err;
}
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 86f4088..8f3991b 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -15,7 +15,229 @@
#ifndef _MV88E6XXX_PORT_H
#define _MV88E6XXX_PORT_H
-#include "mv88e6xxx.h"
+#include "chip.h"
+
+/* Offset 0x00: Port Status Register */
+#define MV88E6XXX_PORT_STS 0x00
+#define MV88E6XXX_PORT_STS_PAUSE_EN 0x8000
+#define MV88E6XXX_PORT_STS_MY_PAUSE 0x4000
+#define MV88E6XXX_PORT_STS_HD_FLOW 0x2000
+#define MV88E6XXX_PORT_STS_PHY_DETECT 0x1000
+#define MV88E6XXX_PORT_STS_LINK 0x0800
+#define MV88E6XXX_PORT_STS_DUPLEX 0x0400
+#define MV88E6XXX_PORT_STS_SPEED_MASK 0x0300
+#define MV88E6XXX_PORT_STS_SPEED_10 0x0000
+#define MV88E6XXX_PORT_STS_SPEED_100 0x0100
+#define MV88E6XXX_PORT_STS_SPEED_1000 0x0200
+#define MV88E6352_PORT_STS_EEE 0x0040
+#define MV88E6165_PORT_STS_AM_DIS 0x0040
+#define MV88E6185_PORT_STS_MGMII 0x0040
+#define MV88E6XXX_PORT_STS_TX_PAUSED 0x0020
+#define MV88E6XXX_PORT_STS_FLOW_CTL 0x0010
+#define MV88E6XXX_PORT_STS_CMODE_MASK 0x000f
+#define MV88E6XXX_PORT_STS_CMODE_100BASE_X 0x0008
+#define MV88E6XXX_PORT_STS_CMODE_1000BASE_X 0x0009
+#define MV88E6XXX_PORT_STS_CMODE_SGMII 0x000a
+#define MV88E6XXX_PORT_STS_CMODE_2500BASEX 0x000b
+#define MV88E6XXX_PORT_STS_CMODE_XAUI 0x000c
+#define MV88E6XXX_PORT_STS_CMODE_RXAUI 0x000d
+
+/* Offset 0x01: MAC (or PCS or Physical) Control Register */
+#define MV88E6XXX_PORT_MAC_CTL 0x01
+#define MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK 0x8000
+#define MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK 0x4000
+#define MV88E6390_PORT_MAC_CTL_FORCE_SPEED 0x2000
+#define MV88E6390_PORT_MAC_CTL_ALTSPEED 0x1000
+#define MV88E6352_PORT_MAC_CTL_200BASE 0x1000
+#define MV88E6XXX_PORT_MAC_CTL_FC 0x0080
+#define MV88E6XXX_PORT_MAC_CTL_FORCE_FC 0x0040
+#define MV88E6XXX_PORT_MAC_CTL_LINK_UP 0x0020
+#define MV88E6XXX_PORT_MAC_CTL_FORCE_LINK 0x0010
+#define MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL 0x0008
+#define MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX 0x0004
+#define MV88E6XXX_PORT_MAC_CTL_SPEED_MASK 0x0003
+#define MV88E6XXX_PORT_MAC_CTL_SPEED_10 0x0000
+#define MV88E6XXX_PORT_MAC_CTL_SPEED_100 0x0001
+#define MV88E6065_PORT_MAC_CTL_SPEED_200 0x0002
+#define MV88E6XXX_PORT_MAC_CTL_SPEED_1000 0x0002
+#define MV88E6390_PORT_MAC_CTL_SPEED_10000 0x0003
+#define MV88E6XXX_PORT_MAC_CTL_SPEED_UNFORCED 0x0003
+
+/* Offset 0x02: Jamming Control Register */
+#define MV88E6097_PORT_JAM_CTL 0x02
+#define MV88E6097_PORT_JAM_CTL_LIMIT_OUT_MASK 0xff00
+#define MV88E6097_PORT_JAM_CTL_LIMIT_IN_MASK 0x00ff
+
+/* Offset 0x02: Flow Control Register */
+#define MV88E6390_PORT_FLOW_CTL 0x02
+#define MV88E6390_PORT_FLOW_CTL_UPDATE 0x8000
+#define MV88E6390_PORT_FLOW_CTL_PTR_MASK 0x7f00
+#define MV88E6390_PORT_FLOW_CTL_LIMIT_IN 0x0000
+#define MV88E6390_PORT_FLOW_CTL_LIMIT_OUT 0x0100
+#define MV88E6390_PORT_FLOW_CTL_DATA_MASK 0x00ff
+
+/* Offset 0x03: Switch Identifier Register */
+#define MV88E6XXX_PORT_SWITCH_ID 0x03
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_MASK 0xfff0
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6085 0x04a0
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6095 0x0950
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6097 0x0990
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6190X 0x0a00
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6390X 0x0a10
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6131 0x1060
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6320 0x1150
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6123 0x1210
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6161 0x1610
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6165 0x1650
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6171 0x1710
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6172 0x1720
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6175 0x1750
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6176 0x1760
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6190 0x1900
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6191 0x1910
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6185 0x1a70
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6240 0x2400
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6290 0x2900
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6321 0x3100
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6141 0x3400
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6341 0x3410
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6352 0x3520
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6350 0x3710
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6351 0x3750
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6390 0x3900
+#define MV88E6XXX_PORT_SWITCH_ID_REV_MASK 0x000f
+
+/* Offset 0x04: Port Control Register */
+#define MV88E6XXX_PORT_CTL0 0x04
+#define MV88E6XXX_PORT_CTL0_USE_CORE_TAG 0x8000
+#define MV88E6XXX_PORT_CTL0_DROP_ON_LOCK 0x4000
+#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_MASK 0x3000
+#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNMODIFIED 0x0000
+#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNTAGGED 0x1000
+#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_TAGGED 0x2000
+#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_ETHER_TYPE_DSA 0x3000
+#define MV88E6XXX_PORT_CTL0_HEADER 0x0800
+#define MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP 0x0400
+#define MV88E6XXX_PORT_CTL0_DOUBLE_TAG 0x0200
+#define MV88E6XXX_PORT_CTL0_FRAME_MODE_MASK 0x0300
+#define MV88E6XXX_PORT_CTL0_FRAME_MODE_NORMAL 0x0000
+#define MV88E6XXX_PORT_CTL0_FRAME_MODE_DSA 0x0100
+#define MV88E6XXX_PORT_CTL0_FRAME_MODE_PROVIDER 0x0200
+#define MV88E6XXX_PORT_CTL0_FRAME_MODE_ETHER_TYPE_DSA 0x0300
+#define MV88E6XXX_PORT_CTL0_DSA_TAG 0x0100
+#define MV88E6XXX_PORT_CTL0_VLAN_TUNNEL 0x0080
+#define MV88E6XXX_PORT_CTL0_TAG_IF_BOTH 0x0040
+#define MV88E6185_PORT_CTL0_USE_IP 0x0020
+#define MV88E6185_PORT_CTL0_USE_TAG 0x0010
+#define MV88E6185_PORT_CTL0_FORWARD_UNKNOWN 0x0004
+#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_MASK 0x000c
+#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_DA 0x0000
+#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_MC_DA 0x0004
+#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_UC_DA 0x0008
+#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_ALL_UNKNOWN_DA 0x000c
+#define MV88E6XXX_PORT_CTL0_STATE_MASK 0x0003
+#define MV88E6XXX_PORT_CTL0_STATE_DISABLED 0x0000
+#define MV88E6XXX_PORT_CTL0_STATE_BLOCKING 0x0001
+#define MV88E6XXX_PORT_CTL0_STATE_LEARNING 0x0002
+#define MV88E6XXX_PORT_CTL0_STATE_FORWARDING 0x0003
+
+/* Offset 0x05: Port Control 1 */
+#define MV88E6XXX_PORT_CTL1 0x05
+#define MV88E6XXX_PORT_CTL1_MESSAGE_PORT 0x8000
+#define MV88E6XXX_PORT_CTL1_FID_11_4_MASK 0x00ff
+
+/* Offset 0x06: Port Based VLAN Map */
+#define MV88E6XXX_PORT_BASE_VLAN 0x06
+#define MV88E6XXX_PORT_BASE_VLAN_FID_3_0_MASK 0xf000
+
+/* Offset 0x07: Default Port VLAN ID & Priority */
+#define MV88E6XXX_PORT_DEFAULT_VLAN 0x07
+#define MV88E6XXX_PORT_DEFAULT_VLAN_MASK 0x0fff
+
+/* Offset 0x08: Port Control 2 Register */
+#define MV88E6XXX_PORT_CTL2 0x08
+#define MV88E6XXX_PORT_CTL2_IGNORE_FCS 0x8000
+#define MV88E6XXX_PORT_CTL2_VTU_PRI_OVERRIDE 0x4000
+#define MV88E6XXX_PORT_CTL2_SA_PRIO_OVERRIDE 0x2000
+#define MV88E6XXX_PORT_CTL2_DA_PRIO_OVERRIDE 0x1000
+#define MV88E6XXX_PORT_CTL2_JUMBO_MODE_MASK 0x3000
+#define MV88E6XXX_PORT_CTL2_JUMBO_MODE_1522 0x0000
+#define MV88E6XXX_PORT_CTL2_JUMBO_MODE_2048 0x1000
+#define MV88E6XXX_PORT_CTL2_JUMBO_MODE_10240 0x2000
+#define MV88E6XXX_PORT_CTL2_8021Q_MODE_MASK 0x0c00
+#define MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED 0x0000
+#define MV88E6XXX_PORT_CTL2_8021Q_MODE_FALLBACK 0x0400
+#define MV88E6XXX_PORT_CTL2_8021Q_MODE_CHECK 0x0800
+#define MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE 0x0c00
+#define MV88E6XXX_PORT_CTL2_DISCARD_TAGGED 0x0200
+#define MV88E6XXX_PORT_CTL2_DISCARD_UNTAGGED 0x0100
+#define MV88E6XXX_PORT_CTL2_MAP_DA 0x0080
+#define MV88E6XXX_PORT_CTL2_DEFAULT_FORWARD 0x0040
+#define MV88E6XXX_PORT_CTL2_EGRESS_MONITOR 0x0020
+#define MV88E6XXX_PORT_CTL2_INGRESS_MONITOR 0x0010
+#define MV88E6095_PORT_CTL2_CPU_PORT_MASK 0x000f
+
+/* Offset 0x09: Egress Rate Control */
+#define MV88E6XXX_PORT_EGRESS_RATE_CTL1 0x09
+
+/* Offset 0x0A: Egress Rate Control 2 */
+#define MV88E6XXX_PORT_EGRESS_RATE_CTL2 0x0a
+
+/* Offset 0x0B: Port Association Vector */
+#define MV88E6XXX_PORT_ASSOC_VECTOR 0x0b
+#define MV88E6XXX_PORT_ASSOC_VECTOR_HOLD_AT_1 0x8000
+#define MV88E6XXX_PORT_ASSOC_VECTOR_INT_AGE_OUT 0x4000
+#define MV88E6XXX_PORT_ASSOC_VECTOR_LOCKED_PORT 0x2000
+#define MV88E6XXX_PORT_ASSOC_VECTOR_IGNORE_WRONG 0x1000
+#define MV88E6XXX_PORT_ASSOC_VECTOR_REFRESH_LOCKED 0x0800
+
+/* Offset 0x0C: Port ATU Control */
+#define MV88E6XXX_PORT_ATU_CTL 0x0c
+
+/* Offset 0x0D: Priority Override Register */
+#define MV88E6XXX_PORT_PRI_OVERRIDE 0x0d
+
+/* Offset 0x0E: Policy Control Register */
+#define MV88E6XXX_PORT_POLICY_CTL 0x0e
+
+/* Offset 0x0F: Port Special Ether Type */
+#define MV88E6XXX_PORT_ETH_TYPE 0x0f
+#define MV88E6XXX_PORT_ETH_TYPE_DEFAULT 0x9100
+
+/* Offset 0x10: InDiscards Low Counter */
+#define MV88E6XXX_PORT_IN_DISCARD_LO 0x10
+
+/* Offset 0x11: InDiscards High Counter */
+#define MV88E6XXX_PORT_IN_DISCARD_HI 0x11
+
+/* Offset 0x12: InFiltered Counter */
+#define MV88E6XXX_PORT_IN_FILTERED 0x12
+
+/* Offset 0x13: OutFiltered Counter */
+#define MV88E6XXX_PORT_OUT_FILTERED 0x13
+
+/* Offset 0x16: LED Control */
+#define MV88E6XXX_PORT_LED_CONTROL 0x16
+
+/* Offset 0x18: IEEE Priority Mapping Table */
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE 0x18
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_UPDATE 0x8000
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_MASK 0x7000
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_INGRESS_PCP 0x0000
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_GREEN_PCP 0x1000
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_YELLOW_PCP 0x2000
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_AVB_PCP 0x3000
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_GREEN_DSCP 0x5000
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_YELLOW_DSCP 0x6000
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_AVB_DSCP 0x7000
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_PTR_MASK 0x0e00
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_DATA_MASK 0x01ff
+
+/* Offset 0x18: Port IEEE Priority Remapping Registers (0-3) */
+#define MV88E6095_PORT_IEEE_PRIO_REMAP_0123 0x18
+
+/* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */
+#define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19
int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
u16 *val);
@@ -52,7 +274,7 @@
int mv88e6095_port_tag_remap(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_port_tag_remap(struct mv88e6xxx_chip *chip, int port);
int mv88e6xxx_port_set_egress_mode(struct mv88e6xxx_chip *chip, int port,
- u16 mode);
+ enum mv88e6xxx_egress_mode mode);
int mv88e6085_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_frame_mode mode);
int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
@@ -65,11 +287,14 @@
u16 etype);
int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port,
bool message_port);
-int mv88e6165_port_jumbo_config(struct mv88e6xxx_chip *chip, int port);
+int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port,
+ size_t size);
int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
-int mv88e6097_port_pause_config(struct mv88e6xxx_chip *chip, int port);
-int mv88e6390_port_pause_config(struct mv88e6xxx_chip *chip, int port);
+int mv88e6097_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
+ u8 out);
+int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
+ u8 out);
int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
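With the registers and their fields now carried in one namespace, the read-modify-write idiom used throughout port.c is self-documenting. A minimal sketch built from the helpers and constants above (it assumes the matching mv88e6xxx_port_write() declaration alongside mv88e6xxx_port_read()):

/* Example: force a port into the forwarding STP state. */
static int example_port_set_forwarding(struct mv88e6xxx_chip *chip, int port)
{
	u16 reg;
	int err;

	err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
	if (err)
		return err;

	reg &= ~MV88E6XXX_PORT_CTL0_STATE_MASK;
	reg |= MV88E6XXX_PORT_CTL0_STATE_FORWARDING;

	return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
}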
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
new file mode 100644
index 0000000..411b4f5
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -0,0 +1,229 @@
+/*
+ * Marvell 88E6xxx SERDES manipulation, via SMI bus
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/mii.h>
+
+#include "chip.h"
+#include "global2.h"
+#include "phy.h"
+#include "port.h"
+#include "serdes.h"
+
+static int mv88e6352_serdes_read(struct mv88e6xxx_chip *chip, int reg,
+ u16 *val)
+{
+ return mv88e6xxx_phy_page_read(chip, MV88E6352_ADDR_SERDES,
+ MV88E6352_SERDES_PAGE_FIBER,
+ reg, val);
+}
+
+static int mv88e6352_serdes_write(struct mv88e6xxx_chip *chip, int reg,
+ u16 val)
+{
+ return mv88e6xxx_phy_page_write(chip, MV88E6352_ADDR_SERDES,
+ MV88E6352_SERDES_PAGE_FIBER,
+ reg, val);
+}
+
+static int mv88e6352_serdes_power_set(struct mv88e6xxx_chip *chip, bool on)
+{
+ u16 val, new_val;
+ int err;
+
+ err = mv88e6352_serdes_read(chip, MII_BMCR, &val);
+ if (err)
+ return err;
+
+ if (on)
+ new_val = val & ~BMCR_PDOWN;
+ else
+ new_val = val | BMCR_PDOWN;
+
+ if (val != new_val)
+ err = mv88e6352_serdes_write(chip, MII_BMCR, new_val);
+
+ return err;
+}
+
+int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
+{
+ int err;
+ u8 cmode;
+
+ err = mv88e6xxx_port_get_cmode(chip, port, &cmode);
+ if (err)
+ return err;
+
+ if ((cmode == MV88E6XXX_PORT_STS_CMODE_100BASE_X) ||
+ (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X) ||
+ (cmode == MV88E6XXX_PORT_STS_CMODE_SGMII)) {
+ err = mv88e6352_serdes_power_set(chip, on);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
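mv88e6352_serdes_power() is deliberately a no-op for non-SERDES cmodes, so callers may invoke it unconditionally per port. A sketch of hypothetical enable/disable hooks delegating to it:

/* Hypothetical DSA port hooks; the helper itself decides whether the
 * port's cmode involves the SERDES at all.
 */
static int example_port_enable(struct mv88e6xxx_chip *chip, int port)
{
	return mv88e6352_serdes_power(chip, port, true);
}

static void example_port_disable(struct mv88e6xxx_chip *chip, int port)
{
	if (mv88e6352_serdes_power(chip, port, false))
		dev_err(chip->dev, "p%d: failed to power down SERDES\n", port);
}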
+
+/* Set the power on/off for 10GBASE-R and 10GBASE-X4/X2 */
+static int mv88e6390_serdes_10g(struct mv88e6xxx_chip *chip, int addr, bool on)
+{
+ u16 val, new_val;
+ int reg_c45;
+ int err;
+
+ reg_c45 = MII_ADDR_C45 | MV88E6390_SERDES_DEVICE |
+ MV88E6390_PCS_CONTROL_1;
+ err = mv88e6xxx_phy_read(chip, addr, reg_c45, &val);
+ if (err)
+ return err;
+
+ if (on)
+ new_val = val & ~(MV88E6390_PCS_CONTROL_1_RESET |
+ MV88E6390_PCS_CONTROL_1_LOOPBACK |
+ MV88E6390_PCS_CONTROL_1_PDOWN);
+ else
+ new_val = val | MV88E6390_PCS_CONTROL_1_PDOWN;
+
+ if (val != new_val)
+ err = mv88e6xxx_phy_write(chip, addr, reg_c45, new_val);
+
+ return err;
+}
+
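Both 6390 helpers reach the SERDES through clause-45 addressing: MII_ADDR_C45 flags the access, the MMD number sits in bits 20:16 and the register number in the low 16 bits. The encoding for the PCS control access above, spelled out:

/* MV88E6390_SERDES_DEVICE = (4 << 16), i.e. MMD 4 (the PHY XS device);
 * MV88E6390_PCS_CONTROL_1 = 0x1000 is the register within that MMD, so:
 *
 *   reg_c45 = MII_ADDR_C45 | (4 << 16) | 0x1000;
 */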
+/* Set the power on/off for 1000BASE-X and SGMII */
+static int mv88e6390_serdes_sgmii(struct mv88e6xxx_chip *chip, int addr,
+ bool on)
+{
+ u16 val, new_val;
+ int reg_c45;
+ int err;
+
+ reg_c45 = MII_ADDR_C45 | MV88E6390_SERDES_DEVICE |
+ MV88E6390_SGMII_CONTROL;
+ err = mv88e6xxx_phy_read(chip, addr, reg_c45, &val);
+ if (err)
+ return err;
+
+ if (on)
+ new_val = val & ~(MV88E6390_SGMII_CONTROL_RESET |
+ MV88E6390_SGMII_CONTROL_LOOPBACK |
+ MV88E6390_SGMII_CONTROL_PDOWN);
+ else
+ new_val = val | MV88E6390_SGMII_CONTROL_PDOWN;
+
+ if (val != new_val)
+ err = mv88e6xxx_phy_write(chip, addr, reg_c45, new_val);
+
+ return err;
+}
+
+static int mv88e6390_serdes_lower(struct mv88e6xxx_chip *chip, u8 cmode,
+ int port_donor, int lane, bool rxaui, bool on)
+{
+ int err;
+ u8 cmode_donor;
+
+ err = mv88e6xxx_port_get_cmode(chip, port_donor, &cmode_donor);
+ if (err)
+ return err;
+
+ switch (cmode_donor) {
+ case MV88E6XXX_PORT_STS_CMODE_RXAUI:
+ if (!rxaui)
+ break;
+ /* Fall through */
+ case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
+ case MV88E6XXX_PORT_STS_CMODE_SGMII:
+ case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
+ if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_SGMII)
+ return mv88e6390_serdes_sgmii(chip, lane, on);
+ }
+ return 0;
+}
+
+static int mv88e6390_serdes_port9(struct mv88e6xxx_chip *chip, u8 cmode,
+ bool on)
+{
+ switch (cmode) {
+ case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
+ case MV88E6XXX_PORT_STS_CMODE_SGMII:
+ return mv88e6390_serdes_sgmii(chip, MV88E6390_PORT9_LANE0, on);
+ case MV88E6XXX_PORT_STS_CMODE_XAUI:
+ case MV88E6XXX_PORT_STS_CMODE_RXAUI:
+ case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
+ return mv88e6390_serdes_10g(chip, MV88E6390_PORT9_LANE0, on);
+ }
+
+ return 0;
+}
+
+static int mv88e6390_serdes_port10(struct mv88e6xxx_chip *chip, u8 cmode,
+ bool on)
+{
+ switch (cmode) {
+ case MV88E6XXX_PORT_STS_CMODE_SGMII:
+ return mv88e6390_serdes_sgmii(chip, MV88E6390_PORT10_LANE0, on);
+ case MV88E6XXX_PORT_STS_CMODE_XAUI:
+ case MV88E6XXX_PORT_STS_CMODE_RXAUI:
+ case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
+ case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
+ return mv88e6390_serdes_10g(chip, MV88E6390_PORT10_LANE0, on);
+ }
+
+ return 0;
+}
+
+int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
+{
+ u8 cmode;
+ int err;
+
+ err = mv88e6xxx_port_get_cmode(chip, port, &cmode);
+ if (err)
+ return err;
+
+ switch (port) {
+ case 2:
+ return mv88e6390_serdes_lower(chip, cmode, 9,
+ MV88E6390_PORT9_LANE1,
+ false, on);
+ case 3:
+ return mv88e6390_serdes_lower(chip, cmode, 9,
+ MV88E6390_PORT9_LANE2,
+ true, on);
+ case 4:
+ return mv88e6390_serdes_lower(chip, cmode, 9,
+ MV88E6390_PORT9_LANE3,
+ true, on);
+ case 5:
+ return mv88e6390_serdes_lower(chip, cmode, 10,
+ MV88E6390_PORT10_LANE1,
+ false, on);
+ case 6:
+ return mv88e6390_serdes_lower(chip, cmode, 10,
+ MV88E6390_PORT10_LANE2,
+ true, on);
+ case 7:
+ return mv88e6390_serdes_lower(chip, cmode, 10,
+ MV88E6390_PORT10_LANE3,
+ true, on);
+ case 9:
+ return mv88e6390_serdes_port9(chip, cmode, on);
+ case 10:
+ return mv88e6390_serdes_port10(chip, cmode, on);
+ }
+
+ return 0;
+}
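The switch above encodes the 6390 lane ownership: ports 9 and 10 drive lane 0 of their own SERDES, while ports 2-7 may borrow the remaining lanes only when the owning port's cmode leaves them free (RXAUI occupies two lanes, hence the rxaui flag passed to mv88e6390_serdes_lower()). The mapping, for reference:

/* port 2 -> donor 9,  lane 0x12 (busy when port 9 runs RXAUI)
 * port 3 -> donor 9,  lane 0x13 (still free under RXAUI)
 * port 4 -> donor 9,  lane 0x14 (still free under RXAUI)
 * port 5 -> donor 10, lane 0x15 (busy when port 10 runs RXAUI)
 * port 6 -> donor 10, lane 0x16 (still free under RXAUI)
 * port 7 -> donor 10, lane 0x17 (still free under RXAUI)
 */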
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
new file mode 100644
index 0000000..5c1cd6d
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -0,0 +1,48 @@
+/*
+ * Marvell 88E6xxx SERDES manipulation, via SMI bus
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _MV88E6XXX_SERDES_H
+#define _MV88E6XXX_SERDES_H
+
+#include "chip.h"
+
+#define MV88E6352_ADDR_SERDES 0x0f
+#define MV88E6352_SERDES_PAGE_FIBER 0x01
+
+#define MV88E6390_PORT9_LANE0 0x09
+#define MV88E6390_PORT9_LANE1 0x12
+#define MV88E6390_PORT9_LANE2 0x13
+#define MV88E6390_PORT9_LANE3 0x14
+#define MV88E6390_PORT10_LANE0 0x0a
+#define MV88E6390_PORT10_LANE1 0x15
+#define MV88E6390_PORT10_LANE2 0x16
+#define MV88E6390_PORT10_LANE3 0x17
+#define MV88E6390_SERDES_DEVICE (4 << 16)
+
+/* 10GBASE-R and 10GBASE-X4/X2 */
+#define MV88E6390_PCS_CONTROL_1 0x1000
+#define MV88E6390_PCS_CONTROL_1_RESET BIT(15)
+#define MV88E6390_PCS_CONTROL_1_LOOPBACK BIT(14)
+#define MV88E6390_PCS_CONTROL_1_SPEED BIT(13)
+#define MV88E6390_PCS_CONTROL_1_PDOWN BIT(11)
+
+/* 1000BASE-X and SGMII */
+#define MV88E6390_SGMII_CONTROL 0x2000
+#define MV88E6390_SGMII_CONTROL_RESET BIT(15)
+#define MV88E6390_SGMII_CONTROL_LOOPBACK BIT(14)
+#define MV88E6390_SGMII_CONTROL_PDOWN BIT(11)
+
+int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
+int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
+
+#endif
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index a4fd4cc..b3bee7e 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -18,7 +18,6 @@
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
-#include <net/switchdev.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/if_bridge.h>
@@ -507,7 +506,7 @@
pr_warn("regmap initialization failed");
/* Initialize CPU port pad mode (xMII type, delays...) */
- phy_mode = of_get_phy_mode(ds->ports[ds->dst->cpu_port].dn);
+ phy_mode = of_get_phy_mode(ds->dst->cpu_dp->dn);
if (phy_mode < 0) {
pr_err("Can't find phy-mode for master device\n");
return phy_mode;
@@ -873,7 +872,7 @@
static int
qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
struct qca8k_fdb _fdb = { 0 };
@@ -959,7 +958,7 @@
mutex_init(&priv->reg_mutex);
dev_set_drvdata(&mdiodev->dev, priv);
- return dsa_register_switch(priv->ds, &mdiodev->dev);
+ return dsa_register_switch(priv->ds);
}
static void
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index db8592d..f66c971 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -1039,7 +1039,7 @@
return tmp & (1<<11);
}
-static int
+static void
el3_netdev_get_ecmd(struct net_device *dev, struct ethtool_link_ksettings *cmd)
{
u16 tmp;
@@ -1082,7 +1082,6 @@
supported);
cmd->base.speed = SPEED_10;
EL3WINDOW(1);
- return 0;
}
static int
@@ -1151,12 +1150,11 @@
struct ethtool_link_ksettings *cmd)
{
struct el3_private *lp = netdev_priv(dev);
- int ret;
spin_lock_irq(&lp->lock);
- ret = el3_netdev_get_ecmd(dev, cmd);
+ el3_netdev_get_ecmd(dev, cmd);
spin_unlock_irq(&lp->lock);
- return ret;
+ return 0;
}
static int el3_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index e7b1fa5..c5987f5 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -1370,9 +1370,9 @@
(skb = netdev_alloc_skb(dev, pkt_len + 4)) != NULL) {
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
/* 'skb_put()' points to the start of sk_buff data area. */
- memcpy(skb_put(skb, pkt_len),
- isa_bus_to_virt(vp->rx_ring[entry].
- addr), pkt_len);
+ skb_put_data(skb,
+ isa_bus_to_virt(vp->rx_ring[entry].addr),
+ pkt_len);
rx_copy++;
} else {
void *temp;
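This and the following conversions fold the open-coded memcpy()-into-skb_put() pair into the skb_put_data() helper introduced in this cycle; its definition in include/linux/skbuff.h is simply:

static inline void *skb_put_data(struct sk_buff *skb, const void *data,
				 unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memcpy(tmp, data, len);
	return tmp;
}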
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index e41245a..3b516eb 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2628,9 +2628,8 @@
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
/* 'skb_put()' points to the start of sk_buff data area. */
- memcpy(skb_put(skb, pkt_len),
- vp->rx_skbuff[entry]->data,
- pkt_len);
+ skb_put_data(skb, vp->rx_skbuff[entry]->data,
+ pkt_len);
pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
vp->rx_copy++;
} else {
@@ -2912,7 +2911,9 @@
{
struct vortex_private *vp = netdev_priv(dev);
- return mii_ethtool_get_link_ksettings(&vp->mii, cmd);
+ mii_ethtool_get_link_ksettings(&vp->mii, cmd);
+
+ return 0;
}
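The pattern repeated across these ethtool hooks follows from an API change in this cycle: mii_ethtool_get_link_ksettings() and phy_ethtool_ksettings_get() now return void, since they cannot fail once an MII/PHY is present, so each .get_link_ksettings implementation returns 0 itself after checking for a device. The new prototypes:

void mii_ethtool_get_link_ksettings(struct mii_if_info *mii,
				    struct ethtool_link_ksettings *cmd);
void phy_ethtool_ksettings_get(struct phy_device *phydev,
			       struct ethtool_link_ksettings *cmd);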
static int vortex_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index db02bc2..05d9d3e 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -723,6 +723,12 @@
ax->plat->mac_addr)
memcpy(dev->dev_addr, ax->plat->mac_addr, ETH_ALEN);
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ eth_hw_addr_random(dev);
+ dev_info(&dev->dev, "Using random MAC address: %pM\n",
+ dev->dev_addr);
+ }
+
ax_reset_8390(dev);
ei_local->name = "AX88796";
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index d8e133c..4309be3 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -807,7 +807,8 @@
if (netif_msg_pktdata(greth))
greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);
- memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);
+ skb_put_data(skb, phys_to_virt(dma_addr),
+ pkt_len);
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_bytes += pkt_len;
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 87a11b9..54eff90 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2282,7 +2282,7 @@
adapter->netdev->stats.rx_bytes += rfd->len;
- memcpy(skb_put(skb, rfd->len), fbr->virt[buff_index], rfd->len);
+ skb_put_data(skb, fbr->virt[buff_index], rfd->len);
skb->protocol = eth_type_trans(skb, adapter->netdev);
skb->ip_summed = CHECKSUM_NONE;
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index c8f4d26..3143de4 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -633,7 +633,7 @@
if (!skb)
continue;
skb_reserve(skb, 2);
- rdptr = (u8 *) skb_put(skb, rxlen - 4);
+ rdptr = skb_put(skb, rxlen - 4);
/* Read received packet from RX SRAM */
if (netif_msg_rx_status(db))
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 86369d7..7f60d17 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -731,12 +731,10 @@
{
struct pcnet32_private *lp = netdev_priv(dev);
unsigned long flags;
- int r = -EOPNOTSUPP;
spin_lock_irqsave(&lp->lock, flags);
if (lp->mii) {
mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
- r = 0;
} else if (lp->chip_version == PCNET32_79C970A) {
if (lp->autoneg) {
cmd->base.autoneg = AUTONEG_ENABLE;
@@ -753,10 +751,9 @@
ethtool_convert_legacy_u32_to_link_mode(
cmd->link_modes.supported,
SUPPORTED_TP | SUPPORTED_AUI);
- r = 0;
}
spin_unlock_irqrestore(&lp->lock, flags);
- return r;
+ return 0;
}
static int pcnet32_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index c772420..a934bd5 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1268,6 +1268,7 @@
case HWTSTAMP_FILTER_NONE:
break;
+ case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
@@ -1390,8 +1391,7 @@
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}
- if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
- skb_tx_timestamp(skb);
+ skb_tx_timestamp(skb);
}
static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
@@ -1846,7 +1846,8 @@
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */
-static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
+static int xgbe_setup_tc(struct net_device *netdev, u32 handle, u32 chain_index,
+ __be16 proto,
struct tc_to_netdev *tc_to_netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/apm/xgene-v2/ethtool.c b/drivers/net/ethernet/apm/xgene-v2/ethtool.c
index b6666e4..d31ad82 100644
--- a/drivers/net/ethernet/apm/xgene-v2/ethtool.c
+++ b/drivers/net/ethernet/apm/xgene-v2/ethtool.c
@@ -157,7 +157,9 @@
if (!phydev)
return -ENODEV;
- return phy_ethtool_ksettings_get(phydev, cmd);
+ phy_ethtool_ksettings_get(phydev, cmd);
+
+ return 0;
}
static int xge_set_link_ksettings(struct net_device *ndev,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
index 28fdedc..4f50f11 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
@@ -23,9 +23,17 @@
struct xgene_gstrings_stats {
char name[ETH_GSTRING_LEN];
int offset;
+ u32 addr;
+ u32 mask;
};
-#define XGENE_STAT(m) { #m, offsetof(struct xgene_enet_pdata, stats.m) }
+#define XGENE_STAT(m) { #m, offsetof(struct rtnl_link_stats64, m) }
+#define XGENE_EXTD_STAT(s, a, m) \
+ { \
+ .name = #s, \
+ .addr = a ## _ADDR, \
+ .mask = m \
+ }
static const struct xgene_gstrings_stats gstrings_stats[] = {
XGENE_STAT(rx_packets),
@@ -40,7 +48,65 @@
XGENE_STAT(rx_fifo_errors)
};
+static const struct xgene_gstrings_stats gstrings_extd_stats[] = {
+ XGENE_EXTD_STAT(tx_rx_64b_frame_cntr, TR64, 31),
+ XGENE_EXTD_STAT(tx_rx_127b_frame_cntr, TR127, 31),
+ XGENE_EXTD_STAT(tx_rx_255b_frame_cntr, TR255, 31),
+ XGENE_EXTD_STAT(tx_rx_511b_frame_cntr, TR511, 31),
+ XGENE_EXTD_STAT(tx_rx_1023b_frame_cntr, TR1K, 31),
+ XGENE_EXTD_STAT(tx_rx_1518b_frame_cntr, TRMAX, 31),
+ XGENE_EXTD_STAT(tx_rx_1522b_frame_cntr, TRMGV, 31),
+ XGENE_EXTD_STAT(rx_fcs_error_cntr, RFCS, 16),
+ XGENE_EXTD_STAT(rx_multicast_pkt_cntr, RMCA, 31),
+ XGENE_EXTD_STAT(rx_broadcast_pkt_cntr, RBCA, 31),
+ XGENE_EXTD_STAT(rx_ctrl_frame_pkt_cntr, RXCF, 16),
+ XGENE_EXTD_STAT(rx_pause_frame_pkt_cntr, RXPF, 16),
+ XGENE_EXTD_STAT(rx_unk_opcode_cntr, RXUO, 16),
+ XGENE_EXTD_STAT(rx_align_err_cntr, RALN, 16),
+ XGENE_EXTD_STAT(rx_frame_len_err_cntr, RFLR, 16),
+ XGENE_EXTD_STAT(rx_frame_len_err_recov_cntr, DUMP, 0),
+ XGENE_EXTD_STAT(rx_code_err_cntr, RCDE, 16),
+ XGENE_EXTD_STAT(rx_carrier_sense_err_cntr, RCSE, 16),
+ XGENE_EXTD_STAT(rx_undersize_pkt_cntr, RUND, 16),
+ XGENE_EXTD_STAT(rx_oversize_pkt_cntr, ROVR, 16),
+ XGENE_EXTD_STAT(rx_fragments_cntr, RFRG, 16),
+ XGENE_EXTD_STAT(rx_jabber_cntr, RJBR, 16),
+ XGENE_EXTD_STAT(rx_jabber_recov_cntr, DUMP, 0),
+ XGENE_EXTD_STAT(rx_dropped_pkt_cntr, RDRP, 16),
+ XGENE_EXTD_STAT(rx_overrun_cntr, DUMP, 0),
+ XGENE_EXTD_STAT(tx_multicast_pkt_cntr, TMCA, 31),
+ XGENE_EXTD_STAT(tx_broadcast_pkt_cntr, TBCA, 31),
+ XGENE_EXTD_STAT(tx_pause_ctrl_frame_cntr, TXPF, 16),
+ XGENE_EXTD_STAT(tx_defer_pkt_cntr, TDFR, 31),
+ XGENE_EXTD_STAT(tx_excv_defer_pkt_cntr, TEDF, 31),
+ XGENE_EXTD_STAT(tx_single_col_pkt_cntr, TSCL, 31),
+ XGENE_EXTD_STAT(tx_multi_col_pkt_cntr, TMCL, 31),
+ XGENE_EXTD_STAT(tx_late_col_pkt_cntr, TLCL, 31),
+ XGENE_EXTD_STAT(tx_excv_col_pkt_cntr, TXCL, 31),
+ XGENE_EXTD_STAT(tx_total_col_cntr, TNCL, 31),
+ XGENE_EXTD_STAT(tx_pause_frames_hnrd_cntr, TPFH, 16),
+ XGENE_EXTD_STAT(tx_drop_frame_cntr, TDRP, 16),
+ XGENE_EXTD_STAT(tx_jabber_frame_cntr, TJBR, 12),
+ XGENE_EXTD_STAT(tx_fcs_error_cntr, TFCS, 12),
+ XGENE_EXTD_STAT(tx_ctrl_frame_cntr, TXCF, 12),
+ XGENE_EXTD_STAT(tx_oversize_frame_cntr, TOVR, 12),
+ XGENE_EXTD_STAT(tx_undersize_frame_cntr, TUND, 12),
+ XGENE_EXTD_STAT(tx_fragments_cntr, TFRG, 12),
+ XGENE_EXTD_STAT(tx_underrun_cntr, DUMP, 0)
+};
+
#define XGENE_STATS_LEN ARRAY_SIZE(gstrings_stats)
+#define XGENE_EXTD_STATS_LEN ARRAY_SIZE(gstrings_extd_stats)
+#define RFCS_IDX 7
+#define RALN_IDX 13
+#define RFLR_IDX 14
+#define FALSE_RFLR_IDX 15
+#define RUND_IDX 18
+#define FALSE_RJBR_IDX 22
+#define RX_OVERRUN_IDX 24
+#define TFCS_IDX 38
+#define TFRG_IDX 42
+#define TX_UNDERRUN_IDX 43
static void xgene_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
@@ -61,17 +127,21 @@
struct phy_device *phydev = ndev->phydev;
u32 supported;
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
+ if (phy_interface_mode_is_rgmii(pdata->phy_mode)) {
if (phydev == NULL)
return -ENODEV;
- return phy_ethtool_ksettings_get(phydev, cmd);
+ phy_ethtool_ksettings_get(phydev, cmd);
+
+ return 0;
} else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
if (pdata->mdio_driver) {
if (!phydev)
return -ENODEV;
- return phy_ethtool_ksettings_get(phydev, cmd);
+ phy_ethtool_ksettings_get(phydev, cmd);
+
+ return 0;
}
supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
@@ -111,7 +181,7 @@
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct phy_device *phydev = ndev->phydev;
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
+ if (phy_interface_mode_is_rgmii(pdata->phy_mode)) {
if (!phydev)
return -ENODEV;
@@ -142,6 +212,11 @@
memcpy(p, gstrings_stats[i].name, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+
+ for (i = 0; i < XGENE_EXTD_STATS_LEN; i++) {
+ memcpy(p, gstrings_extd_stats[i].name, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
}
static int xgene_get_sset_count(struct net_device *ndev, int sset)
@@ -149,18 +224,71 @@
if (sset != ETH_SS_STATS)
return -EINVAL;
- return XGENE_STATS_LEN;
+ return XGENE_STATS_LEN + XGENE_EXTD_STATS_LEN;
+}
+
+static void xgene_get_extd_stats(struct xgene_enet_pdata *pdata)
+{
+ u32 rx_drop, tx_drop;
+ u32 mask, tmp;
+ int i;
+
+ for (i = 0; i < XGENE_EXTD_STATS_LEN; i++) {
+ tmp = xgene_enet_rd_stat(pdata, gstrings_extd_stats[i].addr);
+ if (gstrings_extd_stats[i].mask) {
+ mask = GENMASK(gstrings_extd_stats[i].mask - 1, 0);
+ pdata->extd_stats[i] += (tmp & mask);
+ }
+ }
+
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+ /* Errata 10GE_10 - SW should interpret RALN as 0 */
+ pdata->extd_stats[RALN_IDX] = 0;
+ } else {
+ /* Errata ENET_15 - Fixes RFCS, RFLR, TFCS counter */
+ pdata->extd_stats[RFCS_IDX] -= pdata->extd_stats[RALN_IDX];
+ pdata->extd_stats[RFLR_IDX] -= pdata->extd_stats[RUND_IDX];
+ pdata->extd_stats[TFCS_IDX] -= pdata->extd_stats[TFRG_IDX];
+ }
+
+ pdata->mac_ops->get_drop_cnt(pdata, &rx_drop, &tx_drop);
+ pdata->extd_stats[RX_OVERRUN_IDX] += rx_drop;
+ pdata->extd_stats[TX_UNDERRUN_IDX] += tx_drop;
+
+ /* Errata 10GE_8 - Update Frame recovered from Errata 10GE_8/ENET_11 */
+ pdata->extd_stats[FALSE_RFLR_IDX] = pdata->false_rflr;
+ /* Errata ENET_15 - Jabber Frame recov'ed from Errata 10GE_10/ENET_15 */
+ pdata->extd_stats[FALSE_RJBR_IDX] = pdata->vlan_rjbr;
+}
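Note that the .mask field of an XGENE_EXTD_STAT entry stores the counter's width in bits, not a literal bitmask; GENMASK() derives the mask at read time so bits above the hardware width never reach the accumulator:

/* e.g. RFCS is a 16-bit counter: GENMASK(16 - 1, 0) == 0x0000ffff,
 * while the 31-bit frame counters use GENMASK(30, 0) == 0x7fffffff.
 * Entries with width 0 (the DUMP placeholders) are skipped entirely.
 */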
+
+int xgene_extd_stats_init(struct xgene_enet_pdata *pdata)
+{
+ pdata->extd_stats = devm_kmalloc_array(&pdata->pdev->dev,
+ XGENE_EXTD_STATS_LEN, sizeof(u64), GFP_KERNEL);
+ if (!pdata->extd_stats)
+ return -ENOMEM;
+
+ xgene_get_extd_stats(pdata);
+ memset(pdata->extd_stats, 0, XGENE_EXTD_STATS_LEN * sizeof(u64));
+
+ return 0;
}
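/* Read-then-zero: the devm_kmalloc_array() memory is uninitialized and
 * the first xgene_get_extd_stats() pass accumulates into it, but that
 * data is discarded by the memset(); the read exists only to flush the
 * clear-on-read hardware counters so later deltas start from a clean
 * baseline.
 */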
static void xgene_get_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *dummy,
u64 *data)
{
- void *pdata = netdev_priv(ndev);
+ struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+ struct rtnl_link_stats64 stats;
int i;
+ dev_get_stats(ndev, &stats);
for (i = 0; i < XGENE_STATS_LEN; i++)
- *data++ = *(u64 *)(pdata + gstrings_stats[i].offset);
+ data[i] = *(u64 *)((char *)&stats + gstrings_stats[i].offset);
+
+ xgene_get_extd_stats(pdata);
+ for (i = 0; i < XGENE_EXTD_STATS_LEN; i++)
+ data[i + XGENE_STATS_LEN] = pdata->extd_stats[i];
}
static void xgene_get_pauseparam(struct net_device *ndev,
@@ -180,7 +308,7 @@
struct phy_device *phydev = ndev->phydev;
u32 oldadv, newadv;
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
+ if (phy_interface_mode_is_rgmii(pdata->phy_mode) ||
pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
if (!phydev)
return -EINVAL;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 2a835e0..e45b587 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -205,30 +205,24 @@
}
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
- struct xgene_enet_pdata *pdata,
enum xgene_enet_err_code status)
{
switch (status) {
case INGRESS_CRC:
ring->rx_crc_errors++;
- ring->rx_dropped++;
break;
case INGRESS_CHECKSUM:
case INGRESS_CHECKSUM_COMPUTE:
ring->rx_errors++;
- ring->rx_dropped++;
break;
case INGRESS_TRUNC_FRAME:
ring->rx_frame_errors++;
- ring->rx_dropped++;
break;
case INGRESS_PKT_LEN:
ring->rx_length_errors++;
- ring->rx_dropped++;
break;
case INGRESS_PKT_UNDER:
ring->rx_frame_errors++;
- ring->rx_dropped++;
break;
case INGRESS_FIFO_OVERRUN:
ring->rx_fifo_errors++;
@@ -270,42 +264,39 @@
iowrite32(val, addr);
}
-static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
- void __iomem *cmd, void __iomem *cmd_done,
- u32 wr_addr, u32 wr_data)
-{
- u32 done;
- u8 wait = 10;
-
- iowrite32(wr_addr, addr);
- iowrite32(wr_data, wr);
- iowrite32(XGENE_ENET_WR_CMD, cmd);
-
- /* wait for write command to complete */
- while (!(done = ioread32(cmd_done)) && wait--)
- udelay(1);
-
- if (!done)
- return false;
-
- iowrite32(0, cmd);
-
- return true;
-}
-
-static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata,
- u32 wr_addr, u32 wr_data)
+void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata, u32 wr_addr, u32 wr_data)
{
void __iomem *addr, *wr, *cmd, *cmd_done;
+ struct net_device *ndev = pdata->ndev;
+ u8 wait = 10;
+ u32 done;
+
+ if (pdata->mdio_driver && ndev->phydev &&
+ phy_interface_mode_is_rgmii(pdata->phy_mode)) {
+ struct mii_bus *bus = ndev->phydev->mdio.bus;
+
+ return xgene_mdio_wr_mac(bus->priv, wr_addr, wr_data);
+ }
addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
- if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
- netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
- wr_addr);
+ spin_lock(&pdata->mac_lock);
+ iowrite32(wr_addr, addr);
+ iowrite32(wr_data, wr);
+ iowrite32(XGENE_ENET_WR_CMD, cmd);
+
+ while (!(done = ioread32(cmd_done)) && wait--)
+ udelay(1);
+
+ if (!done)
+ netdev_err(ndev, "mac write failed, addr: %04x data: %08x\n",
+ wr_addr, wr_data);
+
+ iowrite32(0, cmd);
+ spin_unlock(&pdata->mac_lock);
}
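The MAC block is reached through a four-register indirect window: address, data, command and command-done. A sketch of that idiom reduced to its essentials, as now inlined into xgene_enet_wr_mac() above (the caller must hold the lock serialising the shared window):

static bool example_indirect_write(void __iomem *addr, void __iomem *data,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 reg, u32 val)
{
	u32 done;
	u8 wait = 10;

	iowrite32(reg, addr);
	iowrite32(val, data);
	iowrite32(XGENE_ENET_WR_CMD, cmd);

	/* poll for completion, roughly 10us worst case */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	iowrite32(0, cmd);	/* re-arm the window for the next access */

	return done != 0;
}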
static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
@@ -332,42 +323,69 @@
*val = ioread32(addr);
}
-static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
- void __iomem *cmd, void __iomem *cmd_done,
- u32 rd_addr, u32 *rd_data)
-{
- u32 done;
- u8 wait = 10;
-
- iowrite32(rd_addr, addr);
- iowrite32(XGENE_ENET_RD_CMD, cmd);
-
- /* wait for read command to complete */
- while (!(done = ioread32(cmd_done)) && wait--)
- udelay(1);
-
- if (!done)
- return false;
-
- *rd_data = ioread32(rd);
- iowrite32(0, cmd);
-
- return true;
-}
-
-static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata,
- u32 rd_addr, u32 *rd_data)
+u32 xgene_enet_rd_mac(struct xgene_enet_pdata *pdata, u32 rd_addr)
{
void __iomem *addr, *rd, *cmd, *cmd_done;
+ struct net_device *ndev = pdata->ndev;
+ u32 done, rd_data;
+ u8 wait = 10;
+
+ if (pdata->mdio_driver && ndev->phydev &&
+ phy_interface_mode_is_rgmii(pdata->phy_mode)) {
+ struct mii_bus *bus = ndev->phydev->mdio.bus;
+
+ return xgene_mdio_rd_mac(bus->priv, rd_addr);
+ }
addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
- if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
- netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
+ spin_lock(&pdata->mac_lock);
+ iowrite32(rd_addr, addr);
+ iowrite32(XGENE_ENET_RD_CMD, cmd);
+
+ while (!(done = ioread32(cmd_done)) && wait--)
+ udelay(1);
+
+ if (!done)
+ netdev_err(ndev, "mac read failed, addr: %04x\n", rd_addr);
+
+ rd_data = ioread32(rd);
+ iowrite32(0, cmd);
+ spin_unlock(&pdata->mac_lock);
+
+ return rd_data;
+}
+
+u32 xgene_enet_rd_stat(struct xgene_enet_pdata *pdata, u32 rd_addr)
+{
+ void __iomem *addr, *rd, *cmd, *cmd_done;
+ u32 done, rd_data;
+ u8 wait = 10;
+
+ addr = pdata->mcx_stats_addr + STAT_ADDR_REG_OFFSET;
+ rd = pdata->mcx_stats_addr + STAT_READ_REG_OFFSET;
+ cmd = pdata->mcx_stats_addr + STAT_COMMAND_REG_OFFSET;
+ cmd_done = pdata->mcx_stats_addr + STAT_COMMAND_DONE_REG_OFFSET;
+
+ spin_lock(&pdata->stats_lock);
+ iowrite32(rd_addr, addr);
+ iowrite32(XGENE_ENET_RD_CMD, cmd);
+
+ while (!(done = ioread32(cmd_done)) && wait--)
+ udelay(1);
+
+ if (!done)
+ netdev_err(pdata->ndev, "mac stats read failed, addr: %04x\n",
rd_addr);
+
+ rd_data = ioread32(rd);
+ iowrite32(0, cmd);
+ spin_unlock(&pdata->stats_lock);
+
+ return rd_data;
}
static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
@@ -379,8 +397,8 @@
(dev_addr[1] << 8) | dev_addr[0];
addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);
- xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0);
- xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1);
+ xgene_enet_wr_mac(pdata, STATION_ADDR0_ADDR, addr0);
+ xgene_enet_wr_mac(pdata, STATION_ADDR1_ADDR, addr1);
}
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
@@ -405,8 +423,8 @@
static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}
static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
@@ -456,8 +474,8 @@
xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
- xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2);
- xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl);
+ mc2 = xgene_enet_rd_mac(pdata, MAC_CONFIG_2_ADDR);
+ intf_ctl = xgene_enet_rd_mac(pdata, INTERFACE_CONTROL_ADDR);
xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);
switch (pdata->phy_speed) {
@@ -495,8 +513,8 @@
}
mc2 |= FULL_DUPLEX2 | PAD_CRC | LENGTH_CHK;
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
- xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
+ xgene_enet_wr_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
xgene_enet_configure_clock(pdata);
@@ -506,7 +524,7 @@
static void xgene_enet_set_frame_size(struct xgene_enet_pdata *pdata, int size)
{
- xgene_enet_wr_mcx_mac(pdata, MAX_FRAME_LEN_ADDR, size);
+ xgene_enet_wr_mac(pdata, MAX_FRAME_LEN_ADDR, size);
}
static void xgene_gmac_enable_tx_pause(struct xgene_enet_pdata *pdata,
@@ -528,14 +546,14 @@
{
u32 data;
- xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
+ data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
if (enable)
data |= TX_FLOW_EN;
else
data &= ~TX_FLOW_EN;
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data);
pdata->mac_ops->enable_tx_pause(pdata, enable);
}
@@ -544,14 +562,14 @@
{
u32 data;
- xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
+ data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
if (enable)
data |= RX_FLOW_EN;
else
data &= ~RX_FLOW_EN;
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data);
}
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
@@ -565,9 +583,9 @@
xgene_gmac_set_mac_addr(pdata);
/* Adjust MDC clock frequency */
- xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value);
+ value = xgene_enet_rd_mac(pdata, MII_MGMT_CONFIG_ADDR);
MGMT_CLOCK_SEL_SET(&value, 7);
- xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value);
+ xgene_enet_wr_mac(pdata, MII_MGMT_CONFIG_ADDR, value);
/* Enable drop if bufpool not available */
xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
@@ -600,6 +618,18 @@
xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
}
+static void xgene_gmac_get_drop_cnt(struct xgene_enet_pdata *pdata,
+ u32 *rx, u32 *tx)
+{
+ u32 count;
+
+ xgene_enet_rd_mcx_csr(pdata, ICM_ECM_DROP_COUNT_REG0_ADDR, &count);
+ *rx = ICM_DROP_COUNT(count);
+ *tx = ECM_DROP_COUNT(count);
+ /* Errata: 10GE_4 - Fix ICM_ECM_DROP_COUNT not clear-on-read */
+ xgene_enet_rd_mcx_csr(pdata, ECM_CONFIG0_REG_0_ADDR, &count);
+}
+
static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
u32 val = 0xffffffff;
@@ -637,32 +667,32 @@
{
u32 data;
- xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
+ data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
}
static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
{
u32 data;
- xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
+ data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
}
static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
{
u32 data;
- xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
+ data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
}
static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
{
u32 data;
- xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
+ data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}
bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
@@ -733,27 +763,6 @@
static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
struct device *dev = &pdata->pdev->dev;
- struct xgene_enet_desc_ring *ring;
- u32 pb;
- int i;
-
- pb = 0;
- for (i = 0; i < pdata->rxq_cnt; i++) {
- ring = pdata->rx_ring[i]->buf_pool;
- pb |= BIT(xgene_enet_get_fpsel(ring->id));
- ring = pdata->rx_ring[i]->page_pool;
- if (ring)
- pb |= BIT(xgene_enet_get_fpsel(ring->id));
-
- }
- xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);
-
- pb = 0;
- for (i = 0; i < pdata->txq_cnt; i++) {
- ring = pdata->tx_ring[i];
- pb |= BIT(xgene_enet_ring_bufnum(ring->id));
- }
- xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);
if (dev->of_node) {
if (!IS_ERR(pdata->clk))
@@ -1009,6 +1018,7 @@
.tx_enable = xgene_gmac_tx_enable,
.rx_disable = xgene_gmac_rx_disable,
.tx_disable = xgene_gmac_tx_disable,
+ .get_drop_cnt = xgene_gmac_get_drop_cnt,
.set_speed = xgene_gmac_set_speed,
.set_mac_addr = xgene_gmac_set_mac_addr,
.set_framesize = xgene_enet_set_frame_size,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index d250bfe..5d3e18d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -115,6 +115,7 @@
#define BLOCK_ETH_CLKRST_CSR_OFFSET 0xc000
#define BLOCK_ETH_DIAG_CSR_OFFSET 0xD000
#define BLOCK_ETH_MAC_OFFSET 0x0000
+#define BLOCK_ETH_STATS_OFFSET 0x0000
#define BLOCK_ETH_MAC_CSR_OFFSET 0x2800
#define CLKEN_ADDR 0xc208
@@ -126,6 +127,12 @@
#define MAC_READ_REG_OFFSET 0x0c
#define MAC_COMMAND_DONE_REG_OFFSET 0x10
+#define STAT_ADDR_REG_OFFSET 0x14
+#define STAT_COMMAND_REG_OFFSET 0x18
+#define STAT_WRITE_REG_OFFSET 0x1c
+#define STAT_READ_REG_OFFSET 0x20
+#define STAT_COMMAND_DONE_REG_OFFSET 0x24
+
#define PCS_ADDR_REG_OFFSET 0x00
#define PCS_COMMAND_REG_OFFSET 0x04
#define PCS_WRITE_REG_OFFSET 0x08
@@ -185,6 +192,10 @@
#define CFG_CLE_NXTFPSEL0(val) (((val) << 20) & GENMASK(23, 20))
#define ICM_CONFIG0_REG_0_ADDR 0x0400
#define ICM_CONFIG2_REG_0_ADDR 0x0410
+#define ECM_CONFIG0_REG_0_ADDR 0x0500
+#define ECM_CONFIG0_REG_1_ADDR 0x0504
+#define ICM_ECM_DROP_COUNT_REG0_ADDR 0x0508
+#define ICM_ECM_DROP_COUNT_REG1_ADDR 0x050c
#define RX_DV_GATE_REG_0_ADDR 0x05fc
#define TX_DV_GATE_EN0 BIT(2)
#define RX_DV_GATE_EN0 BIT(1)
@@ -217,12 +228,53 @@
#define FULL_DUPLEX2 BIT(0)
#define PAD_CRC BIT(2)
#define LENGTH_CHK BIT(4)
-#define SCAN_AUTO_INCR BIT(5)
-#define TBYT_ADDR 0x38
-#define TPKT_ADDR 0x39
-#define TDRP_ADDR 0x45
-#define TFCS_ADDR 0x47
-#define TUND_ADDR 0x4a
+
+#define TR64_ADDR 0x20
+#define TR127_ADDR 0x21
+#define TR255_ADDR 0x22
+#define TR511_ADDR 0x23
+#define TR1K_ADDR 0x24
+#define TRMAX_ADDR 0x25
+#define TRMGV_ADDR 0x26
+
+#define RFCS_ADDR 0x29
+#define RMCA_ADDR 0x2a
+#define RBCA_ADDR 0x2b
+#define RXCF_ADDR 0x2c
+#define RXPF_ADDR 0x2d
+#define RXUO_ADDR 0x2e
+#define RALN_ADDR 0x2f
+#define RFLR_ADDR 0x30
+#define RCDE_ADDR 0x31
+#define RCSE_ADDR 0x32
+#define RUND_ADDR 0x33
+#define ROVR_ADDR 0x34
+#define RFRG_ADDR 0x35
+#define RJBR_ADDR 0x36
+#define RDRP_ADDR 0x37
+
+#define TMCA_ADDR 0x3a
+#define TBCA_ADDR 0x3b
+#define TXPF_ADDR 0x3c
+#define TDFR_ADDR 0x3d
+#define TEDF_ADDR 0x3e
+#define TSCL_ADDR 0x3f
+#define TMCL_ADDR 0x40
+#define TLCL_ADDR 0x41
+#define TXCL_ADDR 0x42
+#define TNCL_ADDR 0x43
+#define TPFH_ADDR 0x44
+#define TDRP_ADDR 0x45
+#define TJBR_ADDR 0x46
+#define TFCS_ADDR 0x47
+#define TXCF_ADDR 0x48
+#define TOVR_ADDR 0x49
+#define TUND_ADDR 0x4a
+#define TFRG_ADDR 0x4b
+#define DUMP_ADDR 0x27
+
+#define ECM_DROP_COUNT(src) xgene_get_bits(src, 0, 15)
+#define ICM_DROP_COUNT(src) xgene_get_bits(src, 16, 31)
#define TSO_IPPROTO_TCP 1
@@ -380,14 +432,16 @@
}
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
- struct xgene_enet_pdata *pdata,
enum xgene_enet_err_code status);
-
int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata);
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata);
bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
int xgene_enet_phy_connect(struct net_device *ndev);
void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata);
+u32 xgene_enet_rd_mac(struct xgene_enet_pdata *pdata, u32 rd_addr);
+void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata, u32 wr_addr,
+ u32 wr_data);
+u32 xgene_enet_rd_stat(struct xgene_enet_pdata *pdata, u32 rd_addr);
extern const struct xgene_mac_ops xgene_gmac_ops;
extern const struct xgene_port_ops xgene_gport_ops;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 5f37ed3..d3906f6 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -246,9 +246,9 @@
skb_frag_t *frag;
dma_addr_t *frag_dma_addr;
u16 skb_index;
- u8 status;
- int i, ret = 0;
u8 mss_index;
+ u8 status;
+ int i;
skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
skb = cp_ring->cp_skb[skb_index];
@@ -275,19 +275,17 @@
/* Checking for error */
status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
if (unlikely(status > 2)) {
- xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
- status);
- ret = -EIO;
+ cp_ring->tx_dropped++;
+ cp_ring->tx_errors++;
}
if (likely(skb)) {
dev_kfree_skb_any(skb);
} else {
netdev_err(cp_ring->ndev, "completion skb is NULL\n");
- ret = -EIO;
}
- return ret;
+ return 0;
}
static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
@@ -658,6 +656,18 @@
buf_pool->head = head;
}
+/* Errata 10GE_10 and ENET_15 - Fix duplicated HW statistic counters */
+static bool xgene_enet_errata_10GE_10(struct sk_buff *skb, u32 len, u8 status)
+{
+ if (status == INGRESS_CRC &&
+ len >= (ETHER_STD_PACKET + 1) &&
+ len <= (ETHER_STD_PACKET + 4) &&
+ skb->protocol == htons(ETH_P_8021Q))
+ return true;
+
+ return false;
+}
+
/* Errata 10GE_8 and ENET_11 - allow packet with length <=64B */
static bool xgene_enet_errata_10GE_8(struct sk_buff *skb, u32 len, u8 status)
{
@@ -708,10 +718,15 @@
status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
if (unlikely(status)) {
- if (!xgene_enet_errata_10GE_8(skb, datalen, status)) {
+ if (xgene_enet_errata_10GE_8(skb, datalen, status)) {
+ pdata->false_rflr++;
+ } else if (xgene_enet_errata_10GE_10(skb, datalen, status)) {
+ pdata->vlan_rjbr++;
+ } else {
dev_kfree_skb_any(skb);
xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
- xgene_enet_parse_error(rx_ring, pdata, status);
+ xgene_enet_parse_error(rx_ring, status);
+ rx_ring->rx_dropped++;
goto out;
}
}
@@ -1466,10 +1481,9 @@
static void xgene_enet_get_stats64(
struct net_device *ndev,
- struct rtnl_link_stats64 *storage)
+ struct rtnl_link_stats64 *stats)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
- struct rtnl_link_stats64 *stats = &pdata->stats;
struct xgene_enet_desc_ring *ring;
int i;
@@ -1478,6 +1492,8 @@
if (ring) {
stats->tx_packets += ring->tx_packets;
stats->tx_bytes += ring->tx_bytes;
+ stats->tx_dropped += ring->tx_dropped;
+ stats->tx_errors += ring->tx_errors;
}
}
@@ -1486,14 +1502,18 @@
if (ring) {
stats->rx_packets += ring->rx_packets;
stats->rx_bytes += ring->rx_bytes;
- stats->rx_errors += ring->rx_length_errors +
+ stats->rx_dropped += ring->rx_dropped;
+ stats->rx_errors += ring->rx_errors +
+ ring->rx_length_errors +
ring->rx_crc_errors +
ring->rx_frame_errors +
ring->rx_fifo_errors;
- stats->rx_dropped += ring->rx_dropped;
+ stats->rx_length_errors += ring->rx_length_errors;
+ stats->rx_crc_errors += ring->rx_crc_errors;
+ stats->rx_frame_errors += ring->rx_frame_errors;
+ stats->rx_fifo_errors += ring->rx_fifo_errors;
}
}
- memcpy(storage, stats, sizeof(struct rtnl_link_stats64));
}
static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
@@ -1614,7 +1634,7 @@
struct device *dev = &pdev->dev;
int i, ret, max_irqs;
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_mode_is_rgmii(pdata->phy_mode))
max_irqs = 1;
else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
max_irqs = 2;
@@ -1740,7 +1760,7 @@
dev_err(dev, "Unable to get phy-connection-type\n");
return pdata->phy_mode;
}
- if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
+ if (!phy_interface_mode_is_rgmii(pdata->phy_mode) &&
pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
dev_err(dev, "Incorrect phy-connection-type specified\n");
@@ -1785,15 +1805,18 @@
pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
+ if (phy_interface_mode_is_rgmii(pdata->phy_mode) ||
pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
+ pdata->mcx_stats_addr =
+ pdata->base_addr + BLOCK_ETH_STATS_OFFSET;
offset = (pdata->enet_id == XGENE_ENET1) ?
BLOCK_ETH_MAC_CSR_OFFSET :
X2_BLOCK_ETH_MAC_CSR_OFFSET;
pdata->mcx_mac_csr_addr = base_addr + offset;
} else {
pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
+ pdata->mcx_stats_addr = base_addr + BLOCK_AXG_STATS_OFFSET;
pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
}
@@ -1881,6 +1904,9 @@
{
switch (pdata->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
pdata->mac_ops = &xgene_gmac_ops;
pdata->port_ops = &xgene_gport_ops;
pdata->rm = RM3;
@@ -2055,6 +2081,7 @@
goto err;
xgene_enet_setup_ops(pdata);
+ spin_lock_init(&pdata->mac_lock);
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM;
@@ -2076,7 +2103,7 @@
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
INIT_DELAYED_WORK(&pdata->link_work, link_state);
} else if (!pdata->mdio_driver) {
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_mode_is_rgmii(pdata->phy_mode))
ret = xgene_enet_mdio_config(pdata);
else
INIT_DELAYED_WORK(&pdata->link_work, link_state);
@@ -2085,6 +2112,11 @@
goto err1;
}
+ spin_lock_init(&pdata->stats_lock);
+ ret = xgene_extd_stats_init(pdata);
+ if (ret)
+ goto err2;
+
xgene_enet_napi_add(pdata);
ret = register_netdev(ndev);
if (ret) {
@@ -2102,7 +2134,7 @@
if (pdata->mdio_driver)
xgene_enet_phy_disconnect(pdata);
- else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
xgene_enet_mdio_remove(pdata);
err1:
xgene_enet_delete_desc_rings(pdata);
@@ -2126,12 +2158,12 @@
if (pdata->mdio_driver)
xgene_enet_phy_disconnect(pdata);
- else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
xgene_enet_mdio_remove(pdata);
unregister_netdev(ndev);
- pdata->port_ops->shutdown(pdata);
xgene_enet_delete_desc_rings(pdata);
+ pdata->port_ops->shutdown(pdata);
free_netdev(ndev);
return 0;
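
The get_stats64() rework above drops the cached rtnl_link_stats64 in pdata and sums the per-ring software counters straight into the caller's buffer. A minimal sketch of the aggregation pattern, assuming the per-ring u64 counters added in this series (function name illustrative):

static void example_get_stats64(struct net_device *ndev,
				struct rtnl_link_stats64 *stats)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (!ring)
			continue;
		/* per-ring counters are updated from NAPI context */
		stats->tx_packets += ring->tx_packets;
		stats->tx_bytes += ring->tx_bytes;
		stats->tx_dropped += ring->tx_dropped;
		stats->tx_errors += ring->tx_errors;
	}
}
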
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 0d4be24..9857685 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -42,6 +42,7 @@
#define XGENE_DRV_VERSION "v1.0"
#define ETHER_MIN_PACKET 64
+#define ETHER_STD_PACKET 1518
#define XGENE_ENET_STD_MTU 1536
#define XGENE_ENET_MAX_MTU 9600
#define SKB_BUFFER_SIZE (XGENE_ENET_STD_MTU - NET_IP_ALIGN)
@@ -138,6 +139,8 @@
__le64 *exp_bufs;
u64 tx_packets;
u64 tx_bytes;
+ u64 tx_dropped;
+ u64 tx_errors;
u64 rx_packets;
u64 rx_bytes;
u64 rx_dropped;
@@ -155,6 +158,7 @@
void (*rx_enable)(struct xgene_enet_pdata *pdata);
void (*tx_disable)(struct xgene_enet_pdata *pdata);
void (*rx_disable)(struct xgene_enet_pdata *pdata);
+ void (*get_drop_cnt)(struct xgene_enet_pdata *pdata, u32 *rx, u32 *tx);
void (*set_speed)(struct xgene_enet_pdata *pdata);
void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
void (*set_framesize)(struct xgene_enet_pdata *pdata, int framesize);
@@ -212,6 +216,7 @@
void __iomem *eth_diag_csr_addr;
void __iomem *mcx_mac_addr;
void __iomem *mcx_mac_csr_addr;
+ void __iomem *mcx_stats_addr;
void __iomem *base_addr;
void __iomem *pcs_addr;
void __iomem *ring_csr_addr;
@@ -219,8 +224,12 @@
int phy_mode;
enum xgene_enet_rm rm;
struct xgene_enet_cle cle;
- struct rtnl_link_stats64 stats;
+ u64 *extd_stats;
+ u64 false_rflr;
+ u64 vlan_rjbr;
+ spinlock_t stats_lock; /* statistics lock */
const struct xgene_mac_ops *mac_ops;
+ spinlock_t mac_lock; /* mac lock */
const struct xgene_port_ops *port_ops;
struct xgene_ring_ops *ring_ops;
const struct xgene_cle_ops *cle_ops;
@@ -263,5 +272,6 @@
}
void xgene_enet_set_ethtool_ops(struct net_device *netdev);
+int xgene_extd_stats_init(struct xgene_enet_pdata *pdata);
#endif /* __XGENE_ENET_MAIN_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index a8e063b..b1a83fd 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -54,41 +54,6 @@
iowrite32(val, addr);
}
-static bool xgene_enet_wr_indirect(struct xgene_indirect_ctl *ctl,
- u32 wr_addr, u32 wr_data)
-{
- int i;
-
- iowrite32(wr_addr, ctl->addr);
- iowrite32(wr_data, ctl->ctl);
- iowrite32(XGENE_ENET_WR_CMD, ctl->cmd);
-
- /* wait for write command to complete */
- for (i = 0; i < 10; i++) {
- if (ioread32(ctl->cmd_done)) {
- iowrite32(0, ctl->cmd);
- return true;
- }
- udelay(1);
- }
-
- return false;
-}
-
-static void xgene_enet_wr_mac(struct xgene_enet_pdata *p,
- u32 wr_addr, u32 wr_data)
-{
- struct xgene_indirect_ctl ctl = {
- .addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET,
- .ctl = p->mcx_mac_addr + MAC_WRITE_REG_OFFSET,
- .cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET,
- .cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET
- };
-
- if (!xgene_enet_wr_indirect(&ctl, wr_addr, wr_data))
- netdev_err(p->ndev, "mac write failed, addr: %04x\n", wr_addr);
-}
-
static u32 xgene_enet_rd_csr(struct xgene_enet_pdata *p, u32 offset)
{
return ioread32(p->eth_csr_addr + offset);
@@ -104,42 +69,6 @@
return ioread32(p->mcx_mac_csr_addr + offset);
}
-static u32 xgene_enet_rd_indirect(struct xgene_indirect_ctl *ctl, u32 rd_addr)
-{
- u32 rd_data;
- int i;
-
- iowrite32(rd_addr, ctl->addr);
- iowrite32(XGENE_ENET_RD_CMD, ctl->cmd);
-
- /* wait for read command to complete */
- for (i = 0; i < 10; i++) {
- if (ioread32(ctl->cmd_done)) {
- rd_data = ioread32(ctl->ctl);
- iowrite32(0, ctl->cmd);
-
- return rd_data;
- }
- udelay(1);
- }
-
- pr_err("%s: mac read failed, addr: %04x\n", __func__, rd_addr);
-
- return 0;
-}
-
-static u32 xgene_enet_rd_mac(struct xgene_enet_pdata *p, u32 rd_addr)
-{
- struct xgene_indirect_ctl ctl = {
- .addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET,
- .ctl = p->mcx_mac_addr + MAC_READ_REG_OFFSET,
- .cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET,
- .cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET
- };
-
- return xgene_enet_rd_indirect(&ctl, rd_addr);
-}
-
static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
{
struct net_device *ndev = p->ndev;
@@ -166,6 +95,24 @@
return -ENODEV;
}
+static void xgene_sgmac_get_drop_cnt(struct xgene_enet_pdata *pdata,
+ u32 *rx, u32 *tx)
+{
+ u32 addr, count;
+
+ addr = (pdata->enet_id != XGENE_ENET1) ?
+ XG_MCX_ICM_ECM_DROP_COUNT_REG0_ADDR :
+ ICM_ECM_DROP_COUNT_REG0_ADDR + pdata->port_id * OFFSET_4;
+ count = xgene_enet_rd_mcx_csr(pdata, addr);
+ *rx = ICM_DROP_COUNT(count);
+ *tx = ECM_DROP_COUNT(count);
+ /* Errata: 10GE_4 - ICM_ECM_DROP_COUNT not clear-on-read */
+ addr = (pdata->enet_id != XGENE_ENET1) ?
+ XG_MCX_ECM_CONFIG0_REG_0_ADDR :
+ ECM_CONFIG0_REG_0_ADDR + pdata->port_id * OFFSET_4;
+ xgene_enet_rd_mcx_csr(pdata, addr);
+}
+
static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
{
u32 val;
@@ -587,26 +534,6 @@
static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
{
struct device *dev = &p->pdev->dev;
- struct xgene_enet_desc_ring *ring;
- u32 pb;
- int i;
-
- pb = 0;
- for (i = 0; i < p->rxq_cnt; i++) {
- ring = p->rx_ring[i]->buf_pool;
- pb |= BIT(xgene_enet_get_fpsel(ring->id));
- ring = p->rx_ring[i]->page_pool;
- if (ring)
- pb |= BIT(xgene_enet_get_fpsel(ring->id));
- }
- xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPRESET_ADDR, pb);
-
- pb = 0;
- for (i = 0; i < p->txq_cnt; i++) {
- ring = p->tx_ring[i];
- pb |= BIT(xgene_enet_ring_bufnum(ring->id));
- }
- xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQRESET_ADDR, pb);
if (dev->of_node) {
if (!IS_ERR(p->clk))
@@ -671,6 +598,7 @@
.tx_enable = xgene_sgmac_tx_enable,
.rx_disable = xgene_sgmac_rx_disable,
.tx_disable = xgene_sgmac_tx_disable,
+ .get_drop_cnt = xgene_sgmac_get_drop_cnt,
.set_speed = xgene_sgmac_set_speed,
.set_mac_addr = xgene_sgmac_set_mac_addr,
.set_framesize = xgene_sgmac_set_frame_size,
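
This file and the XGMII file below lose their private copies of the indirect MAC accessors in favor of the shared xgene_enet_rd_mac()/xgene_enet_wr_mac() declared in the header hunk above. A sketch of the indirect-access handshake those helpers perform, reusing the driver's existing offset macros:

static int example_indirect_write(void __iomem *mac, u32 reg, u32 val)
{
	int i;

	/* 1) latch the target register, 2) stage the payload */
	iowrite32(reg, mac + MAC_ADDR_REG_OFFSET);
	iowrite32(val, mac + MAC_WRITE_REG_OFFSET);
	/* 3) issue the write command, then briefly poll for completion */
	iowrite32(XGENE_ENET_WR_CMD, mac + MAC_COMMAND_REG_OFFSET);
	for (i = 0; i < 10; i++) {
		if (ioread32(mac + MAC_COMMAND_DONE_REG_OFFSET)) {
			iowrite32(0, mac + MAC_COMMAND_REG_OFFSET);
			return 0;
		}
		udelay(1);
	}

	return -EIO;	/* command never completed */
}
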
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 423240c..b7d75d0 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -71,21 +71,6 @@
return true;
}
-static void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata,
- u32 wr_addr, u32 wr_data)
-{
- void __iomem *addr, *wr, *cmd, *cmd_done;
-
- addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
- wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
- cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
- cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
-
- if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
- netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
- wr_addr);
-}
-
static void xgene_enet_wr_pcs(struct xgene_enet_pdata *pdata,
u32 wr_addr, u32 wr_data)
{
@@ -148,21 +133,6 @@
return true;
}
-static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata,
- u32 rd_addr, u32 *rd_data)
-{
- void __iomem *addr, *rd, *cmd, *cmd_done;
-
- addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
- rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
- cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
- cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
-
- if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
- netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
- rd_addr);
-}
-
static bool xgene_enet_rd_pcs(struct xgene_enet_pdata *pdata,
u32 rd_addr, u32 *rd_data)
{
@@ -210,6 +180,18 @@
return 0;
}
+static void xgene_xgmac_get_drop_cnt(struct xgene_enet_pdata *pdata,
+ u32 *rx, u32 *tx)
+{
+ u32 count;
+
+ xgene_enet_rd_axg_csr(pdata, XGENET_ICM_ECM_DROP_COUNT_REG0, &count);
+ *rx = ICM_DROP_COUNT(count);
+ *tx = ECM_DROP_COUNT(count);
+ /* Errata: 10GE_4 - ICM_ECM_DROP_COUNT not clear-on-read */
+ xgene_enet_rd_axg_csr(pdata, XGENET_ECM_CONFIG0_REG_0, &count);
+}
+
static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, 0);
@@ -300,7 +282,7 @@
{
u32 data;
- xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
if (enable)
data |= HSTTCTLEN;
@@ -316,7 +298,7 @@
{
u32 data;
- xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
if (enable)
data |= HSTRCTLEN;
@@ -332,7 +314,7 @@
xgene_xgmac_reset(pdata);
- xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
data |= HSTPPEN;
data &= ~HSTLENCHK;
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
@@ -379,7 +361,7 @@
{
u32 data;
- xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTRFEN);
}
@@ -387,7 +369,7 @@
{
u32 data;
- xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTTFEN);
}
@@ -395,7 +377,7 @@
{
u32 data;
- xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTRFEN);
}
@@ -403,7 +385,7 @@
{
u32 data;
- xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
}
@@ -464,26 +446,6 @@
static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
{
struct device *dev = &pdata->pdev->dev;
- struct xgene_enet_desc_ring *ring;
- u32 pb;
- int i;
-
- pb = 0;
- for (i = 0; i < pdata->rxq_cnt; i++) {
- ring = pdata->rx_ring[i]->buf_pool;
- pb |= BIT(xgene_enet_get_fpsel(ring->id));
- ring = pdata->rx_ring[i]->page_pool;
- if (ring)
- pb |= BIT(xgene_enet_get_fpsel(ring->id));
- }
- xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);
-
- pb = 0;
- for (i = 0; i < pdata->txq_cnt; i++) {
- ring = pdata->tx_ring[i];
- pb |= BIT(xgene_enet_ring_bufnum(ring->id));
- }
- xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);
if (dev->of_node) {
if (!IS_ERR(pdata->clk))
@@ -567,6 +529,7 @@
.set_mac_addr = xgene_xgmac_set_mac_addr,
.set_framesize = xgene_xgmac_set_frame_size,
.set_mss = xgene_xgmac_set_mss,
+ .get_drop_cnt = xgene_xgmac_get_drop_cnt,
.link_state = xgene_enet_link_state,
.enable_tx_pause = xgene_xgmac_enable_tx_pause,
.flowctl_rx = xgene_xgmac_flowctl_rx,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index e644a42..a3b4551 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -23,6 +23,7 @@
#define X2_BLOCK_ETH_MAC_CSR_OFFSET 0x3000
#define BLOCK_AXG_MAC_OFFSET 0x0800
+#define BLOCK_AXG_STATS_OFFSET 0x0800
#define BLOCK_AXG_MAC_CSR_OFFSET 0x2000
#define BLOCK_PCS_OFFSET 0x3800
@@ -70,6 +71,8 @@
#define XG_RSIF_CONFIG1_REG_ADDR 0x00b8
#define XG_RSIF_PLC_CLE_BUFF_THRESH 0x1
#define RSIF_PLC_CLE_BUFF_THRESH_SET(dst, val) xgene_set_bits(dst, val, 0, 2)
+#define XG_MCX_ECM_CONFIG0_REG_0_ADDR 0x0070
+#define XG_MCX_ICM_ECM_DROP_COUNT_REG0_ADDR 0x0124
#define XCLE_BYPASS_REG0_ADDR 0x0160
#define XCLE_BYPASS_REG1_ADDR 0x0164
#define XG_CFG_BYPASS_ADDR 0x0204
@@ -80,6 +83,8 @@
#define XG_ENET_SPARE_CFG_REG_ADDR 0x040c
#define XG_ENET_SPARE_CFG_REG_1_ADDR 0x0410
#define XGENET_RX_DV_GATE_REG_0_ADDR 0x0804
+#define XGENET_ECM_CONFIG0_REG_0 0x0870
+#define XGENET_ICM_ECM_DROP_COUNT_REG0 0x0924
#define XGENET_CSR_ECM_CFG_0_ADDR 0x0880
#define XGENET_CSR_MULTI_DPF0_ADDR 0x0888
#define XGENET_CSR_MULTI_DPF1_ADDR 0x088c
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 2b2d870..eac740c 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1218,8 +1218,7 @@
*/
skb = netdev_alloc_skb(dev, ETHERMINPACKET);
if (skb != NULL) {
- data = skb_put(skb, ETHERMINPACKET);
- memset(data, 0, ETHERMINPACKET);
+ data = skb_put_zero(skb, ETHERMINPACKET);
memcpy(data, dev->dev_addr, ETH_ALEN);
memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
bmac_transmit_packet(skb, dev);
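
bmac above, and several drivers below, switch from the open-coded memcpy/memset-after-skb_put idiom to the new skb helpers. A sketch of the equivalences, assuming the <linux/skbuff.h> definitions of this era:

static inline void *sketch_skb_put_data(struct sk_buff *skb,
					const void *data, unsigned int len)
{
	void *tmp = skb_put(skb, len);	/* reserve len bytes at the tail */

	memcpy(tmp, data, len);
	return tmp;
}

static inline void *sketch_skb_put_zero(struct sk_buff *skb,
					unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);
	return tmp;
}
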
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 857df9c..f17a160 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -663,7 +663,7 @@
return;
}
skb_reserve(skb, 2);
- memcpy(skb_put(skb, frame_length), mf->data, frame_length);
+ skb_put_data(skb, mf->data, frame_length);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 3a8a4aa..9a08179 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -223,7 +223,7 @@
skb->protocol = eth_type_trans(skb, ndev);
if (unlikely(buff->is_cso_err)) {
++self->stats.rx.errors;
- __skb_mark_checksum_bad(skb);
+ skb->ip_summed = CHECKSUM_NONE;
} else {
if (buff->is_ip_cso) {
__skb_incr_checksum_unnecessary(skb);
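
With __skb_mark_checksum_bad() removed from the stack, a driver that detects a hardware checksum error now simply declines to vouch for the packet: CHECKSUM_NONE makes the stack verify the checksum in software. A sketch of the pattern (helper name illustrative):

static void example_rx_csum(struct sk_buff *skb, bool csum_err, bool l3_ok)
{
	if (csum_err) {
		skb->ip_summed = CHECKSUM_NONE;	/* stack re-verifies */
		return;
	}
	if (l3_ok)
		__skb_incr_checksum_unnecessary(skb);
}
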
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 5711fbb..041cfb7 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -251,7 +251,7 @@
if (len <= RX_COPYBREAK) {
dma_sync_single_for_cpu(&dev->dev, dma, len, DMA_FROM_DEVICE);
- memcpy(skb_put(skb, len), data, len);
+ skb_put_data(skb, data, len);
dma_sync_single_for_device(&dev->dev, dma, len,
DMA_FROM_DEVICE);
} else {
@@ -264,7 +264,7 @@
}
dma_unmap_page(&dev->dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE);
- memcpy(skb_put(skb, RX_COPYHDR), data, RX_COPYHDR);
+ skb_put_data(skb, data, RX_COPYHDR);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
offset + RX_COPYHDR, len - RX_COPYHDR,
RX_BUF_SIZE);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 5b95bb4..f411936 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1836,7 +1836,9 @@
if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
BUG_ON(!dev->phydev);
- return phy_ethtool_ksettings_get(dev->phydev, cmd);
+ phy_ethtool_ksettings_get(dev->phydev, cmd);
+
+ return 0;
}
supported = (SUPPORTED_Autoneg);
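
The b44 hunk above, and the bcm63xx/genet/tg3 hunks below, track the phylib change that made phy_ethtool_ksettings_get() return void: since the helper can no longer fail, each caller supplies its own return code. The resulting caller pattern, sketched:

static int example_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	if (!dev->phydev)
		return -ENODEV;

	phy_ethtool_ksettings_get(dev->phydev, cmd);

	return 0;
}
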
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 50d88d3..61a88b6 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -609,8 +609,7 @@
dev_kfree_skb(skb);
skb = nskb;
}
- data = skb_put(skb, needed);
- memset(data, 0, needed);
+ data = skb_put_zero(skb, needed);
}
/* point to the next available desc */
@@ -1453,7 +1452,10 @@
if (priv->has_phy) {
if (!dev->phydev)
return -ENODEV;
- return phy_ethtool_ksettings_get(dev->phydev, cmd);
+
+ phy_ethtool_ksettings_get(dev->phydev, cmd);
+
+ return 0;
} else {
cmd->base.autoneg = 0;
cmd->base.speed = (priv->force_speed_100) ?
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 5274501..5333601 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1099,7 +1099,7 @@
skb = nskb;
}
- tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));
+ tsb = skb_push(skb, sizeof(*tsb));
/* Zero-out TSB by default */
memset(tsb, 0, sizeof(*tsb));
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index f619c4c..67fe3d8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4284,8 +4284,8 @@
return 0;
}
-int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc)
+int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *tc)
{
if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 243cb97..c26688d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -486,8 +486,8 @@
/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
-int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc);
+int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *tc);
int bnx2x_get_vf_config(struct net_device *dev, int vf,
struct ifla_vf_info *ivi);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index a851f95..14c236e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10303,7 +10303,7 @@
}
if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
&bp->sp_rtnl_state)){
- if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) {
+ if (netif_carrier_ok(bp->dev)) {
bnx2x_tx_disable(bp);
BNX2X_ERR("PF indicated channel is not servicable anymore. This means this VF device is no longer operational\n");
}
@@ -15351,6 +15351,7 @@
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_NTP_ALL:
bp->rx_filter = HWTSTAMP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 03f55da..11e8a86 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -175,6 +175,8 @@
{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
#ifdef CONFIG_BNXT_SRIOV
+ { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
+ { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
@@ -461,14 +463,17 @@
prod = NEXT_TX(prod);
txr->tx_prod = prod;
- writel(DB_KEY_TX | prod, txr->tx_doorbell);
- writel(DB_KEY_TX | prod, txr->tx_doorbell);
+ if (!skb->xmit_more || netif_xmit_stopped(txq))
+ bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
tx_done:
mmiowb();
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
+ if (skb->xmit_more && !tx_buf->is_push)
+ bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
+
netif_tx_stop_queue(txq);
/* netif_tx_stop_queue() must be done before checking
@@ -582,7 +587,8 @@
if (!page)
return NULL;
- *mapping = dma_map_page(dev, page, 0, PAGE_SIZE, bp->rx_dir);
+ *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
if (dma_mapping_error(dev, *mapping)) {
__free_page(page);
return NULL;
@@ -601,8 +607,9 @@
if (!data)
return NULL;
- *mapping = dma_map_single(&pdev->dev, data + bp->rx_dma_offset,
- bp->rx_buf_use_size, bp->rx_dir);
+ *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
+ bp->rx_buf_use_size, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
if (dma_mapping_error(&pdev->dev, *mapping)) {
kfree(data);
@@ -705,8 +712,9 @@
return -ENOMEM;
}
- mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ mapping = dma_map_page_attrs(&pdev->dev, page, offset,
+ BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
+ DMA_ATTR_WEAK_ORDERING);
if (dma_mapping_error(&pdev->dev, mapping)) {
__free_page(page);
return -EIO;
@@ -799,7 +807,8 @@
return NULL;
}
dma_addr -= bp->rx_dma_offset;
- dma_unmap_page(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir);
+ dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
if (unlikely(!payload))
payload = eth_get_headlen(data_ptr, len);
@@ -841,8 +850,8 @@
}
skb = build_skb(data, 0);
- dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
- bp->rx_dir);
+ dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+ bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
if (!skb) {
kfree(data);
return NULL;
@@ -909,8 +918,9 @@
return NULL;
}
- dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
+ PCI_DMA_FROMDEVICE,
+ DMA_ATTR_WEAK_ORDERING);
skb->data_len += frag_len;
skb->len += frag_len;
@@ -1329,8 +1339,9 @@
tpa_info->mapping = new_mapping;
skb = build_skb(data, 0);
- dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
- bp->rx_dir);
+ dma_unmap_single_attrs(&bp->pdev->dev, mapping,
+ bp->rx_buf_use_size, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
if (!skb) {
kfree(data);
@@ -1771,8 +1782,7 @@
/* Sync BD data before updating doorbell */
wmb();
- writel(DB_KEY_TX | prod, db);
- writel(DB_KEY_TX | prod, db);
+ bnxt_db_write(bp, db, DB_KEY_TX | prod);
}
cpr->cp_raw_cons = raw_cons;
@@ -1788,14 +1798,10 @@
if (event & BNXT_RX_EVENT) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
- writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
- if (event & BNXT_AGG_EVENT) {
- writel(DB_KEY_RX | rxr->rx_agg_prod,
- rxr->rx_agg_doorbell);
- writel(DB_KEY_RX | rxr->rx_agg_prod,
- rxr->rx_agg_doorbell);
- }
+ bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
+ if (event & BNXT_AGG_EVENT)
+ bnxt_db_write(bp, rxr->rx_agg_doorbell,
+ DB_KEY_RX | rxr->rx_agg_prod);
}
return rx_pkts;
}
@@ -1855,13 +1861,11 @@
cpr->cp_raw_cons = raw_cons;
BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
- writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
- writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+ bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
- if (event & BNXT_AGG_EVENT) {
- writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
- writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
- }
+ if (event & BNXT_AGG_EVENT)
+ bnxt_db_write(bp, rxr->rx_agg_doorbell,
+ DB_KEY_RX | rxr->rx_agg_prod);
if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
napi_complete_done(napi, rx_pkts);
@@ -1971,9 +1975,11 @@
if (!data)
continue;
- dma_unmap_single(&pdev->dev, tpa_info->mapping,
- bp->rx_buf_use_size,
- bp->rx_dir);
+ dma_unmap_single_attrs(&pdev->dev,
+ tpa_info->mapping,
+ bp->rx_buf_use_size,
+ bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
tpa_info->data = NULL;
@@ -1993,13 +1999,15 @@
if (BNXT_RX_PAGE_MODE(bp)) {
mapping -= bp->rx_dma_offset;
- dma_unmap_page(&pdev->dev, mapping,
- PAGE_SIZE, bp->rx_dir);
+ dma_unmap_page_attrs(&pdev->dev, mapping,
+ PAGE_SIZE, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
__free_page(data);
} else {
- dma_unmap_single(&pdev->dev, mapping,
- bp->rx_buf_use_size,
- bp->rx_dir);
+ dma_unmap_single_attrs(&pdev->dev, mapping,
+ bp->rx_buf_use_size,
+ bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
kfree(data);
}
}
@@ -2012,8 +2020,10 @@
if (!page)
continue;
- dma_unmap_page(&pdev->dev, rx_agg_buf->mapping,
- BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+ BNXT_RX_PAGE_SIZE,
+ PCI_DMA_FROMDEVICE,
+ DMA_ATTR_WEAK_ORDERING);
rx_agg_buf->page = NULL;
__clear_bit(j, rxr->rx_agg_bmap);
@@ -2856,6 +2866,32 @@
return 0;
}
+static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
+{
+ if (bp->hwrm_short_cmd_req_addr) {
+ struct pci_dev *pdev = bp->pdev;
+
+ dma_free_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
+ bp->hwrm_short_cmd_req_addr,
+ bp->hwrm_short_cmd_req_dma_addr);
+ bp->hwrm_short_cmd_req_addr = NULL;
+ }
+}
+
+static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+
+ bp->hwrm_short_cmd_req_addr =
+ dma_alloc_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
+ &bp->hwrm_short_cmd_req_dma_addr,
+ GFP_KERNEL);
+ if (!bp->hwrm_short_cmd_req_addr)
+ return -ENOMEM;
+
+ return 0;
+}
+
static void bnxt_free_stats(struct bnxt *bp)
{
u32 size, i;
@@ -3203,16 +3239,41 @@
__le32 *resp_len, *valid;
u16 cp_ring_id, len = 0;
struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
+ u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
memset(resp, 0, PAGE_SIZE);
cp_ring_id = le16_to_cpu(req->cmpl_ring);
intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
+ if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+ void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
+ struct hwrm_short_input short_input = {0};
+
+ memcpy(short_cmd_req, req, msg_len);
+ memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
+ msg_len);
+
+ short_input.req_type = req->req_type;
+ short_input.signature =
+ cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
+ short_input.size = cpu_to_le16(msg_len);
+ short_input.req_addr =
+ cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
+
+ data = (u32 *)&short_input;
+ msg_len = sizeof(short_input);
+
+ /* Sync memory write before updating doorbell */
+ wmb();
+
+ max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
+ }
+
/* Write request msg to hwrm channel */
__iowrite32_copy(bp->bar0, data, msg_len / 4);
- for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
+ for (i = msg_len; i < max_req_len; i += 4)
writel(0, bp->bar0 + i);
/* currently supports only one outstanding message */
@@ -4650,6 +4711,7 @@
int rc;
struct hwrm_ver_get_input req = {0};
struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+ u32 dev_caps_cfg;
bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
@@ -4687,6 +4749,11 @@
!resp->chip_metal)
bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
+ dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
+ if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
+ (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
+ bp->flags |= BNXT_FLAG_SHORT_CMD;
+
hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -7036,8 +7103,8 @@
return 0;
}
-static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
- struct tc_to_netdev *ntc)
+static int bnxt_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *ntc)
{
if (ntc->type != TC_SETUP_MQPRIO)
return -EINVAL;
@@ -7345,6 +7412,7 @@
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
+ bnxt_free_hwrm_short_cmd_req(bp);
bnxt_ethtool_free(bp);
bnxt_dcb_free(bp);
kfree(bp->edev);
@@ -7499,10 +7567,9 @@
return rc;
}
-static int bnxt_set_dflt_rings(struct bnxt *bp)
+static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
int dflt_rings, max_rx_rings, max_tx_rings, rc;
- bool sh = true;
if (sh)
bp->flags |= BNXT_FLAG_SHARED_RINGS;
@@ -7595,6 +7662,12 @@
if (rc)
goto init_err_pci_clean;
+ if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+ rc = bnxt_alloc_hwrm_short_cmd_req(bp);
+ if (rc)
+ goto init_err_pci_clean;
+ }
+
rc = bnxt_hwrm_func_reset(bp);
if (rc)
goto init_err_pci_clean;
@@ -7634,8 +7707,10 @@
init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
bp->gro_func = bnxt_gro_func_5730x;
- if (BNXT_CHIP_NUM_57X1X(bp->chip_num))
+ if (BNXT_CHIP_P4_PLUS(bp))
bp->gro_func = bnxt_gro_func_5731x;
+ else
+ bp->flags |= BNXT_FLAG_DOUBLE_DB;
rc = bnxt_hwrm_func_drv_rgtr(bp);
if (rc)
@@ -7673,7 +7748,7 @@
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
bnxt_set_max_func_irqs(bp, max_irqs);
- rc = bnxt_set_dflt_rings(bp);
+ rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
netdev_err(bp->dev, "Not enough rings available.\n");
rc = -ENOMEM;
@@ -7685,9 +7760,7 @@
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
- if (!BNXT_CHIP_NUM_57X0X(bp->chip_num) &&
- !BNXT_CHIP_TYPE_NITRO_A0(bp) &&
- bp->hwrm_spec_code >= 0x10501) {
+ if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
@@ -7759,6 +7832,7 @@
dev_close(dev);
if (system_state == SYSTEM_POWER_OFF) {
+ bnxt_ulp_shutdown(bp);
bnxt_clear_int_mode(bp);
pci_wake_from_d3(pdev, bp->wol);
pci_set_power_state(pdev, PCI_D3hot);
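
When firmware advertises (and requires) the short command format, the full HWRM request is staged in the coherent DMA buffer allocated above and only a small hwrm_short_input descriptor crosses BAR0. A sketch of the staging step, assuming the field names of struct hwrm_short_input (helper name illustrative):

static void example_stage_short_cmd(struct bnxt *bp, struct input *req,
				    u16 msg_len,
				    struct hwrm_short_input *si)
{
	memcpy(bp->hwrm_short_cmd_req_addr, req, msg_len);
	/* zero the tail so firmware never sees stale request bytes */
	memset(bp->hwrm_short_cmd_req_addr + msg_len, 0,
	       BNXT_HWRM_MAX_REQ_LEN - msg_len);

	si->req_type = req->req_type;
	si->signature = cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
	si->size = cpu_to_le16(msg_len);
	si->req_addr = cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
	wmb();	/* publish the staged request before writing BAR0 */
}
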
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 3ef42db..5984423 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -500,6 +500,7 @@
#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
+#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
#define DFLT_HWRM_CMD_TIMEOUT 500
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
@@ -937,31 +938,45 @@
#define CHIP_NUM_57402 0x16d0
#define CHIP_NUM_57404 0x16d1
#define CHIP_NUM_57406 0x16d2
+#define CHIP_NUM_57407 0x16d5
#define CHIP_NUM_57311 0x16ce
#define CHIP_NUM_57312 0x16cf
#define CHIP_NUM_57314 0x16df
+#define CHIP_NUM_57317 0x16e0
#define CHIP_NUM_57412 0x16d6
#define CHIP_NUM_57414 0x16d7
#define CHIP_NUM_57416 0x16d8
#define CHIP_NUM_57417 0x16d9
+#define CHIP_NUM_57412L 0x16da
+#define CHIP_NUM_57414L 0x16db
+
+#define CHIP_NUM_5745X 0xd730
#define BNXT_CHIP_NUM_5730X(chip_num) \
((chip_num) >= CHIP_NUM_57301 && \
(chip_num) <= CHIP_NUM_57304)
#define BNXT_CHIP_NUM_5740X(chip_num) \
- ((chip_num) >= CHIP_NUM_57402 && \
- (chip_num) <= CHIP_NUM_57406)
+ (((chip_num) >= CHIP_NUM_57402 && \
+ (chip_num) <= CHIP_NUM_57406) || \
+ (chip_num) == CHIP_NUM_57407)
#define BNXT_CHIP_NUM_5731X(chip_num) \
((chip_num) == CHIP_NUM_57311 || \
(chip_num) == CHIP_NUM_57312 || \
- (chip_num) == CHIP_NUM_57314)
+ (chip_num) == CHIP_NUM_57314 || \
+ (chip_num) == CHIP_NUM_57317)
#define BNXT_CHIP_NUM_5741X(chip_num) \
((chip_num) >= CHIP_NUM_57412 && \
- (chip_num) <= CHIP_NUM_57417)
+ (chip_num) <= CHIP_NUM_57414L)
+
+#define BNXT_CHIP_NUM_58700(chip_num) \
+ ((chip_num) == CHIP_NUM_58700)
+
+#define BNXT_CHIP_NUM_5745X(chip_num) \
+ ((chip_num) == CHIP_NUM_5745X)
#define BNXT_CHIP_NUM_57X0X(chip_num) \
(BNXT_CHIP_NUM_5730X(chip_num) || BNXT_CHIP_NUM_5740X(chip_num))
@@ -1006,6 +1021,8 @@
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
#define BNXT_FLAG_FW_LLDP_AGENT 0x80000
#define BNXT_FLAG_MULTI_HOST 0x100000
+ #define BNXT_FLAG_SHORT_CMD 0x200000
+ #define BNXT_FLAG_DOUBLE_DB 0x400000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
@@ -1020,6 +1037,13 @@
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
+/* Chip class phase 4 and later */
+#define BNXT_CHIP_P4_PLUS(bp) \
+ (BNXT_CHIP_NUM_57X1X((bp)->chip_num) || \
+ BNXT_CHIP_NUM_5745X((bp)->chip_num) || \
+ (BNXT_CHIP_NUM_58700((bp)->chip_num) && \
+ !BNXT_CHIP_TYPE_NITRO_A0(bp)))
+
struct bnxt_en_dev *edev;
struct bnxt_en_dev * (*ulp_probe)(struct net_device *);
@@ -1106,6 +1130,8 @@
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u32 hwrm_intr_seq_id;
+ void *hwrm_short_cmd_req_addr;
+ dma_addr_t hwrm_short_cmd_req_dma_addr;
void *hwrm_cmd_resp_addr;
dma_addr_t hwrm_cmd_resp_dma_addr;
void *hwrm_dbg_resp_addr;
@@ -1229,6 +1255,14 @@
((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}
+/* For TX and RX ring doorbells */
+static inline void bnxt_db_write(struct bnxt *bp, void __iomem *db, u32 val)
+{
+ writel(val, db);
+ if (bp->flags & BNXT_FLAG_DOUBLE_DB)
+ writel(val, db);
+}
+
extern const u16 bnxt_lhint_arr[];
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
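
bnxt_db_write() centralizes the doorbell workaround: pre-P4 chips get BNXT_FLAG_DOUBLE_DB at probe time (see the gro_func hunk above) and have the value written twice to guard against a dropped doorbell write, while newer chips write once. Combined with the xmit_more hunks, the TX path defers the doorbell while the stack still has frames queued; roughly:

static void example_ring_tx_db(struct bnxt *bp,
			       struct bnxt_tx_ring_info *txr,
			       struct sk_buff *skb,
			       struct netdev_queue *txq, u16 prod)
{
	/* ring only for the last frame of a batch, or on queue stop */
	if (!skb->xmit_more || netif_xmit_stopped(txq))
		bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
}
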
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 11ddf0a..fd11815 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2376,8 +2376,7 @@
/* Sync BD data before updating doorbell */
wmb();
- writel(DB_KEY_TX | txr->tx_prod, txr->tx_doorbell);
- writel(DB_KEY_TX | txr->tx_prod, txr->tx_doorbell);
+ bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | txr->tx_prod);
rc = bnxt_poll_loopback(bp, pkt_size);
dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 8b7464b..77da75a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -266,6 +266,25 @@
}
}
+void bnxt_ulp_shutdown(struct bnxt *bp)
+{
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ int i;
+
+ if (!edev)
+ return;
+
+ for (i = 0; i < BNXT_MAX_ULP; i++) {
+ struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+
+ ops = rtnl_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_shutdown)
+ continue;
+ ops->ulp_shutdown(ulp->handle);
+ }
+}
+
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
{
u16 event_id = le16_to_cpu(cmpl->event_id);
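
bnxt_ulp_shutdown() above gives upper-layer drivers a last callback before power-off. A ULP opts in by populating the new hook when it registers; a sketch (names other than bnxt_ulp_ops are illustrative):

static void example_ulp_shutdown(void *handle)
{
	/* quiesce ULP-owned hardware objects before the NIC powers off */
}

static const struct bnxt_ulp_ops example_ulp_ops = {
	/* .ulp_start, .ulp_stop, ... as before */
	.ulp_shutdown = example_ulp_shutdown,
};
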
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 74f816e..d247106 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -26,6 +26,7 @@
void (*ulp_stop)(void *);
void (*ulp_start)(void *);
void (*ulp_sriov_config)(void *, int);
+ void (*ulp_shutdown)(void *);
};
struct bnxt_msix_entry {
@@ -87,6 +88,7 @@
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
+void bnxt_ulp_shutdown(struct bnxt *bp);
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 9dae327..7d67552 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -63,7 +63,7 @@
tx_buf = &txr->tx_buf_ring[last_tx_cons];
rx_prod = tx_buf->rx_prod;
}
- writel(DB_KEY_RX | rx_prod, rxr->rx_doorbell);
+ bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rx_prod);
}
/* returns the following:
@@ -218,6 +218,7 @@
break;
case XDP_QUERY_PROG:
xdp->prog_attached = !!bp->xdp_prog;
+ xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0;
rc = 0;
break;
default:
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index a205a9f..daca1c9 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -477,7 +477,9 @@
if (!priv->phydev)
return -ENODEV;
- return phy_ethtool_ksettings_get(priv->phydev, cmd);
+ phy_ethtool_ksettings_get(priv->phydev, cmd);
+
+ return 0;
}
static int bcmgenet_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 537d571..d600c41 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12097,7 +12097,9 @@
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
- return phy_ethtool_ksettings_get(phydev, cmd);
+ phy_ethtool_ksettings_get(phydev, cmd);
+
+ return 0;
}
supported = (SUPPORTED_Autoneg);
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 91f7492..4b0168b 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -2992,7 +2992,7 @@
skb = netdev_alloc_skb(dev, pktlen + 2);
if (skb) {
skb_reserve(skb, 2);
- memcpy(skb_put(skb, pktlen), p_recv, pktlen);
+ skb_put_data(skb, p_recv, pktlen);
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 962dcbc..6081c31 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -493,9 +493,8 @@
for (q_no = srn; q_no < ern; q_no++) {
reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
- /* set IPTR & DPTR */
- reg_val |=
- (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR);
+ /* set DPTR */
+ reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;
/* reset BMODE */
reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);
@@ -638,7 +637,7 @@
octeon_write_csr(oct, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count);
octeon_write_csr(oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
- (droq->buffer_size | (OCT_RH_SIZE << 16)));
+ droq->buffer_size);
/* Get the mapped address of the pkt_sent and pkts_credit regs */
droq->pkts_sent_reg =
@@ -1343,8 +1342,7 @@
return 1;
}
- if (!(CFG_GET_OQ_INFO_PTR(conf23xx)) ||
- !(CFG_GET_OQ_REFILL_THRESHOLD(conf23xx))) {
+ if (!CFG_GET_OQ_REFILL_THRESHOLD(conf23xx)) {
dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
__func__);
return 1;
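
With info-pointer mode gone, the BUFF_INFO_SIZE CSR carries only the buffer size, and the firmware now prepends the 8-byte length/info header to the receive buffer itself; that is why the dispatch handlers in lio_main.c and lio_vf_main.c below skip OCT_DROQ_INFO_SIZE before the payload. A sketch:

static void *example_droq_payload(struct octeon_recv_pkt *recv_pkt)
{
	/* skip the in-buffer info header prepended by the firmware */
	return get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE;
}
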
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
index b6117b6..9338a00 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
@@ -165,9 +165,8 @@
reg_val =
octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));
- /* set IPTR & DPTR */
- reg_val |=
- (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR);
+ /* set DPTR */
+ reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;
/* reset BMODE */
reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);
@@ -249,7 +248,7 @@
octeon_write_csr(oct, CN23XX_VF_SLI_OQ_SIZE(oq_no), droq->max_count);
octeon_write_csr(oct, CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(oq_no),
- (droq->buffer_size | (OCT_RH_SIZE << 16)));
+ droq->buffer_size);
/* Get the mapped address of the pkt_sent and pkts_credit regs */
droq->pkts_sent_reg =
@@ -431,11 +430,6 @@
mbox_cmd.fn = (octeon_mbox_callback_t)octeon_pfvf_hs_callback;
mbox_cmd.fn_arg = &status;
- /* Interrupts are not enabled at this point.
- * Enable them with default oq ticks
- */
- oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
-
octeon_mbox_write(oct, &mbox_cmd);
atomic_set(&status, 0);
@@ -444,11 +438,6 @@
schedule_timeout_uninterruptible(1);
} while ((!atomic_read(&status)) && (count++ < 100000));
- /* Disable the interrupt so that the interrupsts will be reenabled
- * with the oq ticks received from the PF
- */
- oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
-
ret = atomic_read(&status);
if (!ret) {
dev_err(&oct->pci_dev->dev, "octeon_pfvf_handshake timeout\n");
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index bdec051..b28253c 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -209,9 +209,6 @@
octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 0);
}
- /* / Select Info Ptr for length & data */
- octeon_write_csr(oct, CN6XXX_SLI_PKT_IPTR, 0xFFFFFFFF);
-
/* / Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);
@@ -314,7 +311,7 @@
octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count);
octeon_write_csr(oct, CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
- (droq->buffer_size | (OCT_RH_SIZE << 16)));
+ droq->buffer_size);
/* Get the mapped address of the pkt_sent and pkts_credit regs */
droq->pkts_sent_reg =
@@ -734,8 +731,7 @@
__func__);
return 1;
}
- if (!(CFG_GET_OQ_INFO_PTR(conf6xxx)) ||
- !(CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx))) {
+ if (!CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx)) {
dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
__func__);
return 1;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 796c2cb..adde774 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -202,9 +202,13 @@
netdev->name);
break;
- case OCTNET_CMD_ENABLE_VLAN_FILTER:
- dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n",
- netdev->name);
+ case OCTNET_CMD_VLAN_FILTER_CTL:
+ if (nctrl->ncmd.s.param1)
+ dev_info(&oct->pci_dev->dev,
+ "%s VLAN filter enabled\n", netdev->name);
+ else
+ dev_info(&oct->pci_dev->dev,
+ "%s VLAN filter disabled\n", netdev->name);
break;
case OCTNET_CMD_ADD_VLAN_FILTER:
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 579dc73..28ecda3 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -700,6 +700,13 @@
lio->msg_enable = msglvl;
}
+static void lio_vf_set_msglevel(struct net_device *netdev, u32 msglvl)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ lio->msg_enable = msglvl;
+}
+
static void
lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
@@ -984,11 +991,11 @@
data[i++] =
CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
/*# of instructions processed */
- data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
- stats.instr_processed);
+ data[i++] = CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.instr_processed);
/*# of instructions could not be processed */
- data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
- stats.instr_dropped);
+ data[i++] = CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.instr_dropped);
/*bytes sent through the queue */
data[i++] =
CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
@@ -1801,7 +1808,7 @@
(octeon_read_csr64(
oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
(0x3fffff00000000UL)) |
- rx_max_coalesced_frames);
+ (rx_max_coalesced_frames - 1));
/*consider writing to resend bit here*/
}
intrmod->rx_frames = rx_max_coalesced_frames;
@@ -2611,7 +2618,7 @@
.get_regs_len = lio_get_regs_len,
.get_regs = lio_get_regs,
.get_msglevel = lio_get_msglevel,
- .set_msglevel = lio_set_msglevel,
+ .set_msglevel = lio_vf_set_msglevel,
.get_sset_count = lio_vf_get_sset_count,
.get_coalesce = lio_get_intr_coalesce,
.set_coalesce = lio_set_intr_coalesce,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 927617c..51583ae 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1421,7 +1421,7 @@
*/
static void octeon_destroy_resources(struct octeon_device *oct)
{
- int i;
+ int i, refcount;
struct msix_entry *msix_entries;
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
@@ -1556,10 +1556,14 @@
/* fallthrough */
case OCT_DEV_PCI_MAP_DONE:
+ refcount = octeon_deregister_device(oct);
+
if (!fw_type_is_none()) {
- /* Soft reset the octeon device before exiting */
- if (!OCTEON_CN23XX_PF(oct) ||
- (OCTEON_CN23XX_PF(oct) && !oct->octeon_id))
+ /* Soft reset the octeon device before exiting.
+ * Implementation note: here, we reset the device
+ * if it is a CN6XXX OR the last CN23XX device.
+ */
+ if (OCTEON_CN6XXX(oct) || !refcount)
oct->fn_list.soft_reset(oct);
}
@@ -3020,6 +3024,7 @@
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
conf.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
@@ -3586,6 +3591,10 @@
(lio->dev_capability & NETIF_F_LRO))
request &= ~NETIF_F_LRO;
+ if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
+ request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+
return request;
}
@@ -3598,14 +3607,14 @@
{
struct lio *lio = netdev_priv(netdev);
- if (!((netdev->features ^ features) & NETIF_F_LRO))
- return 0;
-
- if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
+ if ((features & NETIF_F_LRO) &&
+ (lio->dev_capability & NETIF_F_LRO) &&
+ !(netdev->features & NETIF_F_LRO))
liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
else if (!(features & NETIF_F_LRO) &&
- (lio->dev_capability & NETIF_F_LRO))
+ (lio->dev_capability & NETIF_F_LRO) &&
+ (netdev->features & NETIF_F_LRO))
liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
@@ -3624,6 +3633,17 @@
liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
OCTNET_CMD_RXCSUM_DISABLE);
+ if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
+ OCTNET_CMD_VLAN_FILTER_ENABLE);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
+ OCTNET_CMD_VLAN_FILTER_DISABLE);
+
return 0;
}
@@ -3694,6 +3714,9 @@
struct octeon_device *oct = lio->oct_dev;
int retval;
+ if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
+ return -EINVAL;
+
retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
if (!retval)
cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
@@ -3876,7 +3899,7 @@
union oct_link_status *ls;
int i;
- if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
+ if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
recv_pkt->buffer_size[0],
recv_pkt->rh.r_nic_info.gmxport);
@@ -3884,7 +3907,8 @@
}
gmxport = recv_pkt->rh.r_nic_info.gmxport;
- ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
+ ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
+ OCT_DROQ_INFO_SIZE);
octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
for (i = 0; i < oct->ifcount; i++) {
@@ -4191,7 +4215,8 @@
liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
- liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);
+ liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
+ OCTNET_CMD_VLAN_FILTER_ENABLE);
if ((debug != -1) && (debug & NETIF_MSG_HW))
liquidio_set_feature(netdev,
@@ -4441,7 +4466,7 @@
u64 *data, vf_num;
notice = recv_pkt->rh.r.ossp;
- data = (u64 *)get_rbd(recv_pkt->buffer_ptr[0]);
+ data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
/* the first 64-bit word of data is the vf_num */
vf_num = data[0];
@@ -4511,6 +4536,15 @@
atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
+ /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
+ * since that is what is required for the reference to be removed
+ * during de-initialization (see 'octeon_destroy_resources').
+ */
+ octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
+ PCI_SLOT(octeon_dev->pci_dev->devfn),
+ PCI_FUNC(octeon_dev->pci_dev->devfn),
+ true);
+
octeon_dev->app_mode = CVM_DRV_INVALID_APP;
if (OCTEON_CN23XX_PF(octeon_dev)) {
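
The LRO and VLAN-filter branches in liquidio_set_features() above share one toggle pattern: issue a firmware command only when the requested state differs from netdev->features and the capability is actually present. Sketched generically (helper name illustrative; liquidio_set_feature() is the driver's existing command wrapper):

static void example_toggle_feature(struct net_device *netdev,
				   netdev_features_t features,
				   netdev_features_t flag,
				   int cmd, u16 on, u16 off)
{
	struct lio *lio = GET_LIO(netdev);

	if (!(lio->dev_capability & flag))
		return;

	if ((features & flag) && !(netdev->features & flag))
		liquidio_set_feature(netdev, cmd, on);
	else if (!(features & flag) && (netdev->features & flag))
		liquidio_set_feature(netdev, cmd, off);
}
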
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 34c7782..9b24710 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -879,8 +879,6 @@
*/
static void octeon_pci_flr(struct octeon_device *oct)
{
- u16 status;
-
pci_save_state(oct->pci_dev);
pci_cfg_access_lock(oct->pci_dev);
@@ -889,20 +887,7 @@
pci_write_config_word(oct->pci_dev, PCI_COMMAND,
PCI_COMMAND_INTX_DISABLE);
- /* Wait for Transaction Pending bit clean */
- msleep(100);
- pcie_capability_read_word(oct->pci_dev, PCI_EXP_DEVSTA, &status);
- if (status & PCI_EXP_DEVSTA_TRPND) {
- dev_info(&oct->pci_dev->dev, "Function reset incomplete after 100ms, sleeping for 5 seconds\n");
- ssleep(5);
- pcie_capability_read_word(oct->pci_dev, PCI_EXP_DEVSTA,
- &status);
- if (status & PCI_EXP_DEVSTA_TRPND)
- dev_info(&oct->pci_dev->dev, "Function reset still incomplete after 5s, reset anyway\n");
- }
- pcie_capability_set_word(oct->pci_dev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_BCR_FLR);
- mdelay(100);
+ pcie_flr(oct->pci_dev);
pci_cfg_access_unlock(oct->pci_dev);
@@ -2100,6 +2085,7 @@
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
conf.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
@@ -2732,7 +2718,7 @@
int gmxport = 0;
int i;
- if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
+ if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
recv_pkt->buffer_size[0],
recv_pkt->rh.r_nic_info.gmxport);
@@ -2740,7 +2726,8 @@
}
gmxport = recv_pkt->rh.r_nic_info.gmxport;
- ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
+ ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
+ OCT_DROQ_INFO_SIZE);
octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
@@ -3011,10 +2998,6 @@
liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
- if ((debug != -1) && (debug & NETIF_MSG_HW))
- liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE,
- 0);
-
if (setup_link_status_change_wq(netdev))
goto setup_nic_dev_fail;
@@ -3202,13 +3185,28 @@
if (octeon_setup_interrupt(oct))
return 1;
- if (cn23xx_octeon_pfvf_handshake(oct))
- return 1;
+ atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
+
+ /* ***************************************************************
+ * The interrupts need to be enabled for the PF<-->VF handshake.
+ * They are [re]-enabled after the PF<-->VF handshake so that the
+ * correct OQ tick value is used (i.e. the value retrieved from
+ * the PF as part of the handshake).
+ */
/* Enable Octeon device interrupts */
oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
- atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
+ if (cn23xx_octeon_pfvf_handshake(oct))
+ return 1;
+
+ /* Here we [re]-enable the interrupts so that the correct OQ tick value
+ * is used (i.e. the value that was retrieved during the handshake)
+ */
+
+ /* Enable Octeon device interrupts */
+ oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
+ /* *************************************************************** */
/* Enable the input and output queues for this Octeon device */
if (oct->fn_list.enable_io_queues(oct)) {
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 8ea2323..231dd7f 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -173,6 +173,8 @@
/*------------------------- End Scatter/Gather ---------------------------*/
+#define OCTNET_FRM_LENGTH_SIZE 8
+
#define OCTNET_FRM_PTP_HEADER_SIZE 8
#define OCTNET_FRM_HEADER_SIZE 22 /* VLAN + Ethernet */
@@ -215,7 +217,7 @@
#define OCTNET_CMD_VERBOSE_ENABLE 0x14
#define OCTNET_CMD_VERBOSE_DISABLE 0x15
-#define OCTNET_CMD_ENABLE_VLAN_FILTER 0x16
+#define OCTNET_CMD_VLAN_FILTER_CTL 0x16
#define OCTNET_CMD_ADD_VLAN_FILTER 0x17
#define OCTNET_CMD_DEL_VLAN_FILTER 0x18
#define OCTNET_CMD_VXLAN_PORT_CONFIG 0x19
@@ -230,6 +232,8 @@
#define OCTNET_CMD_RXCSUM_DISABLE 0x1
#define OCTNET_CMD_TXCSUM_ENABLE 0x0
#define OCTNET_CMD_TXCSUM_DISABLE 0x1
+#define OCTNET_CMD_VLAN_FILTER_ENABLE 0x1
+#define OCTNET_CMD_VLAN_FILTER_DISABLE 0x0
/* RX(packets coming from wire) Checksum verification flags */
/* TCP/UDP csum */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index d29ebc5..f229d79 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -47,7 +47,7 @@
/* CN6xxx OQ configuration macros */
#define CN6XXX_MAX_OUTPUT_QUEUES 32
#define CN6XXX_MAX_OQ_DESCRIPTORS 2048
-#define CN6XXX_OQ_BUF_SIZE 1536
+#define CN6XXX_OQ_BUF_SIZE 1664
#define CN6XXX_OQ_PKTSPER_INTR ((CN6XXX_MAX_OQ_DESCRIPTORS < 512) ? \
(CN6XXX_MAX_OQ_DESCRIPTORS / 4) : 128)
#define CN6XXX_OQ_REFIL_THRESHOLD ((CN6XXX_MAX_OQ_DESCRIPTORS < 512) ? \
@@ -78,7 +78,7 @@
#define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
#define CN23XX_MAX_OQ_DESCRIPTORS 512
-#define CN23XX_OQ_BUF_SIZE 1536
+#define CN23XX_OQ_BUF_SIZE 1664
#define CN23XX_OQ_PKTSPER_INTR 128
/*#define CAVIUM_ONLY_CN23XX_RX_PERF*/
#define CN23XX_OQ_REFIL_THRESHOLD 16
@@ -98,8 +98,6 @@
#define OCTEON_32BYTE_INSTR 32
#define OCTEON_64BYTE_INSTR 64
#define OCTEON_MAX_BASE_IOQ 4
-#define OCTEON_OQ_BUFPTR_MODE 0
-#define OCTEON_OQ_INFOPTR_MODE 1
#define OCTEON_DMA_INTR_PKT 64
#define OCTEON_DMA_INTR_TIME 1000
@@ -125,7 +123,6 @@
#define CFG_SET_IQ_INTR_PKT(cfg, val) (cfg)->iq.iq_intr_pkt = val
#define CFG_GET_OQ_MAX_Q(cfg) ((cfg)->oq.max_oqs)
-#define CFG_GET_OQ_INFO_PTR(cfg) ((cfg)->oq.info_ptr)
#define CFG_GET_OQ_PKTS_PER_INTR(cfg) ((cfg)->oq.pkts_per_intr)
#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
@@ -266,9 +263,6 @@
*/
u64 refill_threshold:16;
- /** If set, the Output queue uses info-pointer mode. (Default: 1) */
- u64 info_ptr:32;
-
/* Max number of OQs available */
u64 max_oqs:8;
@@ -276,9 +270,6 @@
/* Max number of OQs available */
u64 max_oqs:8;
- /** If set, the Output queue uses info-pointer mode. (Default: 1) */
- u64 info_ptr:32;
-
/** The number of buffers that were consumed during packet processing by
* the driver on this Output queue before the driver attempts to
* replenish
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index e21b477..623e28c 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -51,7 +51,6 @@
/** OQ attributes */
.oq = {
.max_oqs = CN6XXX_CFG_IO_QUEUES,
- .info_ptr = OCTEON_OQ_INFOPTR_MODE,
.refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
.oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
.oq_intr_time = CN6XXX_OQ_INTR_TIME,
@@ -161,7 +160,6 @@
/** OQ attributes */
.oq = {
.max_oqs = CN6XXX_CFG_IO_QUEUES,
- .info_ptr = OCTEON_OQ_INFOPTR_MODE,
.refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
.oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
.oq_intr_time = CN6XXX_OQ_INTR_TIME,
@@ -328,7 +326,6 @@
/** OQ attributes */
.oq = {
.max_oqs = CN6XXX_CFG_IO_QUEUES,
- .info_ptr = OCTEON_OQ_INFOPTR_MODE,
.refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
.oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
.oq_intr_time = CN6XXX_OQ_INTR_TIME,
@@ -432,7 +429,6 @@
/** OQ attributes */
.oq = {
.max_oqs = CN23XX_CFG_IO_QUEUES,
- .info_ptr = OCTEON_OQ_INFOPTR_MODE,
.pkts_per_intr = CN23XX_OQ_PKTSPER_INTR,
.refill_threshold = CN23XX_OQ_REFIL_THRESHOLD,
.oq_intr_pkt = CN23XX_OQ_INTR_PKT,
@@ -543,7 +539,11 @@
"BASE", "NIC", "UNKNOWN"};
static struct octeon_device *octeon_device[MAX_OCTEON_DEVICES];
+static atomic_t adapter_refcounts[MAX_OCTEON_DEVICES];
+
static u32 octeon_device_count;
+/* locks device array (i.e. octeon_device[]) */
+static spinlock_t octeon_devices_lock;
static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];
@@ -561,6 +561,7 @@
memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES));
for (i = 0; i < MAX_OCTEON_DEVICES; i++)
oct_set_config_info(i, conf_type);
+ spin_lock_init(&octeon_devices_lock);
}
static void *__retrieve_octeon_config_info(struct octeon_device *oct,
@@ -720,23 +721,27 @@
u32 oct_idx = 0;
struct octeon_device *oct = NULL;
+ spin_lock(&octeon_devices_lock);
+
for (oct_idx = 0; oct_idx < MAX_OCTEON_DEVICES; oct_idx++)
if (!octeon_device[oct_idx])
break;
- if (oct_idx == MAX_OCTEON_DEVICES)
- return NULL;
+ if (oct_idx < MAX_OCTEON_DEVICES) {
+ oct = octeon_allocate_device_mem(pci_id, priv_size);
+ if (oct) {
+ octeon_device_count++;
+ octeon_device[oct_idx] = oct;
+ }
+ }
- oct = octeon_allocate_device_mem(pci_id, priv_size);
+ spin_unlock(&octeon_devices_lock);
if (!oct)
return NULL;
spin_lock_init(&oct->pci_win_lock);
spin_lock_init(&oct->mem_access_lock);
- octeon_device_count++;
- octeon_device[oct_idx] = oct;
-
oct->octeon_id = oct_idx;
snprintf(oct->device_name, sizeof(oct->device_name),
"LiquidIO%d", (oct->octeon_id));
@@ -744,6 +749,72 @@
return oct;
}
+/** Register a device's bus location at initialization time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @param bus - PCIe bus #
+ * @param dev - PCIe device #
+ * @param func - PCIe function #
+ * @param is_pf - TRUE for PF, FALSE for VF
+ * @return reference count of device's adapter
+ */
+int octeon_register_device(struct octeon_device *oct,
+ int bus, int dev, int func, int is_pf)
+{
+ int idx, refcount;
+
+ oct->loc.bus = bus;
+ oct->loc.dev = dev;
+ oct->loc.func = func;
+
+ oct->adapter_refcount = &adapter_refcounts[oct->octeon_id];
+ atomic_set(oct->adapter_refcount, 0);
+
+ spin_lock(&octeon_devices_lock);
+ for (idx = (int)oct->octeon_id - 1; idx >= 0; idx--) {
+ if (!octeon_device[idx]) {
+ dev_err(&oct->pci_dev->dev,
+ "%s: Internal driver error, missing dev",
+ __func__);
+ spin_unlock(&octeon_devices_lock);
+ atomic_inc(oct->adapter_refcount);
+ return 1; /* here, refcount is guaranteed to be 1 */
+ }
+ /* if another device is at same bus/dev, use its refcounter */
+ if ((octeon_device[idx]->loc.bus == bus) &&
+ (octeon_device[idx]->loc.dev == dev)) {
+ oct->adapter_refcount =
+ octeon_device[idx]->adapter_refcount;
+ break;
+ }
+ }
+ spin_unlock(&octeon_devices_lock);
+
+ atomic_inc(oct->adapter_refcount);
+ refcount = atomic_read(oct->adapter_refcount);
+
+ dev_dbg(&oct->pci_dev->dev, "%s: %02x:%02x:%d refcount %u", __func__,
+ oct->loc.bus, oct->loc.dev, oct->loc.func, refcount);
+
+ return refcount;
+}
+
+/** Deregister a device at de-initialization time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @return reference count of device's adapter
+ */
+int octeon_deregister_device(struct octeon_device *oct)
+{
+ int refcount;
+
+ atomic_dec(oct->adapter_refcount);
+ refcount = atomic_read(oct->adapter_refcount);
+
+ dev_dbg(&oct->pci_dev->dev, "%s: %04d:%02d:%d refcount %u", __func__,
+ oct->loc.bus, oct->loc.dev, oct->loc.func, refcount);
+
+ return refcount;
+}
+
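Taken together, octeon_register_device() and octeon_deregister_device() give each physical adapter one shared reference count: a function that finds an earlier entry at the same PCIe bus/device adopts that entry's counter. A minimal userspace model of that sharing, with all names and values invented for illustration:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

struct dev {
	int bus, devno, func;
	atomic_int own_count;    /* private storage for the counter */
	atomic_int *adapter_ref; /* may point at an earlier dev's counter */
};

/* Mirrors the driver's rule: scan earlier entries; if one shares
 * bus/device, adopt its counter, otherwise keep our own.
 */
static int register_dev(struct dev *tab, int idx)
{
	int i;

	tab[idx].adapter_ref = &tab[idx].own_count;
	for (i = idx - 1; i >= 0; i--) {
		if (tab[i].bus == tab[idx].bus &&
		    tab[i].devno == tab[idx].devno) {
			tab[idx].adapter_ref = tab[i].adapter_ref;
			break;
		}
	}
	return atomic_fetch_add(tab[idx].adapter_ref, 1) + 1;
}

int main(void)
{
	struct dev tab[2] = {
		{ .bus = 3, .devno = 0, .func = 0 },
		{ .bus = 3, .devno = 0, .func = 1 }, /* same adapter */
	};

	assert(register_dev(tab, 0) == 1);
	assert(register_dev(tab, 1) == 2); /* second function shares it */
	printf("adapter refcount: %d\n", atomic_load(tab[0].adapter_ref));
	return 0;
}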
int
octeon_allocate_ioq_vector(struct octeon_device *oct)
{
@@ -1161,13 +1232,15 @@
cs = &core_setup[oct->octeon_id];
- if (recv_pkt->buffer_size[0] != sizeof(*cs)) {
+ if (recv_pkt->buffer_size[0] != (sizeof(*cs) + OCT_DROQ_INFO_SIZE)) {
dev_dbg(&oct->pci_dev->dev, "Core setup bytes expected %u found %d\n",
(u32)(sizeof(*cs) + OCT_DROQ_INFO_SIZE),
recv_pkt->buffer_size[0]);
}
- memcpy(cs, get_rbd(recv_pkt->buffer_ptr[0]), sizeof(*cs));
+ memcpy(cs, get_rbd(
+ recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE, sizeof(*cs));
+
strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME);
strncpy(oct->boardinfo.serial_number, cs->board_serial_number,
OCT_SERIAL_LEN);
@@ -1354,13 +1427,15 @@
void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
{
u64 instr_cnt;
+ u32 pkts_pend;
struct octeon_device *oct = NULL;
/* the whole thing needs to be atomic, ideally */
if (droq) {
+ pkts_pend = (u32)atomic_read(&droq->pkts_pending);
spin_lock_bh(&droq->lock);
- writel(droq->pkt_count, droq->pkts_sent_reg);
- droq->pkt_count = 0;
+ writel(droq->pkt_count - pkts_pend, droq->pkts_sent_reg);
+ droq->pkt_count = pkts_pend;
/* this write needs to be flushed before we release the lock */
mmiowb();
spin_unlock_bh(&droq->lock);
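The reworked lio_enable_irq() stops acknowledging packets that NAPI has not yet processed: only pkt_count - pkts_pending credits go back to the hardware, and the software count restarts at the pending value. A toy calculation of that bookkeeping, with invented numbers:

#include <stdio.h>

int main(void)
{
	unsigned int pkt_count = 40; /* packets counted on this DROQ */
	unsigned int pkts_pend = 8;  /* not yet handed to the stack */

	/* Credits the hardware may reclaim now... */
	printf("credits written: %u\n", pkt_count - pkts_pend); /* 32 */
	/* ...and the count the driver keeps for the pending ones. */
	printf("pkt_count kept: %u\n", pkts_pend);              /* 8 */
	return 0;
}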
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 92f67de..c90ed48 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -544,6 +544,14 @@
u32 tx_max_coalesced_frames;
bool cores_crashed;
+
+ struct {
+ int bus;
+ int dev;
+ int func;
+ } loc;
+
+ atomic_t *adapter_refcount; /* reference count of adapter */
};
#define OCT_DRV_ONLINE 1
@@ -572,6 +580,23 @@
struct octeon_device *octeon_allocate_device(u32 pci_id,
u32 priv_size);
+/** Register a device's bus location at initialization time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @param bus - PCIe bus #
+ * @param dev - PCIe device #
+ * @param func - PCIe function #
+ * @param is_pf - TRUE for PF, FALSE for VF
+ * @return reference count of device's adapter
+ */
+int octeon_register_device(struct octeon_device *oct,
+ int bus, int dev, int func, int is_pf);
+
+/** Deregister a device at de-initialization time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @return reference count of device's adapter
+ */
+int octeon_deregister_device(struct octeon_device *oct);
+
/** Initialize the driver's dispatch list which is a mix of a hash table
* and a linked list. This is done at driver load time.
* @param octeon_dev - pointer to the octeon device structure.
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 286be55..2e190de 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -181,10 +181,7 @@
droq->recv_buf_list[i].buffer = buf;
droq->recv_buf_list[i].data = get_rbd(buf);
- droq->info_list[i].length = 0;
-
- /* map ring buffers into memory */
- desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
+ desc_ring[i].info_ptr = 0;
desc_ring[i].buffer_ptr =
lio_map_ring(droq->recv_buf_list[i].buffer);
}
@@ -205,9 +202,6 @@
octeon_droq_destroy_ring_buffers(oct, droq);
vfree(droq->recv_buf_list);
- if (droq->info_base_addr)
- lio_free_info_buffer(oct, droq);
-
if (droq->desc_ring)
lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
droq->desc_ring, droq->desc_ring_dma);
@@ -280,14 +274,6 @@
dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
droq->max_count);
- droq->info_list = lio_alloc_info_buffer(oct, droq);
- if (!droq->info_list) {
- dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
- lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
- droq->desc_ring, droq->desc_ring_dma);
- return 1;
- }
-
droq->recv_buf_list = (struct octeon_recv_buffer *)
vmalloc_node(droq->max_count *
OCT_DROQ_RECVBUF_SIZE,
@@ -357,7 +343,7 @@
u32 i, bytes_left;
struct octeon_skb_page_info *pg_info;
- info = &droq->info_list[idx];
+ info = (struct octeon_droq_info *)droq->recv_buf_list[idx].data;
recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch));
if (!recv_info)
@@ -425,8 +411,7 @@
droq->max_count);
desc_refilled++;
droq->refill_count--;
- } while (droq->recv_buf_list[droq->refill_idx].
- buffer);
+ } while (droq->recv_buf_list[droq->refill_idx].buffer);
}
refill_index = incr_index(refill_index, 1, droq->max_count);
} /* while */
@@ -490,10 +475,8 @@
droq->recv_buf_list[droq->refill_idx].data = data;
desc_ring[droq->refill_idx].buffer_ptr =
- lio_map_ring(droq->recv_buf_list[droq->
- refill_idx].buffer);
- /* Reset any previous values in the length field. */
- droq->info_list[droq->refill_idx].length = 0;
+ lio_map_ring(droq->recv_buf_list[
+ droq->refill_idx].buffer);
droq->refill_idx = incr_index(droq->refill_idx, 1,
droq->max_count);
@@ -542,11 +525,7 @@
static inline u32
octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
{
- u32 buf_cnt = 0;
-
- while (total_len > (buf_size * buf_cnt))
- buf_cnt++;
- return buf_cnt;
+ return ((total_len + buf_size - 1) / buf_size);
}
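The rewritten octeon_droq_get_bufcount() swaps the counting loop for the standard ceiling-division idiom; the two agree for any total_len that doesn't overflow the addition. A small standalone check, using the new 1664-byte buffer size from the hunks above:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Old form: count buffers by repeated comparison. */
static uint32_t bufcount_loop(uint32_t buf_size, uint32_t total_len)
{
	uint32_t buf_cnt = 0;

	while (total_len > (buf_size * buf_cnt))
		buf_cnt++;
	return buf_cnt;
}

/* New form: round up without a loop. */
static uint32_t bufcount_ceil(uint32_t buf_size, uint32_t total_len)
{
	return (total_len + buf_size - 1) / buf_size;
}

int main(void)
{
	uint32_t len;

	for (len = 0; len <= 5 * 1664; len++)
		assert(bufcount_loop(1664, len) == bufcount_ceil(1664, len));
	printf("bufcount(1664, 3000) = %u\n", bufcount_ceil(1664, 3000)); /* 2 */
	return 0;
}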
static int
@@ -594,11 +573,12 @@
struct octeon_droq_info *info;
for (i = 0; i < cnt; i++) {
- info = &droq->info_list[droq->read_idx];
+ info = (struct octeon_droq_info *)
+ droq->recv_buf_list[droq->read_idx].data;
octeon_swap_8B_data((u64 *)info, 2);
if (info->length) {
- info->length -= OCT_RH_SIZE;
+ info->length += OCTNET_FRM_LENGTH_SIZE;
droq->stats.bytes_received += info->length;
buf_cnt = octeon_droq_get_bufcount(droq->buffer_size,
(u32)info->length);
@@ -630,7 +610,8 @@
struct octeon_skb_page_info *pg_info;
void *buf;
- info = &droq->info_list[droq->read_idx];
+ info = (struct octeon_droq_info *)
+ droq->recv_buf_list[droq->read_idx].data;
octeon_swap_8B_data((u64 *)info, 2);
if (!info->length) {
@@ -644,9 +625,10 @@
}
/* Len of resp hdr is included in the received data len. */
- info->length -= OCT_RH_SIZE;
rh = &info->rh;
+ info->length += OCTNET_FRM_LENGTH_SIZE;
+ rh->r_dh.len += (ROUNDUP8(OCT_DROQ_INFO_SIZE) / sizeof(u64));
total_len += (u32)info->length;
if (opcode_slow_path(rh)) {
u32 buf_cnt;
@@ -690,8 +672,8 @@
nicbuf,
cpy_len,
idx);
- buf = droq->recv_buf_list[idx].
- buffer;
+ buf = droq->recv_buf_list[
+ idx].buffer;
recv_buffer_fast_free(buf);
droq->recv_buf_list[idx].buffer
= NULL;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index 9781577..6efd139 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -51,11 +51,11 @@
* about the packet.
*/
struct octeon_droq_info {
- /** The Output Receive Header. */
- union octeon_rh rh;
-
/** The Length of the packet. */
u64 length;
+
+ /** The Output Receive Header. */
+ union octeon_rh rh;
};
#define OCT_DROQ_INFO_SIZE (sizeof(struct octeon_droq_info))
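With the separate info-pointer list removed, the reordered structure suggests an in-buffer layout where the 8-byte length word leads each receive buffer, followed by the receive header and then the payload; that reading is inferred from the surrounding hunks, which now take the info block from recv_buf_list[].data. A minimal sketch of parsing such a block, with the layout and values assumed for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the reordered struct: length first, receive header second. */
struct droq_info {
	uint64_t length;
	uint64_t rh;
};

int main(void)
{
	uint8_t buffer[64];
	struct droq_info info;
	uint64_t words[2] = { 128, 0xab }; /* invented length and header */

	/* The device writes the info block at the head of the buffer... */
	memcpy(buffer, words, sizeof(words));

	/* ...and the driver now reads it straight from the buffer data,
	 * with the packet payload starting at OCT_DROQ_INFO_SIZE.
	 */
	memcpy(&info, buffer, sizeof(info));
	printf("length=%llu, payload at offset %zu\n",
	       (unsigned long long)info.length, sizeof(struct droq_info));
	return 0;
}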
@@ -294,9 +294,6 @@
*/
u32 max_empty_descs;
- /** The 8B aligned info ptrs begin from this address. */
- struct octeon_droq_info *info_list;
-
/** The receive buffer list. This list has the virtual addresses of the
* buffers.
*/
@@ -324,15 +321,6 @@
/** DMA mapped address of the DROQ descriptor ring. */
size_t desc_ring_dma;
- /** Info ptr list are allocated at this virtual address. */
- void *info_base_addr;
-
- /** DMA mapped address of the info list */
- dma_addr_t info_list_dma;
-
- /** Allocated size of info list. */
- u32 info_alloc_size;
-
/** application context */
void *app_ctx;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index 5063a12..5c3c8da 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -251,7 +251,7 @@
/** The size of each buffer in soft command buffer pool
*/
-#define SOFT_COMMAND_BUFFER_SIZE 1536
+#define SOFT_COMMAND_BUFFER_SIZE 2048
struct octeon_soft_command {
/** Soft command buffer info. */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
index 5cca73b..57af7df 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
@@ -178,7 +178,10 @@
break;
}
}
- writeq(mbox_cmd->data[i], mbox->mbox_write_reg);
+ if (ret == OCTEON_MBOX_STATUS_SUCCESS)
+ writeq(mbox_cmd->data[i], mbox->mbox_write_reg);
+ else
+ break;
}
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
index c9376fe..1def22a 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
@@ -20,16 +20,16 @@
/* Macros for Mail Box Communication */
-#define OCTEON_MBOX_DATA_MAX 32
+#define OCTEON_MBOX_DATA_MAX 32
#define OCTEON_VF_ACTIVE 0x1
#define OCTEON_VF_FLR_REQUEST 0x2
#define OCTEON_PF_CHANGED_VF_MACADDR 0x4
/* Macros for read acknowledgement */
-#define OCTEON_PFVFACK 0xffffffffffffffff
-#define OCTEON_PFVFSIG 0x1122334455667788
-#define OCTEON_PFVFERR 0xDEADDEADDEADDEAD
+#define OCTEON_PFVFACK 0xffffffffffffffffULL
+#define OCTEON_PFVFSIG 0x1122334455667788ULL
+#define OCTEON_PFVFERR 0xDEADDEADDEADDEADULL
#define LIO_MBOX_WRITE_WAIT_CNT 1000
#define LIO_MBOX_WRITE_WAIT_TIME msecs_to_jiffies(1)
@@ -74,8 +74,8 @@
OCTEON_MBOX_STATE_REQUEST_RECEIVED = 4,
OCTEON_MBOX_STATE_RESPONSE_PENDING = 8,
OCTEON_MBOX_STATE_RESPONSE_RECEIVING = 16,
- OCTEON_MBOX_STATE_RESPONSE_RECEIVED = 16,
- OCTEON_MBOX_STATE_ERROR = 32
+ OCTEON_MBOX_STATE_RESPONSE_RECEIVED = 32,
+ OCTEON_MBOX_STATE_ERROR = 64
};
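The enum fix matters because these states are tested as bits: the old definition gave RESPONSE_RECEIVING and RESPONSE_RECEIVED the same value (16), making them indistinguishable. A tiny standalone demonstration of the ambiguity, with the constant values copied from the old and new enums:

#include <stdio.h>

/* Old values: RECEIVING and RECEIVED collided at 16. */
enum { OLD_RECEIVING = 16, OLD_RECEIVED = 16 };
/* New values: every state is a distinct bit. */
enum { NEW_RECEIVING = 16, NEW_RECEIVED = 32 };

int main(void)
{
	int state = 16; /* mailbox is mid-receive */

	printf("old: receiving=%d received=%d\n",
	       !!(state & OLD_RECEIVING), !!(state & OLD_RECEIVED)); /* 1 1 */
	printf("new: receiving=%d received=%d\n",
	       !!(state & NEW_RECEIVING), !!(state & NEW_RECEIVED)); /* 1 0 */
	return 0;
}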
struct octeon_mbox {
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index bf48393..ec8504b 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -356,29 +356,6 @@
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)
-static inline void *
-lio_alloc_info_buffer(struct octeon_device *oct,
- struct octeon_droq *droq)
-{
- void *virt_ptr;
-
- virt_ptr = lio_dma_alloc(oct, (droq->max_count * OCT_DROQ_INFO_SIZE),
- &droq->info_list_dma);
- if (virt_ptr) {
- droq->info_alloc_size = droq->max_count * OCT_DROQ_INFO_SIZE;
- droq->info_base_addr = virt_ptr;
- }
-
- return virt_ptr;
-}
-
-static inline void lio_free_info_buffer(struct octeon_device *oct,
- struct octeon_droq *droq)
-{
- lio_dma_free(oct, droq->info_alloc_size, droq->info_base_addr,
- droq->info_list_dma);
-}
-
static inline
void *get_rbd(struct sk_buff *skb)
{
@@ -392,12 +369,6 @@
}
static inline u64
-lio_map_ring_info(struct octeon_droq *droq, u32 i)
-{
- return droq->info_list_dma + (i * sizeof(struct octeon_droq_info));
-}
-
-static inline u64
lio_map_ring(void *buf)
{
dma_addr_t dma_addr;
@@ -443,8 +414,8 @@
int copy_len,
int idx)
{
- memcpy(skb_put(nicbuf, copy_len),
- get_rbd(droq->recv_buf_list[idx].buffer), copy_len);
+ skb_put_data(nicbuf, get_rbd(droq->recv_buf_list[idx].buffer),
+ copy_len);
}
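The many memcpy(skb_put(...)) conversions in this merge are behavior-preserving: skb_put_data(skb, data, len) is the fused form of memcpy(skb_put(skb, len), data, len). A userspace model of that equivalence using a toy buffer type (names invented, not the real sk_buff API):

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-ins for the sk_buff tail-pointer API. */
struct buf { unsigned char data[256]; size_t len; };

static void *buf_put(struct buf *b, size_t len)
{
	void *tail = b->data + b->len;

	b->len += len;
	return tail;
}

/* skb_put_data(skb, src, len) == memcpy(skb_put(skb, len), src, len) */
static void *buf_put_data(struct buf *b, const void *src, size_t len)
{
	void *tail = buf_put(b, len);

	memcpy(tail, src, len);
	return tail;
}

int main(void)
{
	struct buf a = { .len = 0 }, b = { .len = 0 };
	const char payload[] = "frame";

	memcpy(buf_put(&a, sizeof(payload)), payload, sizeof(payload));
	buf_put_data(&b, payload, sizeof(payload));
	assert(a.len == b.len && !memcmp(a.data, b.data, a.len));
	puts("equivalent");
	return 0;
}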
/**
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 261f448..7b297f1 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -252,8 +252,7 @@
if (!(oct->io_qmask.iq & BIT_ULL(i)))
continue;
pending =
- atomic_read(&oct->
- instr_queue[i]->instr_pending);
+ atomic_read(&oct->instr_queue[i]->instr_pending);
if (pending)
__check_db_timeout(oct, i);
instr_cnt += pending;
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index a213868..2887bca 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -755,6 +755,7 @@
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
p->has_rx_tstamp = have_hw_timestamps;
config.rx_filter = HWTSTAMP_FILTER_ALL;
if (p->has_rx_tstamp) {
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index d6477af..573755b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1763,6 +1763,7 @@
return nicvf_xdp_setup(nic, xdp->prog);
case XDP_QUERY_PROG:
xdp->prog_attached = !!nic->xdp_prog;
+ xdp->prog_id = nic->xdp_prog ? nic->xdp_prog->aux->id : 0;
return 0;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index d56142b..0f13a7f 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1801,7 +1801,7 @@
eth_type = skb_network_offset(skb) == ETH_HLEN ?
CPL_ETH_II : CPL_ETH_II_VLAN;
- hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
+ hdr = skb_push(skb, sizeof(*hdr));
hdr->opcode = CPL_TX_PKT_LSO;
hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
hdr->ip_hdr_words = ip_hdr(skb)->ihl;
@@ -1849,7 +1849,7 @@
}
}
- cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
+ cpl = __skb_push(skb, sizeof(*cpl));
cpl->opcode = CPL_TX_PKT;
cpl->ip_csum_dis = 1; /* SW calculates IP csum */
cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 2ff6bd1..0bc6a4f 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -471,8 +471,7 @@
if (!skb)
goto alloc_skb_fail;
- req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
- memset(req, 0, sizeof(*req));
+ req = __skb_put_zero(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
req->mtu_idx = NMTUS - 1;
@@ -495,8 +494,7 @@
if (!skb)
goto alloc_skb_fail;
- req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
- memset(req, 0, sizeof(*req));
+ req = __skb_put_zero(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
req->params = htonl(V_L2T_W_IDX(i));
@@ -518,8 +516,7 @@
if (!skb)
goto alloc_skb_fail;
- req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
- memset(req, 0, sizeof(*req));
+ req = __skb_put_zero(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
req->l2t_idx = htonl(V_L2T_W_IDX(i));
@@ -538,8 +535,7 @@
if (!skb)
goto alloc_skb_fail;
- greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
- memset(greq, 0, sizeof(*greq));
+ greq = __skb_put_zero(skb, sizeof(*greq));
greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
greq->mask = cpu_to_be64(1);
@@ -909,7 +905,7 @@
if (!skb)
return -ENOMEM;
- req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
+ req = __skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
@@ -952,7 +948,7 @@
if (!skb)
return -ENOMEM;
- req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
+ req = skb_put(skb, sizeof(*req));
req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
req->sched = sched;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index fa81445..50cd660 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -552,7 +552,7 @@
struct cpl_tid_release *req;
skb->priority = CPL_PRIORITY_SETUP;
- req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
+ req = __skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
@@ -1096,7 +1096,7 @@
return;
}
skb->priority = CPL_PRIORITY_CONTROL;
- req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
+ req = skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
req->reply = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 2626412..248e40c 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -96,7 +96,7 @@
return -ENOMEM;
}
- req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
+ req = __skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 1b9d154..e2d3426 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2282,7 +2282,7 @@
if (!skb)
goto no_mem;
- memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
+ __skb_put_data(skb, r, AN_PKT_SIZE);
skb->data[0] = CPL_ASYNC_NOTIF;
rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
q->async_notif++;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index e88c180..b7a92eb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -777,6 +777,7 @@
struct vf_info {
unsigned char vf_mac_addr[ETH_ALEN];
+ unsigned int tx_rate;
bool pf_set_mac;
};
@@ -1551,6 +1552,7 @@
unsigned int vf, unsigned int eqid);
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
+int t4_update_port_info(struct port_info *pi);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
void t4_db_full(struct adapter *adapter);
void t4_db_dropped(struct adapter *adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 1fa34b0..00044d7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2669,6 +2669,8 @@
if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
unsigned int sb;
+ seq_printf(seq, "Connections in use: %u\n",
+ atomic_read(&t->conns_in_use));
if (chip <= CHELSIO_T5)
sb = t4_read_reg(adap, LE_DB_SERVER_INDEX_A) / 4;
@@ -2699,17 +2701,23 @@
atomic_read(&t->hash_tids_in_use));
}
} else if (t->ntids) {
+ seq_printf(seq, "Connections in use: %u\n",
+ atomic_read(&t->conns_in_use));
+
seq_printf(seq, "TID range: 0..%u", t->ntids - 1);
seq_printf(seq, ", in use: %u\n",
atomic_read(&t->tids_in_use));
}
if (t->nstids)
- seq_printf(seq, "STID range: %u..%u, in use: %u\n",
+ seq_printf(seq, "STID range: %u..%u, in use-IPv4/IPv6: %u/%u\n",
(!t->stid_base &&
(chip <= CHELSIO_T5)) ?
t->stid_base + 1 : t->stid_base,
- t->stid_base + t->nstids - 1, t->stids_in_use);
+ t->stid_base + t->nstids - 1,
+ t->stids_in_use - t->v6_stids_in_use,
+ t->v6_stids_in_use);
+
if (t->natids)
seq_printf(seq, "ATID range: 0..%u, in use: %u\n",
t->natids - 1, t->atids_in_use);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 0ba7866..e9bab72 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -500,7 +500,11 @@
} else if (port_type == FW_PORT_TYPE_SFP ||
port_type == FW_PORT_TYPE_QSFP_10G ||
port_type == FW_PORT_TYPE_QSA ||
- port_type == FW_PORT_TYPE_QSFP) {
+ port_type == FW_PORT_TYPE_QSFP ||
+ port_type == FW_PORT_TYPE_CR4_QSFP ||
+ port_type == FW_PORT_TYPE_CR_QSFP ||
+ port_type == FW_PORT_TYPE_CR2_QSFP ||
+ port_type == FW_PORT_TYPE_SFP28) {
if (mod_type == FW_PORT_MOD_TYPE_LR ||
mod_type == FW_PORT_MOD_TYPE_SR ||
mod_type == FW_PORT_MOD_TYPE_ER ||
@@ -511,6 +515,9 @@
return PORT_DA;
else
return PORT_OTHER;
+ } else if (port_type == FW_PORT_TYPE_KR4_100G ||
+ port_type == FW_PORT_TYPE_KR_SFP28) {
+ return PORT_NONE;
}
return PORT_OTHER;
@@ -618,7 +625,21 @@
case FW_PORT_TYPE_CR_QSFP:
case FW_PORT_TYPE_SFP28:
SET_LMM(FIBRE);
- SET_LMM(25000baseCR_Full);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
+ FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
+ break;
+
+ case FW_PORT_TYPE_KR_SFP28:
+ SET_LMM(Backplane);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
+ FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
+ break;
+
+ case FW_PORT_TYPE_CR2_QSFP:
+ SET_LMM(FIBRE);
+ SET_LMM(50000baseSR2_Full);
break;
case FW_PORT_TYPE_KR4_100G:
@@ -674,13 +695,20 @@
static int get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings)
{
- const struct port_info *pi = netdev_priv(dev);
+ struct port_info *pi = netdev_priv(dev);
struct ethtool_link_settings *base = &link_ksettings->base;
ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
+ /* For now, the Firmware doesn't send up Port State changes
+ * when the Virtual Interface attached to the Port is down. So
+ * if it's down, let's grab any changes.
+ */
+ if (!netif_running(dev))
+ (void)t4_update_port_info(pi);
+
base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);
if (pi->mdio_addr >= 0) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 1073673..45b5853 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -190,7 +190,7 @@
if (!skb)
return -ENOMEM;
- fwr = (struct fw_filter_wr *)__skb_put(skb, len);
+ fwr = __skb_put(skb, len);
t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
/* Mark the filter as "pending" and ship off the Filter Work Request.
@@ -231,8 +231,7 @@
}
}
- fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
- memset(fwr, 0, sizeof(*fwr));
+ fwr = __skb_put_zero(skb, sizeof(*fwr));
/* It would be nice to put most of the following in t4_hw.c but most
* of the work is translating the cxgbtool ch_filter_specification
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 53309f6..b3753ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -824,9 +824,12 @@
{
int err, i, j;
struct sge *s = &adap->sge;
- struct sge_uld_rxq_info *rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+ struct sge_uld_rxq_info *rxq_info = NULL;
unsigned int cmplqid = 0;
+ if (is_uld(adap))
+ rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
struct port_info *pi = netdev_priv(dev);
@@ -891,7 +894,7 @@
* The skb's priority is determined via the VLAN Tag Priority Code
* Point field.
*/
- if (cxgb4_dcb_enabled(dev)) {
+ if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
u16 vlan_tci;
int err;
@@ -1093,10 +1096,12 @@
* This is equivalent to 4 TIDs. With CLIP enabled it
* needs 2 TIDs.
*/
- if (family == PF_INET)
- t->stids_in_use++;
- else
+ if (family == PF_INET6) {
t->stids_in_use += 2;
+ t->v6_stids_in_use += 2;
+ } else {
+ t->stids_in_use++;
+ }
}
spin_unlock_bh(&t->stid_lock);
return stid;
@@ -1150,13 +1155,16 @@
bitmap_release_region(t->stid_bmap, stid, 1);
t->stid_tab[stid].data = NULL;
if (stid < t->nstids) {
- if (family == PF_INET)
- t->stids_in_use--;
- else
+ if (family == PF_INET6) {
t->stids_in_use -= 2;
+ t->v6_stids_in_use -= 2;
+ } else {
+ t->stids_in_use--;
+ }
} else {
t->sftids_in_use--;
}
+
spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
@@ -1170,7 +1178,7 @@
struct cpl_tid_release *req;
set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
- req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
+ req = __skb_put(skb, sizeof(*req));
INIT_TP_WR(req, tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
@@ -1232,7 +1240,8 @@
* Release a TID and inform HW. If we are unable to allocate the release
* message we defer to a work queue.
*/
-void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
+void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
+ unsigned short family)
{
struct sk_buff *skb;
struct adapter *adap = container_of(t, struct adapter, tids);
@@ -1241,10 +1250,18 @@
if (t->tid_tab[tid]) {
t->tid_tab[tid] = NULL;
- if (t->hash_base && (tid >= t->hash_base))
- atomic_dec(&t->hash_tids_in_use);
- else
- atomic_dec(&t->tids_in_use);
+ atomic_dec(&t->conns_in_use);
+ if (t->hash_base && (tid >= t->hash_base)) {
+ if (family == AF_INET6)
+ atomic_sub(2, &t->hash_tids_in_use);
+ else
+ atomic_dec(&t->hash_tids_in_use);
+ } else {
+ if (family == AF_INET6)
+ atomic_sub(2, &t->tids_in_use);
+ else
+ atomic_dec(&t->tids_in_use);
+ }
}
skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
@@ -1292,10 +1309,12 @@
spin_lock_init(&t->ftid_lock);
t->stids_in_use = 0;
+ t->v6_stids_in_use = 0;
t->sftids_in_use = 0;
t->afree = NULL;
t->atids_in_use = 0;
atomic_set(&t->tids_in_use, 0);
+ atomic_set(&t->conns_in_use, 0);
atomic_set(&t->hash_tids_in_use, 0);
/* Setup the free list for atid_tab and clear the stid bitmap. */
@@ -1343,7 +1362,7 @@
return -ENOMEM;
adap = netdev2adap(dev);
- req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
+ req = __skb_put(skb, sizeof(*req));
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
req->local_port = sport;
@@ -1384,7 +1403,7 @@
return -ENOMEM;
adap = netdev2adap(dev);
- req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
+ req = __skb_put(skb, sizeof(*req));
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
req->local_port = sport;
@@ -1416,7 +1435,7 @@
if (!skb)
return -ENOMEM;
- req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
+ req = __skb_put(skb, sizeof(*req));
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
@@ -2251,6 +2270,13 @@
return err;
}
+ /* It's possible that the basic port information could have
+ * changed since we first read it.
+ */
+ err = t4_update_port_info(pi);
+ if (err < 0)
+ return err;
+
err = link_start(dev);
if (!err)
netif_tx_start_all_queues(dev);
@@ -2562,6 +2588,8 @@
if (vf >= adap->num_vfs)
return -EINVAL;
ivi->vf = vf;
+ ivi->max_tx_rate = adap->vfinfo[vf].tx_rate;
+ ivi->min_tx_rate = 0;
ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr);
return 0;
}
@@ -2578,6 +2606,109 @@
return 0;
}
+static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
+ int max_tx_rate)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ struct fw_port_cmd port_cmd, port_rpl;
+ u32 link_status, speed = 0;
+ u32 fw_pfvf, fw_class;
+ int class_id = vf;
+ int link_ok, ret;
+ u16 pktsize;
+
+ if (vf >= adap->num_vfs)
+ return -EINVAL;
+
+ if (min_tx_rate) {
+ dev_err(adap->pdev_dev,
+ "Min tx rate (%d) (> 0) for VF %d is Invalid.\n",
+ min_tx_rate, vf);
+ return -EINVAL;
+ }
+ /* Retrieve link details for VF port */
+ memset(&port_cmd, 0, sizeof(port_cmd));
+ port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_PORT_CMD_PORTID_V(pi->port_id));
+ port_cmd.action_to_len16 =
+ cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_LEN16(port_cmd));
+ ret = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
+ &port_rpl);
+ if (ret != FW_SUCCESS) {
+ dev_err(adap->pdev_dev,
+ "Failed to get link status for VF %d\n", vf);
+ return -EINVAL;
+ }
+ link_status = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
+ link_ok = (link_status & FW_PORT_CMD_LSTATUS_F) != 0;
+ if (!link_ok) {
+ dev_err(adap->pdev_dev, "Link down for VF %d\n", vf);
+ return -EINVAL;
+ }
+ /* Determine link speed */
+ if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
+ speed = 100;
+ else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
+ speed = 1000;
+ else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
+ speed = 10000;
+ else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
+ speed = 25000;
+ else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
+ speed = 40000;
+ else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
+ speed = 100000;
+
+ if (max_tx_rate > speed) {
+ dev_err(adap->pdev_dev,
+ "Max tx rate %d for VF %d can't be > link-speed %u",
+ max_tx_rate, vf, speed);
+ return -EINVAL;
+ }
+ pktsize = be16_to_cpu(port_rpl.u.info.mtu);
+ /* subtract ethhdr size and 4 bytes crc, since f/w appends them */
+ pktsize = pktsize - sizeof(struct ethhdr) - 4;
+ /* subtract ipv4 hdr size, tcp hdr size to get typical IPv4 MSS size */
+ pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr);
+ /* configure Traffic Class for rate-limiting */
+ ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET,
+ SCHED_CLASS_LEVEL_CL_RL,
+ SCHED_CLASS_MODE_CLASS,
+ SCHED_CLASS_RATEUNIT_BITS,
+ SCHED_CLASS_RATEMODE_ABS,
+ pi->port_id, class_id, 0,
+ max_tx_rate * 1000, 0, pktsize);
+ if (ret) {
+ dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
+ ret);
+ return -EINVAL;
+ }
+ dev_info(adap->pdev_dev,
+ "Class %d with MSS %u configured with rate %u\n",
+ class_id, pktsize, max_tx_rate);
+
+ /* bind VF to configured Traffic Class */
+ fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
+ fw_class = class_id;
+ ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf,
+ &fw_class);
+ if (ret) {
+ dev_err(adap->pdev_dev,
+ "Err %d in binding VF %d to Traffic Class %d\n",
+ ret, vf, class_id);
+ return -EINVAL;
+ }
+ dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
+ adap->pf, vf, class_id);
+ adap->vfinfo[vf].tx_rate = max_tx_rate;
+ return 0;
+}
+
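cxgb_set_vf_rate() programs the Traffic Class against a typical IPv4 MSS derived from the port MTU; for the common 1500-byte MTU that is 1500 - 14 (Ethernet) - 4 (CRC) - 20 (IPv4) - 20 (TCP) = 1442 bytes. A quick check of that arithmetic, with the header sizes hardcoded to the usual values rather than taken from the struct definitions:

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500; /* as reported in the FW port reply */
	unsigned int eth_hdr = 14, crc = 4, ip_hdr = 20, tcp_hdr = 20;
	unsigned int pktsize = mtu - eth_hdr - crc - ip_hdr - tcp_hdr;

	printf("MSS used for the rate limit: %u\n", pktsize); /* 1442 */
	return 0;
}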
#endif
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
@@ -2697,12 +2828,15 @@
return err;
}
-static int cxgb_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc)
+static int cxgb_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *tc)
{
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
+ if (chain_index)
+ return -EOPNOTSUPP;
+
if (!(adap->flags & FULL_INIT_DONE)) {
dev_err(adap->pdev_dev,
"Failed to setup tc on port %d. Link Down?\n",
@@ -2726,6 +2860,16 @@
return -EOPNOTSUPP;
}
+static netdev_features_t cxgb_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ /* Disable GRO, if RX_CSUM is disabled */
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_GRO;
+
+ return features;
+}
+
static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_open = cxgb_open,
.ndo_stop = cxgb_close,
@@ -2747,6 +2891,7 @@
#endif /* CONFIG_CHELSIO_T4_FCOE */
.ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
.ndo_setup_tc = cxgb_setup_tc,
+ .ndo_fix_features = cxgb_fix_features,
};
#ifdef CONFIG_PCI_IOV
@@ -2754,6 +2899,7 @@
.ndo_open = dummy_open,
.ndo_set_vf_mac = cxgb_set_vf_mac,
.ndo_get_vf_config = cxgb_get_vf_config,
+ .ndo_set_vf_rate = cxgb_set_vf_rate,
.ndo_get_phys_port_id = cxgb_get_phys_port_id,
};
#endif
@@ -4018,10 +4164,7 @@
/* Reduce memory usage in kdump environment, disable all offload.
*/
- if (is_kdump_kernel()) {
- adap->params.offload = 0;
- adap->params.crypto = 0;
- } else if (is_uld(adap) && t4_uld_mem_alloc(adap)) {
+ if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
adap->params.offload = 0;
adap->params.crypto = 0;
}
@@ -4042,7 +4185,7 @@
struct port_info *pi = adap2pinfo(adap, i);
pi->first_qset = qidx;
- pi->nqsets = 8;
+ pi->nqsets = is_kdump_kernel() ? 1 : 8;
qidx += pi->nqsets;
}
#else /* !CONFIG_CHELSIO_T4_DCB */
@@ -4055,6 +4198,9 @@
if (q10g > netif_get_num_default_rss_queues())
q10g = netif_get_num_default_rss_queues();
+ if (is_kdump_kernel())
+ q10g = 1;
+
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
@@ -4960,6 +5106,8 @@
netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
+ netif_carrier_off(adapter->port[i]);
+
err = register_netdev(adapter->port[i]);
if (err)
break;
@@ -5026,13 +5174,15 @@
&v, &port_vec);
if (err < 0) {
dev_err(adapter->pdev_dev, "Could not fetch port params\n");
- goto free_adapter;
+ goto free_mbox_log;
}
adapter->params.nports = hweight32(port_vec);
pci_set_drvdata(pdev, adapter);
return 0;
+free_mbox_log:
+ kfree(adapter->mbox_log);
free_adapter:
kfree(adapter);
free_pci_region:
@@ -5132,6 +5282,7 @@
unregister_netdev(adapter->port[0]);
iounmap(adapter->regs);
kfree(adapter->vfinfo);
+ kfree(adapter->mbox_log);
kfree(adapter);
pci_disable_sriov(pdev);
pci_release_regions(pdev);
@@ -5178,6 +5329,7 @@
unregister_netdev(adapter->port[0]);
iounmap(adapter->regs);
kfree(adapter->vfinfo);
+ kfree(adapter->mbox_log);
kfree(adapter);
pci_disable_sriov(pdev);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 6e74040..ce0d9fb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -123,12 +123,14 @@
spinlock_t stid_lock;
unsigned int stids_in_use;
+ unsigned int v6_stids_in_use;
unsigned int sftids_in_use;
/* TIDs in the TCAM */
atomic_t tids_in_use;
/* TIDs in the HASH */
atomic_t hash_tids_in_use;
+ atomic_t conns_in_use;
/* lock for setting/clearing filter bitmap */
spinlock_t ftid_lock;
};
@@ -157,13 +159,21 @@
}
static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
- unsigned int tid)
+ unsigned int tid, unsigned short family)
{
t->tid_tab[tid] = data;
- if (t->hash_base && (tid >= t->hash_base))
- atomic_inc(&t->hash_tids_in_use);
- else
- atomic_inc(&t->tids_in_use);
+ if (t->hash_base && (tid >= t->hash_base)) {
+ if (family == AF_INET6)
+ atomic_add(2, &t->hash_tids_in_use);
+ else
+ atomic_inc(&t->hash_tids_in_use);
+ } else {
+ if (family == AF_INET6)
+ atomic_add(2, &t->tids_in_use);
+ else
+ atomic_inc(&t->tids_in_use);
+ }
+ atomic_inc(&t->conns_in_use);
}
int cxgb4_alloc_atid(struct tid_info *t, void *data);
@@ -171,8 +181,8 @@
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data);
void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
-void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
-
+void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid,
+ unsigned short family);
struct in6_addr;
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 6f3692d..f7ef887 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -146,7 +146,7 @@
if (!skb)
return -ENOMEM;
- req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
+ req = __skb_put(skb, sizeof(*req));
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 3a34aa6..d5e316d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -4040,6 +4040,7 @@
{ MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
{ TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
{ TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
+ { TIMER0INT_F, "CIM TIMER0 interrupt", -1, 1 },
{ 0 }
};
static const struct intr_info cim_upintr_info[] = {
@@ -4074,11 +4075,27 @@
{ 0 }
};
+ u32 val, fw_err;
int fat;
- if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F)
+ fw_err = t4_read_reg(adapter, PCIE_FW_A);
+ if (fw_err & PCIE_FW_ERR_F)
t4_report_fw_error(adapter);
+ /* When the Firmware detects an internal error which normally
+ * wouldn't raise a Host Interrupt, it forces a CIM Timer0 interrupt
+ * in order to make sure the Host sees the Firmware Crash. So
+ * if we have a Timer0 interrupt and don't see a Firmware Crash,
+ * ignore the Timer0 interrupt.
+ */
+
+ val = t4_read_reg(adapter, CIM_HOST_INT_CAUSE_A);
+ if (val & TIMER0INT_F)
+ if (!(fw_err & PCIE_FW_ERR_F) ||
+ (PCIE_FW_EVAL_G(fw_err) != PCIE_FW_EVAL_CRASH))
+ t4_write_reg(adapter, CIM_HOST_INT_CAUSE_A,
+ TIMER0INT_F);
+
fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
cim_intr_info) +
t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
@@ -4445,7 +4462,7 @@
#define PF_INTR_MASK (PFSW_F)
#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
- CPL_SWITCH_F | SGE_F | ULP_TX_F)
+ CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)
/**
* t4_slow_intr_handler - control path interrupt handler
@@ -6293,13 +6310,18 @@
if (!t4_fw_matches_chip(adap, fw_hdr))
return -EINVAL;
+ /* Disable FW_OK flag so that mbox commands with FW_OK flag set
+ * won't be sent while we are flashing the FW.
+ */
+ adap->flags &= ~FW_OK;
+
ret = t4_fw_halt(adap, mbox, force);
if (ret < 0 && !force)
- return ret;
+ goto out;
ret = t4_load_fw(adap, fw_data, size);
if (ret < 0)
- return ret;
+ goto out;
/*
* Older versions of the firmware don't understand the new
@@ -6310,7 +6332,17 @@
* its header flags to see if it advertises the capability.
*/
reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
- return t4_fw_restart(adap, mbox, reset);
+ ret = t4_fw_restart(adap, mbox, reset);
+
+ /* Grab potentially new Firmware Device Log parameters so we can see
+ * how healthy the new Firmware is. It's okay to contact the new
+ * Firmware for these parameters even though, as far as it's
+ * concerned, we've never said "HELLO" to it ...
+ */
+ (void)t4_init_devlog_params(adap);
+out:
+ adap->flags |= FW_OK;
+ return ret;
}
/**
@@ -7360,11 +7392,41 @@
lc->fc = fc;
lc->supported = be16_to_cpu(p->u.info.pcap);
lc->lp_advertising = be16_to_cpu(p->u.info.lpacap);
+
t4_os_link_changed(adap, pi->port_id, link_ok);
}
}
/**
+ * t4_update_port_info - retrieve and update port information if changed
+ * @pi: the port_info
+ *
+ * We issue a Get Port Information Command to the Firmware and, if
+ * successful, we check to see if anything is different from what we
+ * last recorded and update things accordingly.
+ */
+int t4_update_port_info(struct port_info *pi)
+{
+ struct fw_port_cmd port_cmd;
+ int ret;
+
+ memset(&port_cmd, 0, sizeof(port_cmd));
+ port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_PORT_CMD_PORTID_V(pi->port_id));
+ port_cmd.action_to_len16 = cpu_to_be32(
+ FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_LEN16(port_cmd));
+ ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
+ &port_cmd, sizeof(port_cmd), &port_cmd);
+ if (ret)
+ return ret;
+
+ t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
+ return 0;
+}
+
+/**
* t4_handle_fw_rpl - process a FW reply message
* @adap: the adapter
* @rpl: start of the FW message
@@ -7643,10 +7705,9 @@
t4_intr_disable(adapter);
t4_write_reg(adapter, DBG_GPIO_EN_A, 0);
for_each_port(adapter, port) {
- u32 a_port_cfg = PORT_REG(port,
- is_t4(adapter->params.chip)
- ? XGMAC_PORT_CFG_A
- : MAC_PORT_CFG_A);
+ u32 a_port_cfg = is_t4(adapter->params.chip) ?
+ PORT_REG(port, XGMAC_PORT_CFG_A) :
+ T5_PORT_REG(port, MAC_PORT_CFG_A);
t4_write_reg(adapter, a_port_cfg,
t4_read_reg(adapter, a_port_cfg)
@@ -8272,7 +8333,16 @@
ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
if (ret)
break;
- idx = (idx + 1) & UPDBGLARDPTR_M;
+
+ /* Bits 0-3 of UpDbgLaRdPtr can range from 0000 to 1001 to
+ * identify the 32-bit portion of the full 312-bit data
+ */
+ if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
+ idx = (idx & 0xff0) + 0x10;
+ else
+ idx++;
+ /* address can't exceed 0xfff */
+ idx &= UPDBGLARDPTR_M;
}
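On T6 the low nibble of UpDbgLaRdPtr only counts 0 through 9 before the pointer jumps to the next 32-bit row, and the address wraps at 0xfff. A standalone sketch of that increment rule, modeling the loop above rather than reproducing driver code:

#include <stdio.h>

#define UPDBGLARDPTR_M 0xfff

/* T6 rule: the low nibble counts 0..9, then the pointer moves to the
 * next 32-bit row; the address wraps at 0xfff.
 */
static unsigned int next_idx_t6(unsigned int idx)
{
	if ((idx & 0xf) >= 9)
		idx = (idx & 0xff0) + 0x10;
	else
		idx++;
	return idx & UPDBGLARDPTR_M;
}

int main(void)
{
	printf("0x128 -> 0x%03x\n", next_idx_t6(0x128)); /* 0x129 */
	printf("0x129 -> 0x%03x\n", next_idx_t6(0x129)); /* 0x130 */
	printf("0xff9 -> 0x%03x\n", next_idx_t6(0xff9)); /* 0x000 */
	return 0;
}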
restart:
if (cfg & UPDBGLAEN_F) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index a323185..99987d8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -172,6 +172,8 @@
CH_PCI_ID_TABLE_FENTRY(0x509e), /* Custom T520-CR */
CH_PCI_ID_TABLE_FENTRY(0x509f), /* Custom T540-CR */
CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a1), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a2), /* Custom T540-KR4 */
/* T6 adapters:
*/
@@ -190,6 +192,9 @@
CH_PCI_ID_TABLE_FENTRY(0x6015),
CH_PCI_ID_TABLE_FENTRY(0x6080),
CH_PCI_ID_TABLE_FENTRY(0x6081),
+ CH_PCI_ID_TABLE_FENTRY(0x6082), /* Custom T6225-CR SFP28 */
+ CH_PCI_ID_TABLE_FENTRY(0x6083), /* Custom T62100-CR QSFP28 */
+ CH_PCI_ID_TABLE_FENTRY(0x6084), /* Custom T64100-CR QSFP28 */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 3348d33..3884336 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1077,6 +1077,10 @@
#define TIEQINPARERRINT_V(x) ((x) << TIEQINPARERRINT_S)
#define TIEQINPARERRINT_F TIEQINPARERRINT_V(1U)
+#define TIMER0INT_S 2
+#define TIMER0INT_V(x) ((x) << TIMER0INT_S)
+#define TIMER0INT_F TIMER0INT_V(1U)
+
#define PREFDROPINT_S 1
#define PREFDROPINT_V(x) ((x) << PREFDROPINT_S)
#define PREFDROPINT_F PREFDROPINT_V(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 251a35e..f47461a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2572,6 +2572,7 @@
FW_PORT_TYPE_CR_QSFP,
FW_PORT_TYPE_CR2_QSFP,
FW_PORT_TYPE_SFP28,
+ FW_PORT_TYPE_KR_SFP28,
FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M
};
@@ -3087,6 +3088,10 @@
#define FW_DEBUG_CMD_TYPE_G(x) \
(((x) >> FW_DEBUG_CMD_TYPE_S) & FW_DEBUG_CMD_TYPE_M)
+enum pcie_fw_eval {
+ PCIE_FW_EVAL_CRASH = 0,
+};
+
#define PCIE_FW_ERR_S 31
#define PCIE_FW_ERR_V(x) ((x) << PCIE_FW_ERR_S)
#define PCIE_FW_ERR_F PCIE_FW_ERR_V(1U)
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
index 515b94f..4b5aacc 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
@@ -90,7 +90,7 @@
{
struct cpl_tid_release *req;
- req = (struct cpl_tid_release *)__skb_put(skb, len);
+ req = __skb_put(skb, len);
memset(req, 0, len);
INIT_TP_WR(req, tid);
@@ -104,7 +104,7 @@
{
struct cpl_close_con_req *req;
- req = (struct cpl_close_con_req *)__skb_put(skb, len);
+ req = __skb_put(skb, len);
memset(req, 0, len);
INIT_TP_WR(req, tid);
@@ -119,7 +119,7 @@
{
struct cpl_abort_req *req;
- req = (struct cpl_abort_req *)__skb_put(skb, len);
+ req = __skb_put(skb, len);
memset(req, 0, len);
INIT_TP_WR(req, tid);
@@ -134,7 +134,7 @@
{
struct cpl_abort_rpl *rpl;
- rpl = (struct cpl_abort_rpl *)__skb_put(skb, len);
+ rpl = __skb_put(skb, len);
memset(rpl, 0, len);
INIT_TP_WR(rpl, tid);
@@ -149,7 +149,7 @@
{
struct cpl_rx_data_ack *req;
- req = (struct cpl_rx_data_ack *)__skb_put(skb, len);
+ req = __skb_put(skb, len);
memset(req, 0, len);
INIT_TP_WR(req, tid);
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index da5b58b..410a0a9 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -450,11 +450,10 @@
if (bp + length > lp->end_dma_buff) {
int semi_cnt = lp->end_dma_buff - bp;
- memcpy(skb_put(skb, semi_cnt), bp, semi_cnt);
- memcpy(skb_put(skb, length - semi_cnt), lp->dma_buff,
- length - semi_cnt);
+ skb_put_data(skb, bp, semi_cnt);
+ skb_put_data(skb, lp->dma_buff, length - semi_cnt);
} else {
- memcpy(skb_put(skb, length), bp, length);
+ skb_put_data(skb, bp, length);
}
bp += (length + 3) & ~3;
if (bp >= lp->end_dma_buff)
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 7a7c02f..e2a7029 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -702,7 +702,10 @@
struct ethtool_link_ksettings *cmd)
{
struct ep93xx_priv *ep = netdev_priv(dev);
- return mii_ethtool_get_link_ksettings(&ep->mii, cmd);
+
+ mii_ethtool_get_link_ksettings(&ep->mii, cmd);
+
+ return 0;
}
static int ep93xx_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 2b23f46..ba032ac 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,7 +33,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.3.0.31"
+#define DRV_VERSION "2.3.0.42"
#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
@@ -47,7 +47,7 @@
struct enic_msix_entry {
int requested;
- char devname[IFNAMSIZ];
+ char devname[IFNAMSIZ + 8];
irqreturn_t (*isr)(int, void *);
void *devid;
cpumask_var_t affinity_mask;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 4b87bee..d24ee1a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1537,13 +1537,12 @@
*/
enic_calc_int_moderation(enic, &enic->rq[0]);
- if (rq_work_done < rq_work_to_do) {
+ if ((rq_work_done < budget) && napi_complete_done(napi, rq_work_done)) {
/* Some work done, but not enough to stay in polling,
* exit polling
*/
- napi_complete_done(napi, rq_work_done);
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
enic_set_int_moderation(enic, &enic->rq[0]);
vnic_intr_unmask(&enic->intr[intr]);
@@ -1663,13 +1662,12 @@
*/
enic_calc_int_moderation(enic, &enic->rq[rq]);
- if (work_done < work_to_do) {
+ if ((work_done < budget) && napi_complete_done(napi, work_done)) {
/* Some work done, but not enough to stay in polling,
* exit polling
*/
- napi_complete_done(napi, work_done);
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
enic_set_int_moderation(enic, &enic->rq[rq]);
vnic_intr_unmask(&enic->intr[intr]);
@@ -1739,7 +1737,7 @@
intr = enic_msix_rq_intr(enic, i);
snprintf(enic->msix[intr].devname,
sizeof(enic->msix[intr].devname),
- "%.11s-rx-%u", netdev->name, i);
+ "%s-rx-%u", netdev->name, i);
enic->msix[intr].isr = enic_isr_msix;
enic->msix[intr].devid = &enic->napi[i];
}
@@ -1750,7 +1748,7 @@
intr = enic_msix_wq_intr(enic, i);
snprintf(enic->msix[intr].devname,
sizeof(enic->msix[intr].devname),
- "%.11s-tx-%u", netdev->name, i);
+ "%s-tx-%u", netdev->name, i);
enic->msix[intr].isr = enic_isr_msix;
enic->msix[intr].devid = &enic->napi[wq];
}
@@ -1758,14 +1756,14 @@
intr = enic_msix_err_intr(enic);
snprintf(enic->msix[intr].devname,
sizeof(enic->msix[intr].devname),
- "%.11s-err", netdev->name);
+ "%s-err", netdev->name);
enic->msix[intr].isr = enic_isr_msix_err;
enic->msix[intr].devid = enic;
intr = enic_msix_notify_intr(enic);
snprintf(enic->msix[intr].devname,
sizeof(enic->msix[intr].devname),
- "%.11s-notify", netdev->name);
+ "%s-notify", netdev->name);
enic->msix[intr].isr = enic_isr_msix_notify;
enic->msix[intr].devid = enic;
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 008dc81..16fe776 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1171,7 +1171,7 @@
if (GoodPacket &&
((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
skb_reserve(skb, 2);
- rdptr = (u8 *) skb_put(skb, RxLen - 4);
+ rdptr = skb_put(skb, RxLen - 4);
/* Read received packet from RX SRAM */
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 91b8f6f..c87b8cc 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1483,8 +1483,8 @@
de_rx_missed(de, rbuf[8]);
}
-static int __de_get_link_ksettings(struct de_private *de,
- struct ethtool_link_ksettings *cmd)
+static void __de_get_link_ksettings(struct de_private *de,
+ struct ethtool_link_ksettings *cmd)
{
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
de->media_supported);
@@ -1517,8 +1517,6 @@
cmd->base.autoneg = AUTONEG_ENABLE;
/* ignore maxtxpkt, maxrxpkt for now */
-
- return 0;
}
static int __de_set_link_ksettings(struct de_private *de,
@@ -1615,13 +1613,12 @@
struct ethtool_link_ksettings *cmd)
{
struct de_private *de = netdev_priv(dev);
- int rc;
spin_lock_irq(&de->lock);
- rc = __de_get_link_ksettings(de, cmd);
+ __de_get_link_ksettings(de, cmd);
spin_unlock_irq(&de->lock);
- return rc;
+ return 0;
}
static int de_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index fd6bcf0..47be501 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -3624,10 +3624,10 @@
skb_reserve(p, 2); /* Align */
if (index < lp->rx_old) { /* Wrapped buffer */
short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
- memcpy(skb_put(p,tlen),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,tlen);
- memcpy(skb_put(p,len-tlen),lp->rx_bufs,len-tlen);
+ skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, tlen);
+ skb_put_data(p, lp->rx_bufs, len - tlen);
} else { /* Linear buffer */
- memcpy(skb_put(p,len),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,len);
+ skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, len);
}
return p;
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index ba6ae24..8df8088 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -218,9 +218,9 @@
pkt_len);
skb_put(skb, pkt_len);
#else
- memcpy(skb_put(skb, pkt_len),
- tp->rx_buffers[entry].skb->data,
- pkt_len);
+ skb_put_data(skb,
+ tp->rx_buffers[entry].skb->data,
+ pkt_len);
#endif
pci_dma_sync_single_for_device(tp->pdev,
tp->rx_buffers[entry].mapping,
@@ -444,9 +444,9 @@
pkt_len);
skb_put(skb, pkt_len);
#else
- memcpy(skb_put(skb, pkt_len),
- tp->rx_buffers[entry].skb->data,
- pkt_len);
+ skb_put_data(skb,
+ tp->rx_buffers[entry].skb->data,
+ pkt_len);
#endif
pci_dma_sync_single_for_device(tp->pdev,
tp->rx_buffers[entry].mapping,
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 8d98b259..7fc248e 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -864,9 +864,9 @@
skb = new_skb;
/* size less than COPY_SIZE, allocate a rxlen SKB */
skb_reserve(skb, 2); /* 16byte align */
- memcpy(skb_put(skb, rxlen),
- skb_tail_pointer(rxptr->rx_skb_ptr),
- rxlen);
+ skb_put_data(skb,
+ skb_tail_pointer(rxptr->rx_skb_ptr),
+ rxlen);
uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
} else
skb_put(skb, rxlen);
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index d1f2f3c..32d7229 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -1395,13 +1395,12 @@
struct ethtool_link_ksettings *cmd)
{
struct netdev_private *np = netdev_priv(dev);
- int rc;
spin_lock_irq(&np->lock);
- rc = mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
+ mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
spin_unlock_irq(&np->lock);
- return rc;
+ return 0;
}
static int netdev_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 3e77dd8..5a84794 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -399,7 +399,7 @@
* 'skb_put()' points to the start of sk_buff
* data area.
*/
- data_ptr = (unsigned int *)skb_put(skb, pkt_len);
+ data_ptr = skb_put(skb, pkt_len);
for (i = 0; i < (pkt_len + 3) >> 2; i++)
*data_ptr++ = dnet_readl(bp, RX_DATA_FIFO);
skb->protocol = eth_type_trans(skb, dev);
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 278f139..4ee042c 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -223,7 +223,7 @@
skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
if (skb) {
- memcpy(skb_put(skb, pkt_size), data, pkt_size);
+ skb_put_data(skb, data, pkt_size);
skb->protocol = eth_type_trans(skb, priv->net_dev);
priv->stat_rx_bytes += pkt_size;
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 5056624..674cf9d 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -37,7 +37,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "11.1.0.0"
+#define DRV_VER "11.4.0.0"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 36e4232..c967f45 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -49,6 +49,9 @@
#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
#define POST_STAGE_RECOVERABLE_ERR 0xE000 /* Recoverable err detected */
+/* FW has detected a UE and is dumping FAT log data */
+#define POST_STAGE_FAT_LOG_START 0x0D00
+#define POST_STAGE_ARMFW_UE 0xF000 /* FW has asserted a UE */
/* Lancer SLIPORT registers */
#define SLIPORT_STATUS_OFFSET 0x404
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 4eee18c..319eee3 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3241,8 +3241,9 @@
{
u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
- u32 i;
struct device *dev = &adapter->pdev->dev;
+ u16 val;
+ u32 i;
if (be_check_error(adapter, BE_ERROR_HW))
return;
@@ -3280,15 +3281,25 @@
ue_lo = (ue_lo & ~ue_lo_mask);
ue_hi = (ue_hi & ~ue_hi_mask);
- /* On certain platforms BE hardware can indicate spurious UEs.
- * Allow HW to stop working completely in case of a real UE.
- * Hence not setting the hw_error for UE detection.
- */
-
if (ue_lo || ue_hi) {
+ /* On certain platforms BE3 hardware can indicate
+ * spurious UEs. In case of a UE in the chip,
+ * the POST register correctly reports either a
+ * FAT_LOG_START state (FW is currently dumping
+ * FAT log data) or a ARMFW_UE state. Check for the
+ * above states to ascertain if the UE is valid or not.
+ */
+ if (BE3_chip(adapter)) {
+ val = be_POST_stage_get(adapter);
+ if ((val & POST_STAGE_FAT_LOG_START)
+ != POST_STAGE_FAT_LOG_START &&
+ (val & POST_STAGE_ARMFW_UE)
+ != POST_STAGE_ARMFW_UE)
+ return;
+ }
+
dev_err(dev, "Error detected in the adapter");
- if (skyhawk_chip(adapter))
- be_set_error(adapter, BE_ERROR_UE);
+ be_set_error(adapter, BE_ERROR_UE);
for (i = 0; ue_lo; ue_lo >>= 1, i++) {
if (ue_lo & 1)
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 1536356..66928a9 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -829,7 +829,10 @@
struct ethtool_link_ksettings *cmd)
{
struct ftmac100 *priv = netdev_priv(netdev);
- return mii_ethtool_get_link_ksettings(&priv->mii, cmd);
+
+ mii_ethtool_get_link_ksettings(&priv->mii, cmd);
+
+ return 0;
}
static int ftmac100_set_link_ksettings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 766636a..e92859d 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1711,8 +1711,8 @@
np->cur_rx->skbuff->data, pkt_len);
skb_put(skb, pkt_len);
#else
- memcpy(skb_put(skb, pkt_len),
- np->cur_rx->skbuff->data, pkt_len);
+ skb_put_data(skb, np->cur_rx->skbuff->data,
+ pkt_len);
#endif
pci_dma_sync_single_for_device(np->pci_dev,
np->cur_rx->buffer,
@@ -1821,13 +1821,12 @@
struct ethtool_link_ksettings *cmd)
{
struct netdev_private *np = netdev_priv(dev);
- int rc;
spin_lock_irq(&np->lock);
- rc = mii_ethtool_get_link_ksettings(&np->mii, cmd);
+ mii_ethtool_get_link_ksettings(&np->mii, cmd);
spin_unlock_irq(&np->lock);
- return rc;
+ return 0;
}
static int netdev_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 290ad05..757b873 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -342,8 +342,8 @@
}
}
-static int dpaa_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc)
+static int dpaa_setup_tc(struct net_device *net_dev, u32 handle,
+ u32 chain_index, __be16 proto, struct tc_to_netdev *tc)
{
struct dpaa_priv *priv = netdev_priv(net_dev);
u8 num_tc;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 15571e2..aad825088 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -75,16 +75,14 @@
static int dpaa_get_link_ksettings(struct net_device *net_dev,
struct ethtool_link_ksettings *cmd)
{
- int err;
-
if (!net_dev->phydev) {
netdev_dbg(net_dev, "phy device not initialized\n");
return 0;
}
- err = phy_ethtool_ksettings_get(net_dev->phydev, cmd);
+ phy_ethtool_ksettings_get(net_dev->phydev, cmd);
- return err;
+ return 0;
}
static int dpaa_set_link_ksettings(struct net_device *net_dev,
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 5ea740b..38c7b21 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -446,6 +446,10 @@
#define FEC_QUIRK_HAS_COALESCE (1 << 13)
/* Interrupt doesn't wake CPU from deep idle */
#define FEC_QUIRK_ERR006687 (1 << 14)
+/* The MIB counters should be cleared and enabled during
+ * initialisation.
+ */
+#define FEC_QUIRK_MIB_CLEAR (1 << 15)
struct bufdesc_prop {
int qid;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f7c8649..a6e323f 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -89,10 +89,10 @@
.driver_data = 0,
}, {
.name = "imx25-fec",
- .driver_data = FEC_QUIRK_USE_GASKET,
+ .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR,
}, {
.name = "imx27-fec",
- .driver_data = 0,
+ .driver_data = FEC_QUIRK_MIB_CLEAR,
}, {
.name = "imx28-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
@@ -184,6 +184,9 @@
#define FEC_RACC_SHIFT16 BIT(7)
#define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
+/* MIB Control Register */
+#define FEC_MIB_CTRLSTAT_DISABLE BIT(31)
+
/*
* The 5270/5271/5280/5282/532x RX control register also contains maximum frame
* size bits. Other FEC hardware does not, so we need to take that into
@@ -2356,11 +2359,30 @@
}
}
+static void fec_enet_clear_ethtool_stats(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ int i;
+
+ /* Disable MIB statistics counters */
+ writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
+
+ for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
+ writel(0, fep->hwp + fec_stats[i].offset);
+
+ /* Don't disable MIB statistics counters */
+ writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
+}
+
#else /* !defined(CONFIG_M5272) */
#define FEC_STATS_SIZE 0
static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
{
}
+
+static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
+{
+}
#endif /* !defined(CONFIG_M5272) */
/* ITR clock source is enet system clock (clk_ahb).
@@ -3182,7 +3204,10 @@
fec_restart(ndev);
- fec_enet_update_ethtool_stats(ndev);
+ if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
+ fec_enet_clear_ethtool_stats(ndev);
+ else
+ fec_enet_update_ethtool_stats(ndev);
return 0;
}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 0ff166e..a79e257 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2250,7 +2250,7 @@
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
- struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
+ struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
memset(fcb, 0, GMAC_FCB_LEN);
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index b642990..4df282e 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -113,7 +113,9 @@
if (!phydev)
return -ENODEV;
- return phy_ethtool_ksettings_get(phydev, cmd);
+ phy_ethtool_ksettings_get(phydev, cmd);
+
+ return 0;
}
static int
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index e95795b..00e57bb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -150,7 +150,7 @@
cmd->base.duplex = duplex;
if (net_dev->phydev)
- (void)phy_ethtool_ksettings_get(net_dev->phydev, cmd);
+ phy_ethtool_ksettings_get(net_dev->phydev, cmd);
link_stat = hns_nic_get_link(net_dev);
if (!link_stat) {
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 5673b07..c6164a9 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1281,7 +1281,7 @@
*/
skb_reserve(ringptr->skb, 2);
- ringptr->skb->data = (u_char *) skb_put(ringptr->skb, MAX_ETHER_SIZE);
+ ringptr->skb->data = skb_put(ringptr->skb, MAX_ETHER_SIZE);
/* ringptr->pdl points to the beginning of the PDL, i.e. the PDH */
/* Note: 1st Fragment is used for the 4 byte packet status
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index 9458838..d719668 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -809,7 +809,8 @@
if (!rx_in_place) {
/* 16 byte align the data fields */
skb_reserve(skb, 2);
- memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
+ skb_put_data(skb, rbd->v_data,
+ pkt_len);
}
skb->protocol=eth_type_trans(skb,dev);
skb->len = pkt_len;
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index e867733..8449c58 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -727,7 +727,8 @@
dma_sync_single_for_cpu(dev->dev.parent,
(dma_addr_t)SWAP32(rbd->b_data),
PKT_BUF_SZ, DMA_FROM_DEVICE);
- memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
+ skb_put_data(skb, rbd->v_data,
+ pkt_len);
dma_sync_single_for_device(dev->dev.parent,
(dma_addr_t)SWAP32(rbd->b_data),
PKT_BUF_SZ, DMA_FROM_DEVICE);
diff --git a/drivers/net/ethernet/ibm/emac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c
index 5b88cc6..35865d0 100644
--- a/drivers/net/ethernet/ibm/emac/phy.c
+++ b/drivers/net/ethernet/ibm/emac/phy.c
@@ -276,7 +276,7 @@
}
/* Generic implementation for most 10/100/1000 PHYs */
-static struct mii_phy_ops generic_phy_ops = {
+static const struct mii_phy_ops generic_phy_ops = {
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
.poll_link = genmii_poll_link,
@@ -340,7 +340,7 @@
return 0;
}
-static struct mii_phy_ops cis8201_phy_ops = {
+static const struct mii_phy_ops cis8201_phy_ops = {
.init = cis8201_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
@@ -420,7 +420,7 @@
return 0;
}
-static struct mii_phy_ops et1011c_phy_ops = {
+static const struct mii_phy_ops et1011c_phy_ops = {
.init = et1011c_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
@@ -439,7 +439,7 @@
-static struct mii_phy_ops m88e1111_phy_ops = {
+static const struct mii_phy_ops m88e1111_phy_ops = {
.init = m88e1111_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
@@ -455,7 +455,7 @@
.ops = &m88e1111_phy_ops,
};
-static struct mii_phy_ops m88e1112_phy_ops = {
+static const struct mii_phy_ops m88e1112_phy_ops = {
.init = m88e1112_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
@@ -480,7 +480,7 @@
return 0;
}
-static struct mii_phy_ops ar8035_phy_ops = {
+static const struct mii_phy_ops ar8035_phy_ops = {
.init = ar8035_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 72ab7b6..9a74c4e 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -46,6 +46,8 @@
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>
+#include <net/tcp.h>
+#include <net/ip6_checksum.h>
#include "ibmveth.h"
@@ -808,8 +810,7 @@
ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
- if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
- !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
+ if (ret == H_SUCCESS &&
(ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
set_attr, &ret_attr);
@@ -1040,6 +1041,15 @@
dma_addr_t dma_addr;
unsigned long mss = 0;
+ /* veth doesn't handle frag_list, so linearize the skb.
+ * When GRO is enabled, SKBs can have a frag_list.
+ */
+ if (adapter->is_active_trunk &&
+ skb_has_frag_list(skb) && __skb_linearize(skb)) {
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
+
/*
* veth handles a maximum of 6 segments including the header, so
* we have to linearize the skb if there are more than this.
@@ -1064,9 +1074,6 @@
desc_flags = IBMVETH_BUF_VALID;
- if (skb_is_gso(skb) && adapter->fw_large_send_support)
- desc_flags |= IBMVETH_BUF_LRG_SND;
-
if (skb->ip_summed == CHECKSUM_PARTIAL) {
unsigned char *buf = skb_transport_header(skb) +
skb->csum_offset;
@@ -1076,6 +1083,9 @@
/* Need to zero out the checksum */
buf[0] = 0;
buf[1] = 0;
+
+ if (skb_is_gso(skb) && adapter->fw_large_send_support)
+ desc_flags |= IBMVETH_BUF_LRG_SND;
}
retry_bounce:
@@ -1128,7 +1138,7 @@
descs[i+1].fields.address = dma_addr;
}
- if (skb_is_gso(skb)) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL && skb_is_gso(skb)) {
if (adapter->fw_large_send_support) {
mss = (unsigned long)skb_shinfo(skb)->gso_size;
adapter->tx_large_packets++;
@@ -1232,6 +1242,71 @@
}
}
+static void ibmveth_rx_csum_helper(struct sk_buff *skb,
+ struct ibmveth_adapter *adapter)
+{
+ struct iphdr *iph = NULL;
+ struct ipv6hdr *iph6 = NULL;
+ __be16 skb_proto = 0;
+ u16 iphlen = 0;
+ u16 iph_proto = 0;
+ u16 tcphdrlen = 0;
+
+ skb_proto = be16_to_cpu(skb->protocol);
+
+ if (skb_proto == ETH_P_IP) {
+ iph = (struct iphdr *)skb->data;
+
+ /* If the IP checksum is not offloaded and if the packet
+ * is large send, the checksum must be rebuilt.
+ */
+ if (iph->check == 0xffff) {
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph,
+ iph->ihl);
+ }
+
+ iphlen = iph->ihl * 4;
+ iph_proto = iph->protocol;
+ } else if (skb_proto == ETH_P_IPV6) {
+ iph6 = (struct ipv6hdr *)skb->data;
+ iphlen = sizeof(struct ipv6hdr);
+ iph_proto = iph6->nexthdr;
+ }
+
+ /* In OVS environment, when a flow is not cached, specifically for a
+ * new TCP connection, the first packet information is passed up
+ * to user space for finding a flow. During this process, OVS computes
+ * checksum on the first packet when CHECKSUM_PARTIAL flag is set.
+ *
+ * Given that we zeroed out TCP checksum field in transmit path
+ * (refer ibmveth_start_xmit routine) as we set "no checksum bit",
+ * OVS computed checksum will be incorrect w/o TCP pseudo checksum
+ * in the packet. This leads to OVS dropping the packet and hence
+ * TCP retransmissions are seen.
+ *
+ * So, re-compute TCP pseudo header checksum.
+ */
+ if (iph_proto == IPPROTO_TCP && adapter->is_active_trunk) {
+ struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
+
+ tcphdrlen = skb->len - iphlen;
+
+ /* Recompute TCP pseudo header checksum */
+ if (skb_proto == ETH_P_IP)
+ tcph->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, tcphdrlen, iph_proto, 0);
+ else if (skb_proto == ETH_P_IPV6)
+ tcph->check = ~csum_ipv6_magic(&iph6->saddr,
+ &iph6->daddr, tcphdrlen, iph_proto, 0);
+
+ /* Setup SKB fields for checksum offload */
+ skb_partial_csum_set(skb, iphlen,
+ offsetof(struct tcphdr, check));
+ skb_reset_network_header(skb);
+ }
+}
+
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
struct ibmveth_adapter *adapter =
@@ -1239,7 +1314,6 @@
struct net_device *netdev = adapter->netdev;
int frames_processed = 0;
unsigned long lpar_rc;
- struct iphdr *iph;
u16 mss = 0;
restart_poll:
@@ -1297,17 +1371,7 @@
if (csum_good) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
- iph = (struct iphdr *)skb->data;
-
- /* If the IP checksum is not offloaded and if the packet
- * is large send, the checksum must be rebuilt.
- */
- if (iph->check == 0xffff) {
- iph->check = 0;
- iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
- }
- }
+ ibmveth_rx_csum_helper(skb, adapter);
}
if (length > netdev->mtu + ETH_HLEN) {
@@ -1626,6 +1690,13 @@
netdev->hw_features |= NETIF_F_TSO;
}
+ adapter->is_active_trunk = false;
+ if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK)) {
+ adapter->is_active_trunk = true;
+ netdev->hw_features |= NETIF_F_FRAGLIST;
+ netdev->features |= NETIF_F_FRAGLIST;
+ }
+
netdev->min_mtu = IBMVETH_MIN_MTU;
netdev->max_mtu = ETH_MAX_MTU;
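The checksum rebuild in ibmveth_rx_csum_helper() above restores a single invariant; a sketch of the two steps for the IPv4 case, reusing the helper's field names:

	/* seed tcph->check with the complemented pseudo-header sum so a
	 * later fold over the TCP payload yields the final checksum
	 */
	tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					 tcphdrlen, IPPROTO_TCP, 0);

	/* mark the skb CHECKSUM_PARTIAL: checksumming starts at the
	 * TCP header and the result lands in the check field
	 */
	skb_partial_csum_set(skb, iphlen, offsetof(struct tcphdr, check));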
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index ed8780c..01c587f 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -156,6 +156,7 @@
int pool_config;
int rx_csum;
int large_send;
+ bool is_active_trunk;
void *bounce_buffer;
dma_addr_t bounce_buffer_dma;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index c0fbeb3..0135095 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -163,6 +163,16 @@
return rc;
}
+static void reset_long_term_buff(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_long_term_buff *ltb)
+{
+ memset(ltb->buff, 0, ltb->size);
+
+ init_completion(&adapter->fw_done);
+ send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
+ wait_for_completion(&adapter->fw_done);
+}
+
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb, int size)
{
@@ -200,6 +210,15 @@
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
+static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+ i++)
+ adapter->rx_pool[i].active = 0;
+}
+
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
struct ibmvnic_rx_pool *pool)
{
@@ -217,6 +236,9 @@
int index;
int i;
+ if (!pool->active)
+ return;
+
handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
be32_to_cpu(adapter->login_rsp_buf->
off_rxadd_subcrqs));
@@ -287,6 +309,15 @@
dev_kfree_skb_any(skb);
adapter->replenish_add_buff_failure++;
atomic_add(buffers_added, &pool->available);
+
+ if (lpar_rc == H_CLOSED) {
+ /* Disable buffer pool replenishment and report carrier off if
+ * queue is closed. Firmware guarantees that a signal will
+ * be sent to the driver, triggering a reset.
+ */
+ deactivate_rx_pools(adapter);
+ netif_carrier_off(adapter->netdev);
+ }
}
static void replenish_pools(struct ibmvnic_adapter *adapter)
@@ -331,6 +362,33 @@
return 0;
}
+static int reset_rx_pools(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_rx_pool *rx_pool;
+ int rx_scrqs;
+ int i, j;
+
+ rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+ for (i = 0; i < rx_scrqs; i++) {
+ rx_pool = &adapter->rx_pool[i];
+
+ reset_long_term_buff(adapter, &rx_pool->long_term_buff);
+
+ for (j = 0; j < rx_pool->size; j++)
+ rx_pool->free_map[j] = j;
+
+ memset(rx_pool->rx_buff, 0,
+ rx_pool->size * sizeof(struct ibmvnic_rx_buff));
+
+ atomic_set(&rx_pool->available, 0);
+ rx_pool->next_alloc = 0;
+ rx_pool->next_free = 0;
+ rx_pool->active = 1;
+ }
+
+ return 0;
+}
+
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rx_pool *rx_pool;
@@ -432,6 +490,32 @@
return 0;
}
+static int reset_tx_pools(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_tx_pool *tx_pool;
+ int tx_scrqs;
+ int i, j;
+
+ tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ for (i = 0; i < tx_scrqs; i++) {
+ tx_pool = &adapter->tx_pool[i];
+
+ reset_long_term_buff(adapter, &tx_pool->long_term_buff);
+
+ memset(tx_pool->tx_buff, 0,
+ adapter->req_tx_entries_per_subcrq *
+ sizeof(struct ibmvnic_tx_buff));
+
+ for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
+ tx_pool->free_map[j] = j;
+
+ tx_pool->consumer_index = 0;
+ tx_pool->producer_index = 0;
+ }
+
+ return 0;
+}
+
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_tx_pool *tx_pool;
@@ -518,6 +602,32 @@
spin_unlock_irqrestore(&adapter->error_list_lock, flags);
}
+static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ if (adapter->napi_enabled)
+ return;
+
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ napi_enable(&adapter->napi[i]);
+
+ adapter->napi_enabled = true;
+}
+
+static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ if (!adapter->napi_enabled)
+ return;
+
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ napi_disable(&adapter->napi[i]);
+
+ adapter->napi_enabled = false;
+}
+
static int ibmvnic_login(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
@@ -674,9 +784,7 @@
adapter->state = VNIC_OPENING;
replenish_pools(adapter);
-
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_enable(&adapter->napi[i]);
+ ibmvnic_napi_enable(adapter);
/* We're ready to receive frames, enable the sub-crq interrupts and
* set the logical link state to up
@@ -778,14 +886,14 @@
int i;
adapter->state = VNIC_CLOSING;
- netif_tx_stop_all_queues(netdev);
- if (adapter->napi) {
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_disable(&adapter->napi[i]);
- }
+ /* ensure that transmissions are stopped if called by do_reset */
+ if (adapter->resetting)
+ netif_tx_disable(netdev);
+ else
+ netif_tx_stop_all_queues(netdev);
- clean_tx_pools(adapter);
+ ibmvnic_napi_disable(adapter);
if (adapter->tx_scrq) {
for (i = 0; i < adapter->req_tx_queues; i++)
@@ -814,6 +922,7 @@
}
}
+ clean_tx_pools(adapter);
adapter->state = VNIC_CLOSED;
return rc;
}
@@ -1092,8 +1201,14 @@
dev_kfree_skb_any(skb);
tx_buff->skb = NULL;
- if (lpar_rc == H_CLOSED)
- netif_stop_subqueue(netdev, queue_num);
+ if (lpar_rc == H_CLOSED) {
+ /* Disable TX and report carrier off if queue is closed.
+ * Firmware guarantees that a signal will be sent to the
+ * driver, triggering a reset or some other action.
+ */
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ }
tx_send_failed++;
tx_dropped++;
@@ -1206,38 +1321,40 @@
if (rc)
return rc;
- /* remove the closed state so when we call open it appears
- * we are coming from the probed state.
- */
- adapter->state = VNIC_PROBED;
-
- release_resources(adapter);
- release_sub_crqs(adapter);
- release_crq_queue(adapter);
-
- rc = ibmvnic_init(adapter);
- if (rc)
- return 0;
-
- /* If the adapter was in PROBE state prior to the reset, exit here. */
- if (reset_state == VNIC_PROBED)
- return 0;
-
- rc = ibmvnic_login(netdev);
- if (rc) {
+ if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
+ /* remove the closed state so when we call open it appears
+ * we are coming from the probed state.
+ */
adapter->state = VNIC_PROBED;
- return 0;
+
+ rc = ibmvnic_init(adapter);
+ if (rc)
+ return 0;
+
+ /* If the adapter was in PROBE state prior to the reset,
+ * exit here.
+ */
+ if (reset_state == VNIC_PROBED)
+ return 0;
+
+ rc = ibmvnic_login(netdev);
+ if (rc) {
+ adapter->state = VNIC_PROBED;
+ return 0;
+ }
+
+ rc = reset_tx_pools(adapter);
+ if (rc)
+ return rc;
+
+ rc = reset_rx_pools(adapter);
+ if (rc)
+ return rc;
+
+ if (reset_state == VNIC_CLOSED)
+ return 0;
}
- rtnl_lock();
- rc = init_resources(adapter);
- rtnl_unlock();
- if (rc)
- return rc;
-
- if (reset_state == VNIC_CLOSED)
- return 0;
-
rc = __ibmvnic_open(netdev);
if (rc) {
if (list_empty(&adapter->rwi_list))
@@ -1254,6 +1371,9 @@
for (i = 0; i < adapter->req_rx_queues; i++)
napi_schedule(&adapter->napi[i]);
+ if (adapter->reset_reason != VNIC_RESET_FAILOVER)
+ netdev_notify_peers(netdev);
+
return 0;
}
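The restructuring above makes VNIC_RESET_NON_FATAL the lightweight path: a non-fatal reset skips CRQ re-initialization, login and pool resets entirely and proceeds straight to __ibmvnic_open(), while every other reset reason renegotiates with firmware first.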
@@ -1313,6 +1433,7 @@
if (rc) {
free_all_rwi(adapter);
+ mutex_unlock(&adapter->reset_lock);
return;
}
@@ -1333,6 +1454,12 @@
return;
}
+ if (adapter->state == VNIC_PROBING) {
+ netdev_warn(netdev, "Adapter reset during probe\n");
+ adapter->init_done_rc = EAGAIN;
+ return;
+ }
+
mutex_lock(&adapter->rwi_lock);
list_for_each(entry, &adapter->rwi_list) {
@@ -1383,6 +1510,7 @@
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int scrq_num = (int)(napi - adapter->napi);
int frames_processed = 0;
+
restart_poll:
while (frames_processed < budget) {
struct sk_buff *skb;
@@ -1392,6 +1520,12 @@
u16 offset;
u8 flags = 0;
+ if (unlikely(adapter->resetting)) {
+ enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
+ napi_complete_done(napi, frames_processed);
+ return frames_processed;
+ }
+
if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
break;
next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
@@ -1441,7 +1575,9 @@
netdev->stats.rx_bytes += length;
frames_processed++;
}
- replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
+
+ if (adapter->state != VNIC_CLOSING)
+ replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
if (frames_processed < budget) {
enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
@@ -1614,6 +1750,45 @@
/* Routines for managing CRQs/sCRQs */
+static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_sub_crq_queue *scrq)
+{
+ int rc;
+
+ if (scrq->irq) {
+ free_irq(scrq->irq, scrq);
+ irq_dispose_mapping(scrq->irq);
+ scrq->irq = 0;
+ }
+
+ memset(scrq->msgs, 0, 4 * PAGE_SIZE);
+ scrq->cur = 0;
+
+ rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
+ 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
+ return rc;
+}
+
+static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
+{
+ int i, rc;
+
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
+ if (rc)
+ return rc;
+ }
+
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
+ if (rc)
+ return rc;
+ }
+
+ rc = init_sub_crq_irqs(adapter);
+ return rc;
+}
+
static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq)
{
@@ -2109,8 +2284,7 @@
{
union sub_crq *entry = &scrq->msgs[scrq->cur];
- if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP ||
- adapter->state == VNIC_CLOSING)
+ if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
return 1;
else
return 0;
@@ -2748,6 +2922,8 @@
if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
ibmvnic_reset(adapter, VNIC_RESET_FATAL);
+ else
+ ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}
static void handle_change_mac_rsp(union ibmvnic_crq *crq,
@@ -3153,6 +3329,8 @@
switch (gen_crq->cmd) {
case IBMVNIC_CRQ_INIT:
dev_info(dev, "Partner initialized\n");
+ adapter->from_passive_init = true;
+ complete(&adapter->init_done);
break;
case IBMVNIC_CRQ_INIT_COMPLETE:
dev_info(dev, "Partner initialization complete\n");
@@ -3461,21 +3639,44 @@
unsigned long timeout = msecs_to_jiffies(30000);
int rc;
- rc = init_crq_queue(adapter);
+ if (adapter->resetting) {
+ rc = ibmvnic_reset_crq(adapter);
+ if (!rc)
+ rc = vio_enable_interrupts(adapter->vdev);
+ } else {
+ rc = init_crq_queue(adapter);
+ }
+
if (rc) {
dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
return rc;
}
+ adapter->from_passive_init = false;
+
init_completion(&adapter->init_done);
+ adapter->init_done_rc = 0;
ibmvnic_send_crq_init(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
dev_err(dev, "Initialization sequence timed out\n");
- release_crq_queue(adapter);
return -1;
}
- rc = init_sub_crqs(adapter);
+ if (adapter->init_done_rc) {
+ release_crq_queue(adapter);
+ return adapter->init_done_rc;
+ }
+
+ if (adapter->from_passive_init) {
+ adapter->state = VNIC_OPEN;
+ adapter->from_passive_init = false;
+ return -1;
+ }
+
+ if (adapter->resetting)
+ rc = reset_sub_crq_queues(adapter);
+ else
+ rc = init_sub_crqs(adapter);
if (rc) {
dev_err(dev, "Initialization of sub crqs failed\n");
release_crq_queue(adapter);
@@ -3484,6 +3685,8 @@
return rc;
}
+static struct device_attribute dev_attr_failover;
+
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
struct ibmvnic_adapter *adapter;
@@ -3532,17 +3735,26 @@
mutex_init(&adapter->rwi_lock);
adapter->resetting = false;
- rc = ibmvnic_init(adapter);
+ do {
+ rc = ibmvnic_init(adapter);
+ if (rc && rc != EAGAIN) {
+ free_netdev(netdev);
+ return rc;
+ }
+ } while (rc == EAGAIN);
+
+ netdev->mtu = adapter->req_mtu - ETH_HLEN;
+
+ rc = device_create_file(&dev->dev, &dev_attr_failover);
if (rc) {
free_netdev(netdev);
return rc;
}
- netdev->mtu = adapter->req_mtu - ETH_HLEN;
-
rc = register_netdev(netdev);
if (rc) {
dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
+ device_remove_file(&dev->dev, &dev_attr_failover);
free_netdev(netdev);
return rc;
}
@@ -3568,12 +3780,49 @@
adapter->state = VNIC_REMOVED;
mutex_unlock(&adapter->reset_lock);
+ device_remove_file(&dev->dev, &dev_attr_failover);
free_netdev(netdev);
dev_set_drvdata(&dev->dev, NULL);
return 0;
}
+static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ __be64 session_token;
+ long rc;
+
+ if (!sysfs_streq(buf, "1"))
+ return -EINVAL;
+
+ rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
+ H_GET_SESSION_TOKEN, 0, 0, 0);
+ if (rc) {
+ netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
+ rc);
+ return -EINVAL;
+ }
+
+ session_token = (__be64)retbuf[0];
+ netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
+ be64_to_cpu(session_token));
+ rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
+ H_SESSION_ERR_DETECTED, session_token, 0, 0);
+ if (rc) {
+ netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
+ rc);
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(failover, 0200, NULL, failover_store);
+
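The attribute is write-only (mode 0200): writing the string "1" to the failover file under the vio device's sysfs directory (for example /sys/bus/vio/devices/<unit-address>/failover, path illustrative) drives the H_GET_SESSION_TOKEN / H_SESSION_ERR_DETECTED sequence above; any other input is rejected with -EINVAL.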
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
struct net_device *netdev = dev_get_drvdata(&vdev->dev);
@@ -3610,6 +3859,9 @@
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int i;
+ if (adapter->state != VNIC_OPEN)
+ return 0;
+
/* kick the interrupt handlers just in case we lost an interrupt */
for (i = 0; i < adapter->req_rx_queues; i++)
ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 4702b48..7e2300e 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -925,6 +925,7 @@
enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
VNIC_RESET_MOBILITY,
VNIC_RESET_FATAL,
+ VNIC_RESET_NON_FATAL,
VNIC_RESET_TIMEOUT};
struct ibmvnic_rwi {
@@ -1031,4 +1032,5 @@
struct list_head rwi_list;
struct work_struct ibmvnic_reset;
bool resetting;
+ bool napi_enabled, from_passive_init;
};
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 1542a21..1feb54b 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -236,12 +236,14 @@
If unsure, say N.
config I40EVF
- tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
+ tristate "Intel(R) Ethernet Adaptive Virtual Function support"
depends on PCI_MSI
---help---
- This driver supports Intel(R) XL710 and X710 virtual functions.
- For more information on how to identify your adapter, go to the
- Adapter & Driver ID Guide that can be located at:
+ This driver supports virtual functions for Intel XL710,
+ X710, X722, and all devices advertising support for Intel
+ Ethernet Adaptive Virtual Function devices. For more
+ information on how to identify your adapter, go to the Adapter
+ & Driver ID Guide that can be located at:
<http://support.intel.com>
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 2b7323d..4d10270 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2430,7 +2430,10 @@
struct ethtool_link_ksettings *cmd)
{
struct nic *nic = netdev_priv(netdev);
- return mii_ethtool_get_link_ksettings(&nic->mii, cmd);
+
+ mii_ethtool_get_link_ksettings(&nic->mii, cmd);
+
+ return 0;
}
static int e100_set_link_ksettings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index bd8b05f..98375e1 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4345,7 +4345,7 @@
dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
length, DMA_FROM_DEVICE);
- memcpy(skb_put(skb, length), data, length);
+ skb_put_data(skb, data, length);
return skb;
}
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index c7c994e..98e6888 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -268,6 +268,7 @@
u32 tx_fifo_size;
u32 tx_dma_failed;
u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_skipped;
/* Rx */
bool (*clean_rx)(struct e1000_ring *ring, int *work_done,
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index e23dbd9..003cbd6 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -105,6 +105,7 @@
E1000_STAT("uncorr_ecc_errors", uncorr_errors),
E1000_STAT("corr_ecc_errors", corr_errors),
E1000_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ E1000_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
};
#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
@@ -2072,7 +2073,7 @@
pm_runtime_get_sync(netdev->dev.parent);
- e1000e_get_stats64(netdev, &net_stats);
+ dev_get_stats(netdev, &net_stats);
pm_runtime_put_sync(netdev->dev.parent);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index b367972..2dcb546 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1183,6 +1183,7 @@
struct e1000_hw *hw = &adapter->hw;
if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
+ struct sk_buff *skb = adapter->tx_hwtstamp_skb;
struct skb_shared_hwtstamps shhwtstamps;
u64 txstmp;
@@ -1191,9 +1192,14 @@
e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);
- skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
- dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+ /* Clear the global tx_hwtstamp_skb pointer and force writes
+ * prior to notifying the stack of a Tx timestamp.
+ */
adapter->tx_hwtstamp_skb = NULL;
+ wmb(); /* force write prior to skb_tstamp_tx */
+
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
} else if (time_after(jiffies, adapter->tx_hwtstamp_start
+ adapter->tx_timeout_factor * HZ)) {
dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
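The reordering is the point of this hunk: skb_tstamp_tx() can immediately wake a consumer that submits the next timestamp request, and the transmit path skips hardware timestamping while tx_hwtstamp_skb is non-NULL. Clearing the pointer first, with a write barrier before notifying the stack, keeps a back-to-back request from being refused against a stale pointer.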
@@ -3680,6 +3686,7 @@
* Delay Request messages but not both so fall-through to
* time stamp all packets.
*/
+ case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
is_l2 = true;
is_l4 = true;
@@ -5860,17 +5867,20 @@
nr_frags);
if (count) {
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- (adapter->flags & FLAG_HAS_HW_TIMESTAMP) &&
- !adapter->tx_hwtstamp_skb) {
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
- adapter->tx_hwtstamp_skb = skb_get(skb);
- adapter->tx_hwtstamp_start = jiffies;
- schedule_work(&adapter->tx_hwtstamp_work);
- } else {
- skb_tx_timestamp(skb);
+ (adapter->flags & FLAG_HAS_HW_TIMESTAMP)) {
+ if (!adapter->tx_hwtstamp_skb) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
+ adapter->tx_hwtstamp_skb = skb_get(skb);
+ adapter->tx_hwtstamp_start = jiffies;
+ schedule_work(&adapter->tx_hwtstamp_work);
+ } else {
+ adapter->tx_hwtstamp_skipped++;
+ }
}
+ skb_tx_timestamp(skb);
+
netdev_sent_queue(netdev, skb->len);
e1000_tx_queue(tx_ring, tx_flags, count);
/* Make sure there is space in the ring for the next send. */
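Two behavioral changes ride on this restructure: a timestamp request arriving while another is still in flight is now counted in tx_hwtstamp_skipped instead of silently falling back, and skb_tx_timestamp() now provides a software timestamp for every transmitted frame rather than only when hardware timestamping was not attempted.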
@@ -6630,12 +6640,17 @@
static int e1000e_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ int rc;
e1000e_flush_lpic(pdev);
e1000e_pm_freeze(dev);
- return __e1000_shutdown(pdev, false);
+ rc = __e1000_shutdown(pdev, false);
+ if (rc)
+ e1000e_pm_thaw(dev);
+
+ return rc;
}
static int e1000e_pm_resume(struct device *dev)
@@ -6733,20 +6748,20 @@
vector = 0;
msix_irq = adapter->msix_entries[vector].vector;
- disable_irq(msix_irq);
- e1000_intr_msix_rx(msix_irq, netdev);
+ if (disable_hardirq(msix_irq))
+ e1000_intr_msix_rx(msix_irq, netdev);
enable_irq(msix_irq);
vector++;
msix_irq = adapter->msix_entries[vector].vector;
- disable_irq(msix_irq);
- e1000_intr_msix_tx(msix_irq, netdev);
+ if (disable_hardirq(msix_irq))
+ e1000_intr_msix_tx(msix_irq, netdev);
enable_irq(msix_irq);
vector++;
msix_irq = adapter->msix_entries[vector].vector;
- disable_irq(msix_irq);
- e1000_msix_other(msix_irq, netdev);
+ if (disable_hardirq(msix_irq))
+ e1000_msix_other(msix_irq, netdev);
enable_irq(msix_irq);
}
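The disable_irq() to disable_hardirq() switch matters because this netpoll path runs in atomic context: disable_irq() also synchronizes against threaded handlers and can sleep, while disable_hardirq() waits only for the hard IRQ handler and returns false when a threaded handler is still running, so each handler is invoked here only when doing so is safe.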
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 24f2f6f..5e37387 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1265,8 +1265,8 @@
return err;
}
-static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc)
+static int __fm10k_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *tc)
{
if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 44d9610..d616f69 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -57,7 +57,7 @@
#include "i40e_type.h"
#include "i40e_prototype.h"
#include "i40e_client.h"
-#include "i40e_virtchnl.h"
+#include <linux/avf/virtchnl.h>
#include "i40e_virtchnl_pf.h"
#include "i40e_txrx.h"
#include "i40e_dcb.h"
@@ -103,6 +103,12 @@
(I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW | \
I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW)
+#define I40E_OEM_EETRACK_ID 0xffffffff
+#define I40E_OEM_GEN_SHIFT 24
+#define I40E_OEM_SNAP_MASK 0x00ff0000
+#define I40E_OEM_SNAP_SHIFT 16
+#define I40E_OEM_RELEASE_MASK 0x0000ffff
+
/* The values in here are decimal coded as hex as is the case in the NVM map*/
#define I40E_CURRENT_NVM_VERSION_HI 0x2
#define I40E_CURRENT_NVM_VERSION_LO 0x40
@@ -503,10 +509,12 @@
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
struct sk_buff *ptp_tx_skb;
+ unsigned long ptp_tx_start;
struct hwtstamp_config tstamp_config;
struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
u64 ptp_base_adj;
u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_skipped;
u32 rx_hwtstamp_cleared;
u32 latch_event_flags;
spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. */
@@ -514,9 +522,8 @@
bool ptp_tx;
bool ptp_rx;
u16 rss_table_size; /* HW RSS table size */
- /* These are only valid in NPAR modes */
- u32 npar_max_bw;
- u32 npar_min_bw;
+ u32 max_bw;
+ u32 min_bw;
u32 ioremap_len;
u32 fd_inv;
@@ -627,6 +634,7 @@
/* These are containers of ring pointers, allocated at run-time */
struct i40e_ring **rx_rings;
struct i40e_ring **tx_rings;
+ struct i40e_ring **xdp_rings; /* XDP Tx rings */
u32 active_filters;
u32 promisc_threshold;
@@ -643,6 +651,8 @@
u16 max_frame;
u16 rx_buf_len;
+ struct bpf_prog *xdp_prog;
+
/* List of q_vectors allocated to this VSI */
struct i40e_q_vector **q_vectors;
int num_q_vectors;
@@ -730,22 +740,36 @@
{
static char buf[32];
u32 full_ver;
- u8 ver, patch;
- u16 build;
full_ver = hw->nvm.oem_ver;
- ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT);
- build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) &
- I40E_OEM_VER_BUILD_MASK);
- patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK);
- snprintf(buf, sizeof(buf),
- "%x.%02x 0x%x %d.%d.%d",
- (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
- I40E_NVM_VERSION_HI_SHIFT,
- (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
- I40E_NVM_VERSION_LO_SHIFT,
- hw->nvm.eetrack, ver, build, patch);
+ if (hw->nvm.eetrack == I40E_OEM_EETRACK_ID) {
+ u8 gen, snap;
+ u16 release;
+
+ gen = (u8)(full_ver >> I40E_OEM_GEN_SHIFT);
+ snap = (u8)((full_ver & I40E_OEM_SNAP_MASK) >>
+ I40E_OEM_SNAP_SHIFT);
+ release = (u16)(full_ver & I40E_OEM_RELEASE_MASK);
+
+ snprintf(buf, sizeof(buf), "%x.%x.%x", gen, snap, release);
+ } else {
+ u8 ver, patch;
+ u16 build;
+
+ ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT);
+ build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) &
+ I40E_OEM_VER_BUILD_MASK);
+ patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK);
+
+ snprintf(buf, sizeof(buf),
+ "%x.%02x 0x%x %d.%d.%d",
+ (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
+ I40E_NVM_VERSION_HI_SHIFT,
+ (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
+ I40E_NVM_VERSION_LO_SHIFT,
+ hw->nvm.eetrack, ver, build, patch);
+ }
return buf;
}
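A worked decode of the new OEM branch, with an illustrative value: for hw->nvm.eetrack == I40E_OEM_EETRACK_ID and full_ver == 0x01020304,

	gen     = 0x01020304 >> 24                = 0x01
	snap    = (0x01020304 & 0x00ff0000) >> 16 = 0x02
	release =  0x01020304 & 0x0000ffff        = 0x0304

so the reported version string is "1.2.304".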
@@ -956,7 +980,8 @@
struct i40e_dcbx_config *old_cfg,
struct i40e_dcbx_config *new_cfg);
#endif /* CONFIG_I40E_DCB */
-void i40e_ptp_rx_hang(struct i40e_vsi *vsi);
+void i40e_ptp_rx_hang(struct i40e_pf *pf);
+void i40e_ptp_tx_hang(struct i40e_pf *pf);
void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf);
void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
void i40e_ptp_set_increment(struct i40e_pf *pf);
@@ -965,8 +990,13 @@
void i40e_ptp_init(struct i40e_pf *pf);
void i40e_ptp_stop(struct i40e_pf *pf);
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
-i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf);
-i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf);
-i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf);
+i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
+i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
+i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf);
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
+
+static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
+{
+ return !!vsi->xdp_prog;
+}
#endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 5eb0411..5d5f422 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -531,7 +531,7 @@
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
#define I40E_AQC_MC_MAG_EN_VALID 0x100
-#define I40E_AQC_ADDR_VALID_MASK 0x1F0
+#define I40E_AQC_ADDR_VALID_MASK 0x3F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index c3b81a9..1b1e2ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -273,8 +273,8 @@
if (!cdev || !cdev->client)
goto out;
if (!cdev->client->ops || !cdev->client->ops->vf_capable) {
- dev_info(&pf->pdev->dev,
- "Cannot locate client instance VF capability routine\n");
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance VF capability routine\n");
goto out;
}
if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state))
@@ -565,7 +565,7 @@
struct i40e_hw *hw = &pf->hw;
i40e_status err;
- err = i40e_aq_send_msg_to_vf(hw, vf_id, I40E_VIRTCHNL_OP_IWARP,
+ err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_IWARP,
0, msg, len, NULL);
if (err)
dev_err(&pf->pdev->dev, "Unable to send iWarp message to VF, error %d, aq status %d\n",
@@ -595,6 +595,8 @@
size = sizeof(struct i40e_qvlist_info) +
(sizeof(struct i40e_qv_info) * (qvlist_info->num_vectors - 1));
ldev->qvlist_info = kzalloc(size, GFP_KERNEL);
+ if (!ldev->qvlist_info)
+ return -ENOMEM;
ldev->qvlist_info->num_vectors = qvlist_info->num_vectors;
for (i = 0; i < qvlist_info->num_vectors; i++) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 24f0206..8e082a9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -27,7 +27,7 @@
#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
-#include "i40e_virtchnl.h"
+#include <linux/avf/virtchnl.h>
/**
* i40e_set_mac_type - Sets MAC type
@@ -3614,11 +3614,15 @@
/**
* i40e_aq_add_udp_tunnel
* @hw: pointer to the hw struct
- * @udp_port: the UDP port to add
+ * @udp_port: the UDP port to add in Host byte order
* @header_len: length of the tunneling header in DWords
* @protocol_index: protocol index type
* @filter_index: pointer to filter index
* @cmd_details: pointer to command details structure or NULL
+ *
+ * Note: Firmware expects the udp_port value to be in Little Endian format,
+ * and this function will call cpu_to_le16 to convert from Host byte order to
+ * Little Endian order.
**/
i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
u16 udp_port, u8 protocol_index,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 0fab3a9..55079fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -390,6 +390,8 @@
if (!dcbcfg->numapps)
return;
+ if (dcbcfg->numapps > I40E_DCBX_MAX_APPS)
+ dcbcfg->numapps = I40E_DCBX_MAX_APPS;
for (i = 0; i < dcbcfg->numapps; i++) {
u8 up, selector;
@@ -618,14 +620,17 @@
/* CEE PG data to ETS config */
dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
for (i = 0; i < 4; i++) {
tc = (u8)((cee_cfg->oper_prio_tc[i] &
- I40E_CEE_PGID_PRIO_1_MASK) >>
- I40E_CEE_PGID_PRIO_1_SHIFT);
- dcbcfg->etscfg.prioritytable[i*2] = tc;
- tc = (u8)((cee_cfg->oper_prio_tc[i] &
I40E_CEE_PGID_PRIO_0_MASK) >>
I40E_CEE_PGID_PRIO_0_SHIFT);
+ dcbcfg->etscfg.prioritytable[i * 2] = tc;
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
}
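A worked example of the corrected ordering, with the illustrative value cee_cfg->oper_prio_tc[0] == 0x21: the PRIO_0 field (the low nibble, 0x1) now fills prioritytable[0] and the PRIO_1 field (the high nibble, 0x2) fills prioritytable[1]; the old code wrote them the other way around, carrying the firmware's reversed nibble order into the ETS config.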
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 894c8e5..9692a52 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -147,6 +147,7 @@
I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
I40E_PF_STAT("arq_overflows", arq_overflows),
I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+ I40E_PF_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
@@ -1298,6 +1299,17 @@
ring->rx_jumbo_pending = 0;
}
+static bool i40e_active_tx_ring_index(struct i40e_vsi *vsi, u16 index)
+{
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ return index < vsi->num_queue_pairs ||
+ (index >= vsi->alloc_queue_pairs &&
+ index < vsi->alloc_queue_pairs + vsi->num_queue_pairs);
+ }
+
+ return index < vsi->num_queue_pairs;
+}
+
static int i40e_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
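An illustrative layout for i40e_active_tx_ring_index(): with vsi->alloc_queue_pairs == 8 and vsi->num_queue_pairs == 4 on an XDP-enabled VSI, indices 0-3 are the active regular Tx rings and 8-11 the corresponding active XDP Tx rings; the rest of the doubled allocation is inactive, which is why the loops below skip any index failing this test.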
{
@@ -1307,6 +1319,7 @@
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
u32 new_rx_count, new_tx_count;
+ u16 tx_alloc_queue_pairs;
int timeout = 50;
int i, err = 0;
@@ -1344,6 +1357,8 @@
for (i = 0; i < vsi->num_queue_pairs; i++) {
vsi->tx_rings[i]->count = new_tx_count;
vsi->rx_rings[i]->count = new_rx_count;
+ if (i40e_enabled_xdp_vsi(vsi))
+ vsi->xdp_rings[i]->count = new_tx_count;
}
goto done;
}
@@ -1353,20 +1368,24 @@
* to the Tx and Rx ring structs.
*/
- /* alloc updated Tx resources */
+ /* alloc updated Tx and XDP Tx resources */
+ tx_alloc_queue_pairs = vsi->alloc_queue_pairs *
+ (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
if (new_tx_count != vsi->tx_rings[0]->count) {
netdev_info(netdev,
"Changing Tx descriptor count from %d to %d.\n",
vsi->tx_rings[0]->count, new_tx_count);
- tx_rings = kcalloc(vsi->alloc_queue_pairs,
+ tx_rings = kcalloc(tx_alloc_queue_pairs,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!tx_rings) {
err = -ENOMEM;
goto done;
}
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- /* clone ring and setup updated count */
+ for (i = 0; i < tx_alloc_queue_pairs; i++) {
+ if (!i40e_active_tx_ring_index(vsi, i))
+ continue;
+
tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_count;
/* the desc and bi pointers will be reallocated in the
@@ -1378,6 +1397,8 @@
if (err) {
while (i) {
i--;
+ if (!i40e_active_tx_ring_index(vsi, i))
+ continue;
i40e_free_tx_resources(&tx_rings[i]);
}
kfree(tx_rings);
@@ -1445,9 +1466,11 @@
i40e_down(vsi);
if (tx_rings) {
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- i40e_free_tx_resources(vsi->tx_rings[i]);
- *vsi->tx_rings[i] = tx_rings[i];
+ for (i = 0; i < tx_alloc_queue_pairs; i++) {
+ if (i40e_active_tx_ring_index(vsi, i)) {
+ i40e_free_tx_resources(vsi->tx_rings[i]);
+ *vsi->tx_rings[i] = tx_rings[i];
+ }
}
kfree(tx_rings);
tx_rings = NULL;
@@ -1478,8 +1501,10 @@
free_tx:
/* error cleanup if the Rx allocations failed after getting Tx */
if (tx_rings) {
- for (i = 0; i < vsi->num_queue_pairs; i++)
- i40e_free_tx_resources(&tx_rings[i]);
+ for (i = 0; i < tx_alloc_queue_pairs; i++) {
+ if (i40e_active_tx_ring_index(vsi, i))
+ i40e_free_tx_resources(vsi->tx_rings[i]);
+ }
kfree(tx_rings);
tx_rings = NULL;
}
@@ -2686,6 +2711,12 @@
u8 flow_pctype = 0;
u64 i_set, i_setc;
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ dev_err(&pf->pdev->dev,
+ "Change of RSS hash input set is not supported when MFP mode is enabled\n");
+ return -EOPNOTSUPP;
+ }
+
/* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports
*/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index b077ef8b..2d1253c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -762,7 +762,7 @@
(fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA)) {
struct fcoe_crc_eof *crc = NULL;
- crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
+ crc = skb_put(skb, sizeof(*crc));
crc->fcoe_eof = FC_EOF_T;
} else {
/* otherwise, drop the header only frame */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a7a4b28..2db93d3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -27,6 +27,7 @@
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
+#include <linux/bpf.h>
/* Local includes */
#include "i40e.h"
@@ -407,6 +408,27 @@
}
/**
+ * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
+ * @ring: Tx ring to get statistics from
+ * @stats: statistics entry to be updated
+ **/
+static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
+ struct rtnl_link_stats64 *stats)
+{
+ u64 bytes, packets;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+}
+
+/**
* i40e_get_netdev_stats_struct - Get statistics for netdev interface
* @netdev: network interface device structure
*
@@ -436,15 +458,8 @@
tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
if (!tx_ring)
continue;
+ i40e_get_netdev_stats_struct_tx(tx_ring, stats);
- do {
- start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
- packets = tx_ring->stats.packets;
- bytes = tx_ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
-
- stats->tx_packets += packets;
- stats->tx_bytes += bytes;
rx_ring = &tx_ring[1];
do {
@@ -455,6 +470,9 @@
stats->rx_packets += packets;
stats->rx_bytes += bytes;
+
+ if (i40e_enabled_xdp_vsi(vsi))
+ i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats);
}
rcu_read_unlock();
@@ -2263,9 +2281,8 @@
i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
- if ((changed_flags & IFF_PROMISC) ||
- (promisc_changed &&
- test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))) {
+
+ if ((changed_flags & IFF_PROMISC) || promisc_changed) {
bool cur_promisc;
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
@@ -2396,6 +2413,18 @@
}
/**
+ * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * @vsi: the vsi
+ **/
+static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
+{
+ if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+ return I40E_RXBUFFER_2048;
+ else
+ return I40E_RXBUFFER_3072;
+}
+
+/**
* i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -2408,6 +2437,13 @@
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+ if (frame_size > i40e_max_xdp_frame_size(vsi))
+ return -EINVAL;
+ }
+
netdev_info(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
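The arithmetic behind the new check, assuming a 4K-page system without I40E_FLAG_LEGACY_RX: the XDP frame limit is I40E_RXBUFFER_3072, and with ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 22 bytes of overhead the largest MTU an XDP-enabled VSI accepts is 3072 - 22 = 3050.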
@@ -2794,6 +2830,12 @@
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
+ if (!i40e_enabled_xdp_vsi(vsi))
+ return err;
+
+ for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+ err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
+
return err;
}
@@ -2807,12 +2849,17 @@
{
int i;
- if (!vsi->tx_rings)
- return;
+ if (vsi->tx_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+ i40e_free_tx_resources(vsi->tx_rings[i]);
+ }
- for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
- i40e_free_tx_resources(vsi->tx_rings[i]);
+ if (vsi->xdp_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
+ i40e_free_tx_resources(vsi->xdp_rings[i]);
+ }
}
/**
@@ -3073,6 +3120,12 @@
for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
err = i40e_configure_tx_ring(vsi->tx_rings[i]);
+ if (!i40e_enabled_xdp_vsi(vsi))
+ return err;
+
+ for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
+ err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
+
return err;
}
@@ -3217,6 +3270,7 @@
**/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
+ bool has_xdp = i40e_enabled_xdp_vsi(vsi);
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u16 vector;
@@ -3247,28 +3301,40 @@
/* Linked list for the queuepairs assigned to this vector */
wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
for (q = 0; q < q_vector->num_ringpairs; q++) {
+ u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
u32 val;
val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
- (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
- (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
- (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
- (I40E_QUEUE_TYPE_TX
- << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_TX <<
+ I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
wr32(hw, I40E_QINT_RQCTL(qp), val);
+ if (has_xdp) {
+ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+ (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_TX <<
+ I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+
+ wr32(hw, I40E_QINT_TQCTL(nextqp), val);
+ }
+
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
- (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
- (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
- ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
- (I40E_QUEUE_TYPE_RX
- << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+ ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_RX <<
+ I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
/* Terminate the linked list */
if (q == (q_vector->num_ringpairs - 1))
- val |= (I40E_QUEUE_END_OF_LIST
- << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+ val |= (I40E_QUEUE_END_OF_LIST <<
+ I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
wr32(hw, I40E_QINT_TQCTL(qp), val);
qp++;
@@ -3322,6 +3388,7 @@
**/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
+ u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
struct i40e_q_vector *q_vector = vsi->q_vectors[0];
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
@@ -3342,12 +3409,22 @@
wr32(hw, I40E_PFINT_LNKLST0, 0);
/* Associate the queue pair to the vector and enable the queue int */
- val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
- (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
(I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
wr32(hw, I40E_QINT_RQCTL(0), val);
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
+ (I40E_QUEUE_TYPE_TX
+ << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+
+ wr32(hw, I40E_QINT_TQCTL(nextqp), val);
+ }
+
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
(I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
(I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
@@ -3511,11 +3588,24 @@
int base = vsi->base_vector;
int i;
+ /* disable interrupt causation from each queue */
for (i = 0; i < vsi->num_queue_pairs; i++) {
- wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
- wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
+ u32 val;
+
+ val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
+ val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
+
+ val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
+ val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
+
+ if (!i40e_enabled_xdp_vsi(vsi))
+ continue;
+ wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
}
+ /* disable each interrupt */
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
for (i = vsi->base_vector;
i < (vsi->num_q_vectors + vsi->base_vector); i++)
@@ -3594,10 +3684,10 @@
pf->sw_int_count++;
if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
- (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
+ (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
- icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
+ set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
}
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
@@ -3816,6 +3906,16 @@
q_vector->tx.ring = tx_ring;
q_vector->tx.count++;
+ /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
+
+ xdp_ring->q_vector = q_vector;
+ xdp_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = xdp_ring;
+ q_vector->tx.count++;
+ }
+
rx_ring->q_vector = q_vector;
rx_ring->next = q_vector->rx.ring;
q_vector->rx.ring = rx_ring;
@@ -3995,6 +4095,33 @@
}
/**
+ * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
+ * @seid: VSI SEID
+ * @pf: the PF structure
+ * @pf_q: the PF queue to configure
+ * @is_xdp: true if the queue is used for XDP
+ * @enable: start or stop the queue
+ **/
+static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
+ bool is_xdp, bool enable)
+{
+ int ret;
+
+ i40e_control_tx_q(pf, pf_q, enable);
+
+ /* wait for the change to finish */
+ ret = i40e_pf_txq_wait(pf, pf_q, enable);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d %sTx ring %d %sable timeout\n",
+ seid, (is_xdp ? "XDP " : ""), pf_q,
+ (enable ? "en" : "dis"));
+ }
+
+ return ret;
+}
+
+/**
* i40e_vsi_control_tx - Start or stop a VSI's rings
* @vsi: the VSI being configured
* @enable: start or stop the rings
@@ -4006,16 +4133,20 @@
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- i40e_control_tx_q(pf, pf_q, enable);
-
- /* wait for the change to finish */
- ret = i40e_pf_txq_wait(pf, pf_q, enable);
- if (ret) {
- dev_info(&pf->pdev->dev,
- "VSI seid %d Tx ring %d %sable timeout\n",
- vsi->seid, pf_q, (enable ? "en" : "dis"));
+ ret = i40e_control_wait_tx_q(vsi->seid, pf,
+ pf_q,
+ false /*is xdp*/, enable);
+ if (ret)
break;
- }
+
+ if (!i40e_enabled_xdp_vsi(vsi))
+ continue;
+
+ ret = i40e_control_wait_tx_q(vsi->seid, pf,
+ pf_q + vsi->alloc_queue_pairs,
+ true /*is xdp*/, enable);
+ if (ret)
+ break;
}
return ret;
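
The XDP Tx queues live directly after the regular Tx queues in PF queue-index space, which is why the XDP call passes pf_q + vsi->alloc_queue_pairs. Schematically (a hypothetical helper, not driver code):

/* Regular Tx pair i sits at PF queue base + i,
 * its XDP twin at base + alloc_queue_pairs + i.
 */
static inline int xdp_pf_queue(int base, int alloc_queue_pairs, int i)
{
	return base + alloc_queue_pairs + i;
}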
@@ -4527,7 +4658,21 @@
vsi->seid, pf_q);
return ret;
}
- /* Check and wait for the Tx queue */
+
+ if (!i40e_enabled_xdp_vsi(vsi))
+ goto wait_rx;
+
+ /* Check and wait for the XDP Tx queue */
+ ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
+ false);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d XDP Tx ring %d disable timeout\n",
+ vsi->seid, pf_q);
+ return ret;
+ }
+wait_rx:
+ /* Check and wait for the Rx queue */
ret = i40e_pf_rxq_wait(pf, pf_q, false);
if (ret) {
dev_info(&pf->pdev->dev,
@@ -5446,6 +5591,8 @@
for (i = 0; i < vsi->num_queue_pairs; i++) {
i40e_clean_tx_ring(vsi->tx_rings[i]);
+ if (i40e_enabled_xdp_vsi(vsi))
+ i40e_clean_tx_ring(vsi->xdp_rings[i]);
i40e_clean_rx_ring(vsi->rx_rings[i]);
}
@@ -5509,7 +5656,8 @@
return ret;
}
-static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
+static int __i40e_setup_tc(struct net_device *netdev, u32 handle,
+ u32 chain_index, __be16 proto,
struct tc_to_netdev *tc)
{
if (tc->type != TC_SETUP_MQPRIO)
@@ -6372,7 +6520,8 @@
i40e_update_veb_stats(pf->veb[i]);
}
- i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
+ i40e_ptp_rx_hang(pf);
+ i40e_ptp_tx_hang(pf);
}
/**
@@ -6417,9 +6566,7 @@
if (reset_flags &&
!test_bit(__I40E_DOWN, pf->state) &&
!test_bit(__I40E_CONFIG_BUSY, pf->state)) {
- rtnl_lock();
- i40e_do_reset(pf, reset_flags, true);
- rtnl_unlock();
+ i40e_do_reset(pf, reset_flags, false);
}
}
@@ -6968,6 +7115,51 @@
}
/**
+ * i40e_get_oem_version - get OEM specific version information
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_get_oem_version(struct i40e_hw *hw)
+{
+ u16 block_offset = 0xffff;
+ u16 block_length = 0;
+ u16 capabilities = 0;
+ u16 gen_snap = 0;
+ u16 release = 0;
+
+#define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
+#define I40E_NVM_OEM_LENGTH_OFFSET 0x00
+#define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
+#define I40E_NVM_OEM_GEN_OFFSET 0x02
+#define I40E_NVM_OEM_RELEASE_OFFSET 0x03
+#define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
+#define I40E_NVM_OEM_LENGTH 3
+
+ /* Check if pointer to OEM version block is valid. */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
+ if (block_offset == 0xffff)
+ return;
+
+ /* Check if OEM version block has correct length. */
+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
+ &block_length);
+ if (block_length < I40E_NVM_OEM_LENGTH)
+ return;
+
+ /* Check if OEM version format is as expected. */
+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
+ &capabilities);
+ if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
+ return;
+
+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
+ &gen_snap);
+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
+ &release);
+ hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
+ hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
+}
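
The two NVM words are packed into the single 32-bit oem_ver field. A sketch of the packing and unpacking; the 16-bit shift is an assumption standing in for I40E_OEM_SNAP_SHIFT, which is defined elsewhere in the driver headers:

#include <stdint.h>

#define OEM_SNAP_SHIFT	16	/* assumed: gen/snap word in the high half */

static uint32_t pack_oem_ver(uint16_t gen_snap, uint16_t release)
{
	return ((uint32_t)gen_snap << OEM_SNAP_SHIFT) | release;
}

static void unpack_oem_ver(uint32_t oem_ver, uint16_t *gen_snap,
			   uint16_t *release)
{
	*gen_snap = oem_ver >> OEM_SNAP_SHIFT;
	*release = oem_ver & 0xffff;
}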
+
+/**
* i40e_reset - wait for core reset to finish reset, reset pf if corer not seen
* @pf: board private structure
**/
@@ -7014,6 +7206,7 @@
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto clear_recovery;
}
+ i40e_get_oem_version(&pf->hw);
/* re-verify the eeprom if we just had an EMP reset */
if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
@@ -7513,15 +7706,22 @@
**/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
+ struct i40e_ring **next_rings;
int size;
int ret = 0;
- /* allocate memory for both Tx and Rx ring pointers */
- size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
+ /* allocate memory for both Tx, XDP Tx and Rx ring pointers */
+ size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
+ (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
vsi->tx_rings = kzalloc(size, GFP_KERNEL);
if (!vsi->tx_rings)
return -ENOMEM;
- vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
+ next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ vsi->xdp_rings = next_rings;
+ next_rings += vsi->alloc_queue_pairs;
+ }
+ vsi->rx_rings = next_rings;
if (alloc_qvectors) {
/* allocate memory for q_vector pointers */
@@ -7641,6 +7841,7 @@
kfree(vsi->tx_rings);
vsi->tx_rings = NULL;
vsi->rx_rings = NULL;
+ vsi->xdp_rings = NULL;
}
/**
@@ -7724,6 +7925,8 @@
kfree_rcu(vsi->tx_rings[i], rcu);
vsi->tx_rings[i] = NULL;
vsi->rx_rings[i] = NULL;
+ if (vsi->xdp_rings)
+ vsi->xdp_rings[i] = NULL;
}
}
}
@@ -7734,43 +7937,61 @@
**/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
- struct i40e_ring *tx_ring, *rx_ring;
+ int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
struct i40e_pf *pf = vsi->back;
- int i;
+ struct i40e_ring *ring;
/* Set basic values in the rings to be used later during open() */
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
/* allocate space for both Tx and Rx in one shot */
- tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
- if (!tx_ring)
+ ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
+ if (!ring)
goto err_out;
- tx_ring->queue_index = i;
- tx_ring->reg_idx = vsi->base_queue + i;
- tx_ring->ring_active = false;
- tx_ring->vsi = vsi;
- tx_ring->netdev = vsi->netdev;
- tx_ring->dev = &pf->pdev->dev;
- tx_ring->count = vsi->num_desc;
- tx_ring->size = 0;
- tx_ring->dcb_tc = 0;
+ ring->queue_index = i;
+ ring->reg_idx = vsi->base_queue + i;
+ ring->ring_active = false;
+ ring->vsi = vsi;
+ ring->netdev = vsi->netdev;
+ ring->dev = &pf->pdev->dev;
+ ring->count = vsi->num_desc;
+ ring->size = 0;
+ ring->dcb_tc = 0;
if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
- tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
- tx_ring->tx_itr_setting = pf->tx_itr_default;
- vsi->tx_rings[i] = tx_ring;
+ ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
+ ring->tx_itr_setting = pf->tx_itr_default;
+ vsi->tx_rings[i] = ring++;
- rx_ring = &tx_ring[1];
- rx_ring->queue_index = i;
- rx_ring->reg_idx = vsi->base_queue + i;
- rx_ring->ring_active = false;
- rx_ring->vsi = vsi;
- rx_ring->netdev = vsi->netdev;
- rx_ring->dev = &pf->pdev->dev;
- rx_ring->count = vsi->num_desc;
- rx_ring->size = 0;
- rx_ring->dcb_tc = 0;
- rx_ring->rx_itr_setting = pf->rx_itr_default;
- vsi->rx_rings[i] = rx_ring;
+ if (!i40e_enabled_xdp_vsi(vsi))
+ goto setup_rx;
+
+ ring->queue_index = vsi->alloc_queue_pairs + i;
+ ring->reg_idx = vsi->base_queue + ring->queue_index;
+ ring->ring_active = false;
+ ring->vsi = vsi;
+ ring->netdev = NULL;
+ ring->dev = &pf->pdev->dev;
+ ring->count = vsi->num_desc;
+ ring->size = 0;
+ ring->dcb_tc = 0;
+ if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
+ ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
+ set_ring_xdp(ring);
+ ring->tx_itr_setting = pf->tx_itr_default;
+ vsi->xdp_rings[i] = ring++;
+
+setup_rx:
+ ring->queue_index = i;
+ ring->reg_idx = vsi->base_queue + i;
+ ring->ring_active = false;
+ ring->vsi = vsi;
+ ring->netdev = vsi->netdev;
+ ring->dev = &pf->pdev->dev;
+ ring->count = vsi->num_desc;
+ ring->size = 0;
+ ring->dcb_tc = 0;
+ ring->rx_itr_setting = pf->rx_itr_default;
+ vsi->rx_rings[i] = ring;
}
return 0;
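
i40e_vsi_alloc_arrays() above hands out one allocation that is carved into two or three consecutive pointer arrays, so the later kfree(vsi->tx_rings) releases all of them at once. A sketch of the carving, with a hypothetical carve_rings() standing in for the driver code:

#include <stddef.h>

struct ring;	/* opaque, stands in for struct i40e_ring */

/* One block of n * (has_xdp ? 3 : 2) pointers, split as:
 *   tx:  [0, n)    xdp: [n, 2n)    rx: [n, 2n) or [2n, 3n)
 */
static void carve_rings(struct ring **mem, int n, int has_xdp,
			struct ring ***tx, struct ring ***xdp,
			struct ring ***rx)
{
	struct ring **next = mem + n;

	*tx = mem;
	*xdp = has_xdp ? next : NULL;
	if (has_xdp)
		next += n;
	*rx = next;
}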
@@ -8572,10 +8793,10 @@
}
/**
- * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
+ * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
+i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
{
i40e_status status;
bool min_valid, max_valid;
@@ -8586,27 +8807,27 @@
if (!status) {
if (min_valid)
- pf->npar_min_bw = min_bw;
+ pf->min_bw = min_bw;
if (max_valid)
- pf->npar_max_bw = max_bw;
+ pf->max_bw = max_bw;
}
return status;
}
/**
- * i40e_set_npar_bw_setting - Set BW settings for this PF partition
+ * i40e_set_partition_bw_setting - Set BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
+i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
{
struct i40e_aqc_configure_partition_bw_data bw_data;
i40e_status status;
/* Set the valid bit for this PF */
bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
- bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
- bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
+ bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
+ bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
/* Set the new bandwidths */
status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
@@ -8615,10 +8836,10 @@
}
/**
- * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
+ * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
+i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
{
/* Commit temporary BW setting to permanent NVM image */
enum i40e_admin_queue_err last_aq_status;
@@ -8737,16 +8958,19 @@
if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
pf->flags |= I40E_FLAG_MFP_ENABLED;
dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
- if (i40e_get_npar_bw_setting(pf))
+ if (i40e_get_partition_bw_setting(pf)) {
dev_warn(&pf->pdev->dev,
- "Could not get NPAR bw settings\n");
- else
+ "Could not get partition bw settings\n");
+ } else {
dev_info(&pf->pdev->dev,
- "Min BW = %8.8x, Max BW = %8.8x\n",
- pf->npar_min_bw, pf->npar_max_bw);
+ "Partition BW Min = %8.8x, Max = %8.8x\n",
+ pf->min_bw, pf->max_bw);
+
+ /* nudge the Tx scheduler */
+ i40e_set_partition_bw_setting(pf);
+ }
}
- /* FW/NVM is not yet fixed in this regard */
if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
@@ -8849,10 +9073,6 @@
mutex_init(&pf->switch_mutex);
- /* If NPAR is enabled nudge the Tx scheduler */
- if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
- i40e_set_npar_bw_setting(pf);
-
sw_init_done:
return err;
}
@@ -9309,6 +9529,72 @@
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
+/**
+ * i40e_xdp_setup - add/remove an XDP program
+ * @vsi: VSI to be changed
+ * @prog: XDP program
+ **/
+static int i40e_xdp_setup(struct i40e_vsi *vsi,
+ struct bpf_prog *prog)
+{
+ int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ struct i40e_pf *pf = vsi->back;
+ struct bpf_prog *old_prog;
+ bool need_reset;
+ int i;
+
+ /* Don't allow frames that span over multiple buffers */
+ if (frame_size > vsi->rx_buf_len)
+ return -EINVAL;
+
+ if (!i40e_enabled_xdp_vsi(vsi) && !prog)
+ return 0;
+
+ /* When turning XDP on->off/off->on we reset and rebuild the rings. */
+ need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
+
+ if (need_reset)
+ i40e_prep_for_reset(pf, true);
+
+ old_prog = xchg(&vsi->xdp_prog, prog);
+
+ if (need_reset)
+ i40e_reset_and_rebuild(pf, true, true);
+
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
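
The reset path is only taken when XDP toggles between enabled and disabled; replacing one program with another merely xchg()s the program pointer and propagates the new value to each Rx ring. The decision in isolation (condensed and illustrative, not driver code):

#include <stdbool.h>
#include <stddef.h>

/* A ring teardown/rebuild is only needed when XDP goes on->off or
 * off->on; a program swap keeps the rings as they are.
 */
static bool xdp_needs_reset(bool xdp_enabled, const void *new_prog)
{
	return xdp_enabled != (new_prog != NULL);
}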
+
+/**
+ * i40e_xdp - implements ndo_xdp for i40e
+ * @dev: netdevice
+ * @xdp: XDP command
+ **/
+static int i40e_xdp(struct net_device *dev,
+ struct netdev_xdp *xdp)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (vsi->type != I40E_VSI_MAIN)
+ return -EINVAL;
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return i40e_xdp_setup(vsi, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
@@ -9341,6 +9627,7 @@
.ndo_features_check = i40e_features_check,
.ndo_bridge_getlink = i40e_ndo_bridge_getlink,
.ndo_bridge_setlink = i40e_ndo_bridge_setlink,
+ .ndo_xdp = i40e_xdp,
};
/**
@@ -9909,6 +10196,7 @@
**/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
+ u16 alloc_queue_pairs;
struct i40e_pf *pf;
u8 enabled_tc;
int ret;
@@ -9927,11 +10215,14 @@
if (ret)
goto err_vsi;
- ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+ alloc_queue_pairs = vsi->alloc_queue_pairs *
+ (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
+
+ ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
if (ret < 0) {
dev_info(&pf->pdev->dev,
"failed to get tracking for %d queues for VSI %d err %d\n",
- vsi->alloc_queue_pairs, vsi->seid, ret);
+ alloc_queue_pairs, vsi->seid, ret);
goto err_vsi;
}
vsi->base_queue = ret;
@@ -9987,6 +10278,7 @@
{
struct i40e_vsi *vsi = NULL;
struct i40e_veb *veb = NULL;
+ u16 alloc_queue_pairs;
int ret, i;
int v_idx;
@@ -10074,12 +10366,14 @@
else if (type == I40E_VSI_SRIOV)
vsi->vf_id = param1;
/* assign it some queues */
- ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
- vsi->idx);
+ alloc_queue_pairs = vsi->alloc_queue_pairs *
+ (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
+
+ ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
if (ret < 0) {
dev_info(&pf->pdev->dev,
"failed to get tracking for %d queues for VSI %d err=%d\n",
- vsi->alloc_queue_pairs, vsi->seid, ret);
+ alloc_queue_pairs, vsi->seid, ret);
goto err_vsi;
}
vsi->base_queue = ret;
@@ -11097,6 +11391,7 @@
goto err_pf_reset;
}
+ i40e_get_oem_version(hw);
/* provide nvm, fw, api versions */
dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
@@ -11609,11 +11904,8 @@
}
/* shutdown all operations */
- if (!test_bit(__I40E_SUSPENDED, pf->state)) {
- rtnl_lock();
- i40e_prep_for_reset(pf, true);
- rtnl_unlock();
- }
+ if (!test_bit(__I40E_SUSPENDED, pf->state))
+ i40e_prep_for_reset(pf, false);
/* Request a slot reset */
return PCI_ERS_RESULT_NEED_RESET;
@@ -11679,9 +11971,7 @@
if (test_bit(__I40E_SUSPENDED, pf->state))
return;
- rtnl_lock();
- i40e_handle_reset_warning(pf, true);
- rtnl_unlock();
+ i40e_handle_reset_warning(pf, false);
}
/**
@@ -11761,9 +12051,7 @@
if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
i40e_enable_mc_magic_wake(pf);
- rtnl_lock();
- i40e_prep_for_reset(pf, true);
- rtnl_unlock();
+ i40e_prep_for_reset(pf, false);
wr32(hw, I40E_PFPM_APM,
(pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
@@ -11795,9 +12083,7 @@
if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
i40e_enable_mc_magic_wake(pf);
- rtnl_lock();
- i40e_prep_for_reset(pf, true);
- rtnl_unlock();
+ i40e_prep_for_reset(pf, false);
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
@@ -11843,9 +12129,7 @@
/* handling the reset will rebuild the device state */
if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
clear_bit(__I40E_DOWN, pf->state);
- rtnl_lock();
- i40e_reset_and_rebuild(pf, false, true);
- rtnl_unlock();
+ i40e_reset_and_rebuild(pf, false, false);
}
return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index c56d976..df613ea 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -29,7 +29,7 @@
#include "i40e_type.h"
#include "i40e_alloc.h"
-#include "i40e_virtchnl.h"
+#include <linux/avf/virtchnl.h>
/* Prototypes for shared code functions that are not in
* the standard function pointer structures. These are
@@ -333,10 +333,10 @@
/* i40e_common for VF drivers*/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
- struct i40e_virtchnl_vf_resource *msg);
+ struct virtchnl_vf_resource *msg);
i40e_status i40e_vf_reset(struct i40e_hw *hw);
i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
i40e_status v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 18c1cc0..1a0be83 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -269,6 +269,7 @@
/**
* i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
+ * @pf: The PF private data structure
* @vsi: The VSI with the rings relevant to 1588
*
* This watchdog task is scheduled to detect error case where hardware has
@@ -276,9 +277,8 @@
* particular error is rare but leaves the device in a state unable to timestamp
* any future packets.
**/
-void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
+void i40e_ptp_rx_hang(struct i40e_pf *pf)
{
- struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
unsigned int i, cleared = 0;
@@ -328,6 +328,36 @@
}
/**
+ * i40e_ptp_tx_hang - Detect error case when Tx timestamp register is hung
+ * @pf: The PF private data structure
+ *
+ * This watchdog task is run periodically to make sure that we clear the Tx
+ * timestamp logic if we don't obtain a timestamp in a reasonable amount of
+ * time. This is unexpected in the normal case, but if it occurs it would
+ * permanently prevent timestamping of any future packets.
+ **/
+void i40e_ptp_tx_hang(struct i40e_pf *pf)
+{
+ if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
+ return;
+
+ /* Nothing to do if we're not already waiting for a timestamp */
+ if (!test_bit(__I40E_PTP_TX_IN_PROGRESS, pf->state))
+ return;
+
+ /* We already have a handler routine which is run when we are notified
+ * of a Tx timestamp in the hardware. If we don't get an interrupt
+ * within a second it is reasonable to assume that we never will.
+ */
+ if (time_is_before_jiffies(pf->ptp_tx_start + HZ)) {
+ dev_kfree_skb_any(pf->ptp_tx_skb);
+ pf->ptp_tx_skb = NULL;
+ clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
+ pf->tx_hwtstamp_timeouts++;
+ }
+}
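
The hang detector is a one-second watchdog keyed off the jiffy stamp recorded at transmit time (pf->ptp_tx_start, set in the i40e_txrx.c hunk below). The same pattern in plain C, using a monotonic clock in place of jiffies (illustrative only):

#include <stdbool.h>
#include <time.h>

/* Has more than timeout_sec elapsed since start?  Coarse, second
 * granularity -- comparable to the driver's jiffies + HZ check.
 */
static bool timed_out(struct timespec start, time_t timeout_sec)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return now.tv_sec - start.tv_sec >= timeout_sec;
}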
+
+/**
* i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp
* @pf: Board private structure
*
@@ -338,6 +368,7 @@
void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
{
struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb = pf->ptp_tx_skb;
struct i40e_hw *hw = &pf->hw;
u32 hi, lo;
u64 ns;
@@ -353,12 +384,19 @@
hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);
ns = (((u64)hi) << 32) | lo;
-
i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns);
- skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
- dev_kfree_skb_any(pf->ptp_tx_skb);
+
+ /* Clear the bit lock as soon as possible after reading the register,
+ * and prior to notifying the stack via skb_tstamp_tx(). Otherwise
+ * applications might wake up and attempt to request another transmit
+ * timestamp prior to the bit lock being cleared.
+ */
pf->ptp_tx_skb = NULL;
clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
+
+ /* Notify the stack and free the skb after we've unlocked */
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
}
/**
@@ -562,6 +600,7 @@
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
}
break;
+ case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
default:
return -ERANGE;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 77115c2..b936feb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -26,6 +26,7 @@
#include <linux/prefetch.h>
#include <net/busy_poll.h>
+#include <linux/bpf_trace.h>
#include "i40e.h"
#include "i40e_trace.h"
#include "i40e_prototype.h"
@@ -629,6 +630,8 @@
if (tx_buffer->skb) {
if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
kfree(tx_buffer->raw_buf);
+ else if (ring_is_xdp(ring))
+ page_frag_free(tx_buffer->raw_buf);
else
dev_kfree_skb_any(tx_buffer->skb);
if (dma_unmap_len(tx_buffer, len))
@@ -770,8 +773,11 @@
total_bytes += tx_buf->bytecount;
total_packets += tx_buf->gso_segs;
- /* free the skb */
- napi_consume_skb(tx_buf->skb, napi_budget);
+ /* free the skb/XDP data */
+ if (ring_is_xdp(tx_ring))
+ page_frag_free(tx_buf->raw_buf);
+ else
+ napi_consume_skb(tx_buf->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -847,6 +853,9 @@
tx_ring->arm_wb = true;
}
+ if (ring_is_xdp(tx_ring))
+ return !!budget;
+
/* notify netdev of completed buffers */
netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes);
@@ -1195,6 +1204,7 @@
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
i40e_clean_rx_ring(rx_ring);
+ rx_ring->xdp_prog = NULL;
kfree(rx_ring->rx_bi);
rx_ring->rx_bi = NULL;
@@ -1241,6 +1251,8 @@
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
+
return 0;
err:
kfree(rx_ring->rx_bi);
@@ -1593,6 +1605,7 @@
* i40e_cleanup_headers - Correct empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @skb: pointer to current skb being fixed
+ * @rx_desc: pointer to the EOP Rx descriptor
*
* Also address the case where we are pulling data in on pages only
* and as such no data is present in the skb header.
@@ -1602,8 +1615,25 @@
*
* Returns true if an error was encountered and skb was freed.
**/
-static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
+static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
+ union i40e_rx_desc *rx_desc)
+
{
+ /* XDP packets use an error pointer, so abort at this point */
+ if (IS_ERR(skb))
+ return true;
+
+ /* ERR_MASK will only have valid bits if EOP set, and
+ * what we are doing here is actually checking
+ * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
+ * the error field
+ */
+ if (unlikely(i40e_test_staterr(rx_desc,
+ BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
+ dev_kfree_skb_any(skb);
+ return true;
+ }
+
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;
@@ -1776,7 +1806,7 @@
* i40e_construct_skb - Allocate skb and populate it
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: rx buffer to pull data from
- * @size: size of buffer to add to skb
+ * @xdp: xdp_buff pointing to the data
*
* This function allocates an skb. It then populates it with the page
* data from the current receive descriptor, taking care to set up the
@@ -1784,9 +1814,9 @@
*/
static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
- unsigned int size)
+ struct xdp_buff *xdp)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+ unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
@@ -1796,9 +1826,9 @@
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
+ prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
+ prefetch(xdp->data + L1_CACHE_BYTES);
#endif
/* allocate a skb to store the frags */
@@ -1811,10 +1841,11 @@
/* Determine available headroom for copy */
headlen = size;
if (headlen > I40E_RX_HDR_SIZE)
- headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);
+ headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+ memcpy(__skb_put(skb, headlen), xdp->data,
+ ALIGN(headlen, sizeof(long)));
/* update all of the pointers */
size -= headlen;
@@ -1841,16 +1872,16 @@
* i40e_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buffer: Rx buffer to pull data from
- * @size: size of buffer to add to skb
+ * @xdp: xdp_buff pointing to the data
*
* This function builds an skb around an existing Rx buffer, taking care
* to set up the skb correctly and avoid any memcpy overhead.
*/
static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
- unsigned int size)
+ struct xdp_buff *xdp)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+ unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
@@ -1860,12 +1891,12 @@
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
+ prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
+ prefetch(xdp->data + L1_CACHE_BYTES);
#endif
/* build an skb around the page buffer */
- skb = build_skb(va - I40E_SKB_PAD, truesize);
+ skb = build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
@@ -1944,6 +1975,75 @@
return true;
}
+#define I40E_XDP_PASS 0
+#define I40E_XDP_CONSUMED 1
+#define I40E_XDP_TX 2
+
+static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
+ struct i40e_ring *xdp_ring);
+
+/**
+ * i40e_run_xdp - run an XDP program
+ * @rx_ring: Rx ring being processed
+ * @xdp: XDP buffer containing the frame
+ **/
+static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
+ struct xdp_buff *xdp)
+{
+ int result = I40E_XDP_PASS;
+ struct i40e_ring *xdp_ring;
+ struct bpf_prog *xdp_prog;
+ u32 act;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
+ if (!xdp_prog)
+ goto xdp_out;
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+ result = i40e_xmit_xdp_ring(xdp, xdp_ring);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ /* fallthrough -- handle aborts by dropping packet */
+ case XDP_DROP:
+ result = I40E_XDP_CONSUMED;
+ break;
+ }
+xdp_out:
+ rcu_read_unlock();
+ return ERR_PTR(-result);
+}
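
i40e_run_xdp() overloads its skb return value: ERR_PTR(-I40E_XDP_TX) or ERR_PTR(-I40E_XDP_CONSUMED) smuggles the verdict back to the caller, while I40E_XDP_PASS (0) deliberately encodes as ERR_PTR(0) == NULL, so i40e_clean_rx_irq() falls through to its existing i40e_build_skb()/i40e_construct_skb() path. A userspace sketch of the trick, with minimal reimplementations of the kernel macros for illustration:

#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Mirrors the I40E_XDP_* values defined above. */
enum { XDP_RES_PASS = 0, XDP_RES_CONSUMED = 1, XDP_RES_TX = 2 };

static int decode_verdict(void *skb)
{
	if (IS_ERR(skb))
		return -PTR_ERR(skb);	/* XDP_RES_CONSUMED or XDP_RES_TX */
	return XDP_RES_PASS;		/* NULL or a real skb: pass onwards */
}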
+
+/**
+ * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
+ * @rx_ring: Rx ring
+ * @rx_buffer: Rx buffer to adjust
+ * @size: Size of adjustment
+ **/
+static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+
+ rx_buffer->page_offset ^= truesize;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size);
+
+ rx_buffer->page_offset += truesize;
+#endif
+}
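
With PAGE_SIZE < 8192 each page is split into two half-page buffers, and XOR-ing page_offset with the half-page truesize ping-pongs between the halves: the half just handed to the XDP Tx ring stays untouched while the other is reused. The arithmetic in isolation:

#include <assert.h>

int main(void)
{
	unsigned int truesize = 2048;	/* half of a 4K page */
	unsigned int offset = 0;

	offset ^= truesize;	/* first flip: use the upper half */
	assert(offset == 2048);
	offset ^= truesize;	/* second flip: back to the lower half */
	assert(offset == 0);
	return 0;
}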
+
/**
* i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
@@ -1961,11 +2061,12 @@
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- bool failure = false;
+ bool failure = false, xdp_xmit = false;
while (likely(total_rx_packets < budget)) {
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
+ struct xdp_buff xdp;
unsigned int size;
u16 vlan_tag;
u8 rx_ptype;
@@ -2006,12 +2107,32 @@
rx_buffer = i40e_get_rx_buffer(rx_ring, size);
/* retrieve a buffer from the ring */
- if (skb)
+ if (!skb) {
+ xdp.data = page_address(rx_buffer->page) +
+ rx_buffer->page_offset;
+ xdp.data_hard_start = xdp.data -
+ i40e_rx_offset(rx_ring);
+ xdp.data_end = xdp.data + size;
+
+ skb = i40e_run_xdp(rx_ring, &xdp);
+ }
+
+ if (IS_ERR(skb)) {
+ if (PTR_ERR(skb) == -I40E_XDP_TX) {
+ xdp_xmit = true;
+ i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
+ } else {
+ rx_buffer->pagecnt_bias++;
+ }
+ total_rx_bytes += size;
+ total_rx_packets++;
+ } else if (skb) {
i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
- else if (ring_uses_build_skb(rx_ring))
- skb = i40e_build_skb(rx_ring, rx_buffer, size);
- else
- skb = i40e_construct_skb(rx_ring, rx_buffer, size);
+ } else if (ring_uses_build_skb(rx_ring)) {
+ skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
+ } else {
+ skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
+ }
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -2026,18 +2147,7 @@
if (i40e_is_non_eop(rx_ring, rx_desc, skb))
continue;
- /* ERR_MASK will only have valid bits if EOP set, and
- * what we are doing here is actually checking
- * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
- * the error field
- */
- if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
- dev_kfree_skb_any(skb);
- skb = NULL;
- continue;
- }
-
- if (i40e_cleanup_headers(rx_ring, skb)) {
+ if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
skb = NULL;
continue;
}
@@ -2063,6 +2173,19 @@
total_rx_packets++;
}
+ if (xdp_xmit) {
+ struct i40e_ring *xdp_ring;
+
+ xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch.
+ */
+ wmb();
+
+ writel(xdp_ring->next_to_use, xdp_ring->tail);
+ }
+
rx_ring->skb = skb;
u64_stats_update_begin(&rx_ring->syncp);
@@ -2629,8 +2752,10 @@
if (pf->ptp_tx &&
!test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ pf->ptp_tx_start = jiffies;
pf->ptp_tx_skb = skb_get(skb);
} else {
+ pf->tx_hwtstamp_skipped++;
return 0;
}
@@ -2933,10 +3058,12 @@
* @hdr_len: size of the packet header
* @td_cmd: the command field in the descriptor
* @td_offset: offset for checksum or crc
+ *
+ * Returns 0 on success, -1 on DMA mapping failure
**/
-static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
- struct i40e_tx_buffer *first, u32 tx_flags,
- const u8 hdr_len, u32 td_cmd, u32 td_offset)
+static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ struct i40e_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
@@ -3094,7 +3221,7 @@
mmiowb();
}
- return;
+ return 0;
dma_error:
dev_info(tx_ring->dev, "TX DMA map failed\n");
@@ -3111,6 +3238,61 @@
}
tx_ring->next_to_use = i;
+
+ return -1;
+}
+
+/**
+ * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
+ * @xdp: data to transmit
+ * @xdp_ring: XDP Tx ring
+ **/
+static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
+ struct i40e_ring *xdp_ring)
+{
+ u32 size = xdp->data_end - xdp->data;
+ u16 i = xdp_ring->next_to_use;
+ struct i40e_tx_buffer *tx_bi;
+ struct i40e_tx_desc *tx_desc;
+ dma_addr_t dma;
+
+ if (unlikely(!I40E_DESC_UNUSED(xdp_ring))) {
+ xdp_ring->tx_stats.tx_busy++;
+ return I40E_XDP_CONSUMED;
+ }
+
+ dma = dma_map_single(xdp_ring->dev, xdp->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(xdp_ring->dev, dma))
+ return I40E_XDP_CONSUMED;
+
+ tx_bi = &xdp_ring->tx_bi[i];
+ tx_bi->bytecount = size;
+ tx_bi->gso_segs = 1;
+ tx_bi->raw_buf = xdp->data;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_bi, len, size);
+ dma_unmap_addr_set(tx_bi, dma, dma);
+
+ tx_desc = I40E_TX_DESC(xdp_ring, i);
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
+ | I40E_TXD_CMD,
+ 0, size, 0);
+
+ /* Make certain all of the status bits have been updated
+ * before next_to_watch is written.
+ */
+ smp_wmb();
+
+ i++;
+ if (i == xdp_ring->count)
+ i = 0;
+
+ tx_bi->next_to_watch = tx_desc;
+ xdp_ring->next_to_use = i;
+
+ return I40E_XDP_TX;
}
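
Note that i40e_xmit_xdp_ring() only advances the software producer index; the doorbell (tail register) is written once per NAPI poll in i40e_clean_rx_irq() above, after a wmb(), so MMIO is batched across every XDP_TX frame of the poll. The wraparound handling in isolation (illustrative):

/* Advance a ring producer index with wraparound. */
static inline unsigned short ring_advance(unsigned short i,
					  unsigned short count)
{
	return ++i == count ? 0 : i;
}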
/**
@@ -3211,8 +3393,9 @@
*/
i40e_atr(tx_ring, skb, tx_flags);
- i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
- td_cmd, td_offset);
+ if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+ td_cmd, td_offset))
+ goto cleanup_tx_tstamp;
return NETDEV_TX_OK;
@@ -3220,6 +3403,15 @@
i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
dev_kfree_skb_any(first->skb);
first->skb = NULL;
+cleanup_tx_tstamp:
+ if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
+ struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
+
+ dev_kfree_skb_any(pf->ptp_tx_skb);
+ pf->ptp_tx_skb = NULL;
+ clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
+ }
+
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index f5de511..b288d58 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -360,6 +360,7 @@
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */
+ struct bpf_prog *xdp_prog;
union {
struct i40e_tx_buffer *tx_bi;
struct i40e_rx_buffer *rx_bi;
@@ -395,6 +396,7 @@
u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)
+#define I40E_TXR_FLAGS_XDP BIT(2)
/* stats structs */
struct i40e_queue_stats stats;
@@ -437,6 +439,16 @@
ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}
+static inline bool ring_is_xdp(struct i40e_ring *ring)
+{
+ return !!(ring->flags & I40E_TXR_FLAGS_XDP);
+}
+
+static inline void set_ring_xdp(struct i40e_ring *ring)
+{
+ ring->flags |= I40E_TXR_FLAGS_XDP;
+}
+
enum i40e_latency_range {
I40E_LOWEST_LATENCY = 0,
I40E_LOW_LATENCY = 1,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
deleted file mode 100644
index 8552192..0000000
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ /dev/null
@@ -1,449 +0,0 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
-
-#ifndef _I40E_VIRTCHNL_H_
-#define _I40E_VIRTCHNL_H_
-
-#include "i40e_type.h"
-
-/* Description:
- * This header file describes the VF-PF communication protocol used
- * by the various i40e drivers.
- *
- * Admin queue buffer usage:
- * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
- * flags, retval, datalen, and data addr are all used normally.
- * Firmware copies the cookie fields when sending messages between the PF and
- * VF, but uses all other fields internally. Due to this limitation, we
- * must send all messages as "indirect", i.e. using an external buffer.
- *
- * All the vsi indexes are relative to the VF. Each VF can have maximum of
- * three VSIs. All the queue indexes are relative to the VSI. Each VF can
- * have a maximum of sixteen queues for all of its VSIs.
- *
- * The PF is required to return a status code in v_retval for all messages
- * except RESET_VF, which does not require any response. The return value is of
- * i40e_status_code type, defined in the i40e_type.h.
- *
- * In general, VF driver initialization should roughly follow the order of these
- * opcodes. The VF driver must first validate the API version of the PF driver,
- * then request a reset, then get resources, then configure queues and
- * interrupts. After these operations are complete, the VF driver may start
- * its queues, optionally add MAC and VLAN filters, and process traffic.
- */
-
-/* Opcodes for VF-PF communication. These are placed in the v_opcode field
- * of the virtchnl_msg structure.
- */
-enum i40e_virtchnl_ops {
-/* The PF sends status change events to VFs using
- * the I40E_VIRTCHNL_OP_EVENT opcode.
- * VFs send requests to the PF using the other ops.
- */
- I40E_VIRTCHNL_OP_UNKNOWN = 0,
- I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
- I40E_VIRTCHNL_OP_RESET_VF = 2,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
- I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
- I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
- I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
- I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
- I40E_VIRTCHNL_OP_ADD_VLAN = 12,
- I40E_VIRTCHNL_OP_DEL_VLAN = 13,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
- I40E_VIRTCHNL_OP_GET_STATS = 15,
- I40E_VIRTCHNL_OP_FCOE = 16,
- I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
- I40E_VIRTCHNL_OP_IWARP = 20,
- I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
- I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22,
- I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
- I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
- I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
- I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
-
-};
-
-/* Virtual channel message descriptor. This overlays the admin queue
- * descriptor. All other data is passed in external buffers.
- */
-
-struct i40e_virtchnl_msg {
- u8 pad[8]; /* AQ flags/opcode/len/retval fields */
- enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
- i40e_status v_retval; /* ditto for desc->retval */
- u32 vfid; /* used by PF when sending to VF */
-};
-
-/* Message descriptions and data structures.*/
-
-/* I40E_VIRTCHNL_OP_VERSION
- * VF posts its version number to the PF. PF responds with its version number
- * in the same format, along with a return code.
- * Reply from PF has its major/minor versions also in param0 and param1.
- * If there is a major version mismatch, then the VF cannot operate.
- * If there is a minor version mismatch, then the VF can operate but should
- * add a warning to the system log.
- *
- * This enum element MUST always be specified as == 1, regardless of other
- * changes in the API. The PF must always respond to this message without
- * error regardless of version mismatch.
- */
-#define I40E_VIRTCHNL_VERSION_MAJOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
-
-struct i40e_virtchnl_version_info {
- u32 major;
- u32 minor;
-};
-
-/* I40E_VIRTCHNL_OP_RESET_VF
- * VF sends this request to PF with no parameters
- * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
- * until reset completion is indicated. The admin queue must be reinitialized
- * after this operation.
- *
- * When reset is complete, PF must ensure that all queues in all VSIs associated
- * with the VF are stopped, all queue configurations in the HMC are set to 0,
- * and all MAC and VLAN filters (except the default MAC address) on all VSIs
- * are cleared.
- */
-
-/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * Version 1.0 VF sends this request to PF with no parameters
- * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
- * PF responds with an indirect message containing
- * i40e_virtchnl_vf_resource and one or more
- * i40e_virtchnl_vsi_resource structures.
- */
-
-struct i40e_virtchnl_vsi_resource {
- u16 vsi_id;
- u16 num_queue_pairs;
- enum i40e_vsi_type vsi_type;
- u16 qset_handle;
- u8 default_mac_addr[ETH_ALEN];
-};
-/* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
-#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
-
-#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
-
-struct i40e_virtchnl_vf_resource {
- u16 num_vsis;
- u16 num_queue_pairs;
- u16 max_vectors;
- u16 max_mtu;
-
- u32 vf_offload_flags;
- u32 rss_key_size;
- u32 rss_lut_size;
-
- struct i40e_virtchnl_vsi_resource vsi_res[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
- * VF sends this message to set up parameters for one TX queue.
- * External data buffer contains one instance of i40e_virtchnl_txq_info.
- * PF configures requested queue and returns a status code.
- */
-
-/* Tx queue config info */
-struct i40e_virtchnl_txq_info {
- u16 vsi_id;
- u16 queue_id;
- u16 ring_len; /* number of descriptors, multiple of 8 */
- u16 headwb_enabled;
- u64 dma_ring_addr;
- u64 dma_headwb_addr;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
- * VF sends this message to set up parameters for one RX queue.
- * External data buffer contains one instance of i40e_virtchnl_rxq_info.
- * PF configures requested queue and returns a status code.
- */
-
-/* Rx queue config info */
-struct i40e_virtchnl_rxq_info {
- u16 vsi_id;
- u16 queue_id;
- u32 ring_len; /* number of descriptors, multiple of 32 */
- u16 hdr_size;
- u16 splithdr_enabled;
- u32 databuffer_size;
- u32 max_pkt_size;
- u64 dma_ring_addr;
- enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
- * VF sends this message to set parameters for all active TX and RX queues
- * associated with the specified VSI.
- * PF configures queues and returns status.
- * If the number of queues specified is greater than the number of queues
- * associated with the VSI, an error is returned and no queues are configured.
- */
-struct i40e_virtchnl_queue_pair_info {
- /* NOTE: vsi_id and queue_id should be identical for both queues. */
- struct i40e_virtchnl_txq_info txq;
- struct i40e_virtchnl_rxq_info rxq;
-};
-
-struct i40e_virtchnl_vsi_queue_config_info {
- u16 vsi_id;
- u16 num_queue_pairs;
- struct i40e_virtchnl_queue_pair_info qpair[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
- * VF uses this message to map vectors to queues.
- * The rxq_map and txq_map fields are bitmaps used to indicate which queues
- * are to be associated with the specified vector.
- * The "other" causes are always mapped to vector 0.
- * PF configures interrupt mapping and returns status.
- */
-struct i40e_virtchnl_vector_map {
- u16 vsi_id;
- u16 vector_id;
- u16 rxq_map;
- u16 txq_map;
- u16 rxitr_idx;
- u16 txitr_idx;
-};
-
-struct i40e_virtchnl_irq_map_info {
- u16 num_vectors;
- struct i40e_virtchnl_vector_map vecmap[1];
-};
-
-/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
- * I40E_VIRTCHNL_OP_DISABLE_QUEUES
- * VF sends these message to enable or disable TX/RX queue pairs.
- * The queues fields are bitmaps indicating which queues to act upon.
- * (Currently, we only support 16 queues per VF, but we make the field
- * u32 to allow for expansion.)
- * PF performs requested action and returns status.
- */
-struct i40e_virtchnl_queue_select {
- u16 vsi_id;
- u16 pad;
- u32 rx_queues;
- u32 tx_queues;
-};
-
-/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
- * VF sends this message in order to add one or more unicast or multicast
- * address filters for the specified VSI.
- * PF adds the filters and returns status.
- */
-
-/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
- * VF sends this message in order to remove one or more unicast or multicast
- * filters for the specified VSI.
- * PF removes the filters and returns status.
- */
-
-struct i40e_virtchnl_ether_addr {
- u8 addr[ETH_ALEN];
- u8 pad[2];
-};
-
-struct i40e_virtchnl_ether_addr_list {
- u16 vsi_id;
- u16 num_elements;
- struct i40e_virtchnl_ether_addr list[1];
-};
-
-/* I40E_VIRTCHNL_OP_ADD_VLAN
- * VF sends this message to add one or more VLAN tag filters for receives.
- * PF adds the filters and returns status.
- * If a port VLAN is configured by the PF, this operation will return an
- * error to the VF.
- */
-
-/* I40E_VIRTCHNL_OP_DEL_VLAN
- * VF sends this message to remove one or more VLAN tag filters for receives.
- * PF removes the filters and returns status.
- * If a port VLAN is configured by the PF, this operation will return an
- * error to the VF.
- */
-
-struct i40e_virtchnl_vlan_filter_list {
- u16 vsi_id;
- u16 num_elements;
- u16 vlan_id[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
- * VF sends VSI id and flags.
- * PF returns status code in retval.
- * Note: we assume that broadcast accept mode is always enabled.
- */
-struct i40e_virtchnl_promisc_info {
- u16 vsi_id;
- u16 flags;
-};
-
-#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
-#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
-
-/* I40E_VIRTCHNL_OP_GET_STATS
- * VF sends this message to request stats for the selected VSI. VF uses
- * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
- * field is ignored by the PF.
- *
- * PF replies with struct i40e_eth_stats in an external buffer.
- */
-
-/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
- * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
- * VF sends these messages to configure RSS. Only supported if both PF
- * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
- * configuration negotiation. If this is the case, then the RSS fields in
- * the VF resource struct are valid.
- * Both the key and LUT are initialized to 0 by the PF, meaning that
- * RSS is effectively disabled until set up by the VF.
- */
-struct i40e_virtchnl_rss_key {
- u16 vsi_id;
- u16 key_len;
- u8 key[1]; /* RSS hash key, packed bytes */
-};
-
-struct i40e_virtchnl_rss_lut {
- u16 vsi_id;
- u16 lut_entries;
- u8 lut[1]; /* RSS lookup table*/
-};
-
-/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
- * I40E_VIRTCHNL_OP_SET_RSS_HENA
- * VF sends these messages to get and set the hash filter enable bits for RSS.
- * By default, the PF sets these to all possible traffic types that the
- * hardware supports. The VF can query this value if it wants to change the
- * traffic types that are hashed by the hardware.
- * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
- */
-struct i40e_virtchnl_rss_hena {
- u64 hena;
-};
-
-/* I40E_VIRTCHNL_OP_EVENT
- * PF sends this message to inform the VF driver of events that may affect it.
- * No direct response is expected from the VF, though it may generate other
- * messages in response to this one.
- */
-enum i40e_virtchnl_event_codes {
- I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
- I40E_VIRTCHNL_EVENT_LINK_CHANGE,
- I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
- I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
-};
-#define I40E_PF_EVENT_SEVERITY_INFO 0
-#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
-
-struct i40e_virtchnl_pf_event {
- enum i40e_virtchnl_event_codes event;
- union {
- struct {
- enum i40e_aq_link_speed link_speed;
- bool link_status;
- } link_event;
- } event_data;
-
- int severity;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
- * VF uses this message to request PF to map IWARP vectors to IWARP queues.
- * The request for this originates from the VF IWARP driver through
- * a client interface between VF LAN and VF IWARP driver.
- * A vector could have an AEQ and CEQ attached to it although
- * there is a single AEQ per VF IWARP instance in which case
- * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
- * There will never be a case where there will be multiple CEQs attached
- * to a single vector.
- * PF configures interrupt mapping and returns status.
- */
-
-/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
- * In order for us to keep the interface simple, SW will define a
- * unique type value for AEQ.
-*/
-#define I40E_QUEUE_TYPE_PE_AEQ 0x80
-#define I40E_QUEUE_INVALID_IDX 0xFFFF
-
-struct i40e_virtchnl_iwarp_qv_info {
- u32 v_idx; /* msix_vector */
- u16 ceq_idx;
- u16 aeq_idx;
- u8 itr_idx;
-};
-
-struct i40e_virtchnl_iwarp_qvlist_info {
- u32 num_vectors;
- struct i40e_virtchnl_iwarp_qv_info qv_info[1];
-};
-
-/* VF reset states - these are written into the RSTAT register:
- * I40E_VFGEN_RSTAT1 on the PF
- * I40E_VFGEN_RSTAT on the VF
- * When the PF initiates a reset, it writes 0
- * When the reset is complete, it writes 1
- * When the PF detects that the VF has recovered, it writes 2
- * VF checks this register periodically to determine if a reset has occurred,
- * then polls it to know when the reset is complete.
- * If either the PF or VF reads the register while the hardware
- * is in a reset state, it will return DEADBEEF, which, when masked
- * will result in 3.
- */
-enum i40e_vfr_states {
- I40E_VFR_INPROGRESS = 0,
- I40E_VFR_COMPLETED,
- I40E_VFR_VFACTIVE,
- I40E_VFR_UNKNOWN,
-};
-
-#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 0fb38ca..ecbe40e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -39,7 +39,7 @@
* send a message to all VFs on a given PF
**/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg,
u16 msglen)
{
@@ -70,14 +70,14 @@
**/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
- struct i40e_virtchnl_pf_event pfe;
+ struct virtchnl_pf_event pfe;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
struct i40e_link_status *ls = &pf->hw.phy.link_info;
int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
- pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
- pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
+ pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = PF_EVENT_SEVERITY_INFO;
if (vf->link_forced) {
pfe.event_data.link_event.link_status = vf->link_up;
pfe.event_data.link_event.link_speed =
@@ -85,9 +85,10 @@
} else {
pfe.event_data.link_event.link_status =
ls->link_info & I40E_AQ_LINK_UP;
- pfe.event_data.link_event.link_speed = ls->link_speed;
+ pfe.event_data.link_event.link_speed =
+ (enum virtchnl_link_speed)ls->link_speed;
}
- i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
+ i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL);
}
@@ -113,12 +114,12 @@
**/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
- struct i40e_virtchnl_pf_event pfe;
+ struct virtchnl_pf_event pfe;
- pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
- pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
- i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, 0,
- (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
+ (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}
/**
@@ -129,7 +130,7 @@
**/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
- struct i40e_virtchnl_pf_event pfe;
+ struct virtchnl_pf_event pfe;
int abs_vf_id;
/* validate the request */
@@ -143,11 +144,11 @@
abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
- pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
- pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
- i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe,
- sizeof(struct i40e_virtchnl_pf_event), NULL);
+ sizeof(struct virtchnl_pf_event), NULL);
}
/***********************misc routines*****************************/
@@ -250,7 +251,7 @@
* configure irq link list from the map
**/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
- struct i40e_virtchnl_vector_map *vecmap)
+ struct virtchnl_vector_map *vecmap)
{
unsigned long linklistmap = 0, tempmap;
struct i40e_pf *pf = vf->pf;
@@ -338,7 +339,7 @@
/* if the vf is running in polling mode and using interrupt zero,
* need to disable auto-mask on enabling zero interrupt for VFs.
*/
- if ((vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
+ if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
(vector_id == 0)) {
reg = rd32(hw, I40E_GLINT_CTL);
if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
@@ -359,7 +360,7 @@
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
struct i40e_pf *pf = vf->pf;
- struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
+ struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
u32 msix_vf;
u32 i;
@@ -368,7 +369,7 @@
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
for (i = 0; i < qvlist_info->num_vectors; i++) {
- struct i40e_virtchnl_iwarp_qv_info *qv_info;
+ struct virtchnl_iwarp_qv_info *qv_info;
u32 next_q_index, next_q_type;
struct i40e_hw *hw = &pf->hw;
u32 v_idx, reg_idx, reg;
@@ -409,17 +410,17 @@
* Return 0 on success or < 0 on error
**/
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
- struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info)
+ struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
- struct i40e_virtchnl_iwarp_qv_info *qv_info;
+ struct virtchnl_iwarp_qv_info *qv_info;
u32 v_idx, i, reg_idx, reg;
u32 next_q_idx, next_q_type;
u32 msix_vf, size;
- size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
- (sizeof(struct i40e_virtchnl_iwarp_qv_info) *
+ size = sizeof(struct virtchnl_iwarp_qvlist_info) +
+ (sizeof(struct virtchnl_iwarp_qv_info) *
(qvlist_info->num_vectors - 1));
vf->qvlist_info = kzalloc(size, GFP_KERNEL);
vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
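Note the size arithmetic here: virtchnl_iwarp_qvlist_info ends in a one-element array, so sizeof() already covers the first qv_info entry and only num_vectors - 1 further entries are added. The same computation in isolation, as a hypothetical helper for clarity:

/* Bytes needed for a qvlist carrying n entries. Requires n >= 1; the
 * message validation path rejects num_vectors == 0 before this runs.
 */
static inline size_t i40e_qvlist_size(u32 n)
{
        return sizeof(struct virtchnl_iwarp_qvlist_info) +
               (n - 1) * sizeof(struct virtchnl_iwarp_qv_info);
}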
@@ -492,7 +493,7 @@
**/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
u16 vsi_queue_id,
- struct i40e_virtchnl_txq_info *info)
+ struct virtchnl_txq_info *info)
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
@@ -569,7 +570,7 @@
**/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
u16 vsi_queue_id,
- struct i40e_virtchnl_rxq_info *info)
+ struct virtchnl_rxq_info *info)
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
@@ -1017,7 +1018,7 @@
* after VF has been fully initialized, because the VF driver may
* request resources immediately after setting this flag.
*/
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}
/**
@@ -1461,7 +1462,7 @@
* send resp msg to VF
**/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
- enum i40e_virtchnl_ops opcode,
+ enum virtchnl_ops opcode,
i40e_status retval)
{
return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
@@ -1475,18 +1476,17 @@
**/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
- struct i40e_virtchnl_version_info info = {
- I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
+ struct virtchnl_version_info info = {
+ VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
};
- vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg;
+ vf->vf_ver = *(struct virtchnl_version_info *)msg;
/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
- if (VF_IS_V10(vf))
- info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
- return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+ if (VF_IS_V10(&vf->vf_ver))
+ info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+ return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
I40E_SUCCESS, (u8 *)&info,
- sizeof(struct
- i40e_virtchnl_version_info));
+ sizeof(struct virtchnl_version_info));
}
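The VF_IS_V10()/VF_IS_V11() macros move into the shared header along with everything else and now take a pointer to the version struct rather than to the VF, hence the VF_IS_V10(&vf->vf_ver) call sites above. Presumably they read along these lines (an assumption here; the authoritative definitions live in include/linux/avf/virtchnl.h):

#define VF_IS_V10(_ver) (((_ver)->major == 1) && ((_ver)->minor == 0))
#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))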
/**
@@ -1499,7 +1499,7 @@
**/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
- struct i40e_virtchnl_vf_resource *vfres = NULL;
+ struct virtchnl_vf_resource *vfres = NULL;
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
@@ -1512,8 +1512,8 @@
goto err;
}
- len = (sizeof(struct i40e_virtchnl_vf_resource) +
- sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);
+ len = (sizeof(struct virtchnl_vf_resource) +
+ sizeof(struct virtchnl_vsi_resource) * num_vsis);
vfres = kzalloc(len, GFP_KERNEL);
if (!vfres) {
@@ -1521,50 +1521,48 @@
len = 0;
goto err;
}
- if (VF_IS_V11(vf))
+ if (VF_IS_V11(&vf->vf_ver))
vf->driver_caps = *(u32 *)msg;
else
- vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+ vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ VIRTCHNL_VF_OFFLOAD_VLAN;
- vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
+ vfres->vf_offload_flags = VIRTCHNL_VF_OFFLOAD_L2;
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi->info.pvid)
- vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+ vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
if (i40e_vf_client_capable(pf, vf->vf_id) &&
- (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) {
- vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP;
+ (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
+ vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
}
- if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
- vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF;
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
} else {
if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
- (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ))
- vfres->vf_offload_flags |=
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+ (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
+ vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
else
- vfres->vf_offload_flags |=
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
+ vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
}
if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
- if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
vfres->vf_offload_flags |=
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
+ VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
}
- if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP)
- vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_ENCAP;
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
+ vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
if ((pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
- (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
- vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+ (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
+ vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
- if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
if (pf->flags & I40E_FLAG_MFP_ENABLED) {
dev_err(&pf->pdev->dev,
"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
@@ -1572,13 +1570,13 @@
ret = I40E_ERR_PARAM;
goto err;
}
- vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+ vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
}
if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) {
- if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
vfres->vf_offload_flags |=
- I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
+ VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
}
vfres->num_vsis = num_vsis;
@@ -1589,7 +1587,7 @@
if (vf->lan_vsi_idx) {
vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
- vfres->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
+ vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
/* VFs only use TC 0 */
vfres->vsi_res[0].qset_handle
@@ -1601,7 +1599,7 @@
err:
/* send the response back to the VF */
- ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
aq_ret, (u8 *)vfres, len);
kfree(vfres);
@@ -1655,8 +1653,8 @@
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_promisc_info *info =
- (struct i40e_virtchnl_promisc_info *)msg;
+ struct virtchnl_promisc_info *info =
+ (struct virtchnl_promisc_info *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
struct i40e_mac_filter *f;
@@ -1683,7 +1681,7 @@
goto error_param;
}
/* Multicast promiscuous handling */
- if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
+ if (info->flags & FLAG_VF_MULTICAST_PROMISC)
allmulti = true;
if (vf->port_vlan_id) {
@@ -1734,7 +1732,7 @@
clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
}
- if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
+ if (info->flags & FLAG_VF_UNICAST_PROMISC)
alluni = true;
if (vf->port_vlan_id) {
aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
@@ -1788,7 +1786,7 @@
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
aq_ret);
}
@@ -1803,9 +1801,9 @@
**/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_vsi_queue_config_info *qci =
- (struct i40e_virtchnl_vsi_queue_config_info *)msg;
- struct i40e_virtchnl_queue_pair_info *qpi;
+ struct virtchnl_vsi_queue_config_info *qci =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ struct virtchnl_queue_pair_info *qpi;
struct i40e_pf *pf = vf->pf;
u16 vsi_id, vsi_queue_id;
i40e_status aq_ret = 0;
@@ -1845,7 +1843,7 @@
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
aq_ret);
}
@@ -1860,9 +1858,9 @@
**/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_irq_map_info *irqmap_info =
- (struct i40e_virtchnl_irq_map_info *)msg;
- struct i40e_virtchnl_vector_map *map;
+ struct virtchnl_irq_map_info *irqmap_info =
+ (struct virtchnl_irq_map_info *)msg;
+ struct virtchnl_vector_map *map;
u16 vsi_id, vsi_queue_id, vector_id;
i40e_status aq_ret = 0;
unsigned long tempmap;
@@ -1908,7 +1906,7 @@
}
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
aq_ret);
}
@@ -1922,8 +1920,8 @@
**/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_queue_select *vqs =
- (struct i40e_virtchnl_queue_select *)msg;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
u16 vsi_id = vqs->vsi_id;
i40e_status aq_ret = 0;
@@ -1947,7 +1945,7 @@
aq_ret = I40E_ERR_TIMEOUT;
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
aq_ret);
}
@@ -1962,8 +1960,8 @@
**/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_queue_select *vqs =
- (struct i40e_virtchnl_queue_select *)msg;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
@@ -1986,7 +1984,7 @@
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
aq_ret);
}
@@ -2000,8 +1998,8 @@
**/
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_queue_select *vqs =
- (struct i40e_virtchnl_queue_select *)msg;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_eth_stats stats;
i40e_status aq_ret = 0;
@@ -2029,7 +2027,7 @@
error_param:
/* send the response back to the VF */
- return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
+ return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
(u8 *)&stats, sizeof(stats));
}
@@ -2088,8 +2086,8 @@
**/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_ether_addr_list *al =
- (struct i40e_virtchnl_ether_addr_list *)msg;
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = al->vsi_id;
@@ -2143,7 +2141,7 @@
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
ret);
}
@@ -2157,8 +2155,8 @@
**/
static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_ether_addr_list *al =
- (struct i40e_virtchnl_ether_addr_list *)msg;
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = al->vsi_id;
@@ -2203,7 +2201,7 @@
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
ret);
}
@@ -2217,8 +2215,8 @@
**/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_vlan_filter_list *vfl =
- (struct i40e_virtchnl_vlan_filter_list *)msg;
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = vfl->vsi_id;
@@ -2277,7 +2275,7 @@
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
}
/**
@@ -2290,8 +2288,8 @@
**/
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_vlan_filter_list *vfl =
- (struct i40e_virtchnl_vlan_filter_list *)msg;
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = vfl->vsi_id;
@@ -2335,7 +2333,7 @@
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
}
/**
@@ -2363,7 +2361,7 @@
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_IWARP,
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
aq_ret);
}
@@ -2379,8 +2377,8 @@
static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
bool config)
{
- struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info =
- (struct i40e_virtchnl_iwarp_qvlist_info *)msg;
+ struct virtchnl_iwarp_qvlist_info *qvlist_info =
+ (struct virtchnl_iwarp_qvlist_info *)msg;
i40e_status aq_ret = 0;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
@@ -2399,8 +2397,8 @@
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
- config ? I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
- I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+ config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
+ VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
aq_ret);
}
@@ -2414,8 +2412,8 @@
**/
static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_rss_key *vrk =
- (struct i40e_virtchnl_rss_key *)msg;
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = vrk->vsi_id;
@@ -2432,7 +2430,7 @@
aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
err:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
aq_ret);
}
@@ -2446,8 +2444,8 @@
**/
static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_rss_lut *vrl =
- (struct i40e_virtchnl_rss_lut *)msg;
+ struct virtchnl_rss_lut *vrl =
+ (struct virtchnl_rss_lut *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = vrl->vsi_id;
@@ -2464,7 +2462,7 @@
aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
/* send the response to the VF */
err:
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
aq_ret);
}
@@ -2478,7 +2476,7 @@
**/
static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_rss_hena *vrh = NULL;
+ struct virtchnl_rss_hena *vrh = NULL;
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
int len = 0;
@@ -2487,7 +2485,7 @@
aq_ret = I40E_ERR_PARAM;
goto err;
}
- len = sizeof(struct i40e_virtchnl_rss_hena);
+ len = sizeof(struct virtchnl_rss_hena);
vrh = kzalloc(len, GFP_KERNEL);
if (!vrh) {
@@ -2498,7 +2496,7 @@
vrh->hena = i40e_pf_get_default_rss_hena(pf);
err:
/* send the response back to the VF */
- aq_ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+ aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
aq_ret, (u8 *)vrh, len);
kfree(vrh);
return aq_ret;
@@ -2514,8 +2512,8 @@
**/
static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
- struct i40e_virtchnl_rss_hena *vrh =
- (struct i40e_virtchnl_rss_hena *)msg;
+ struct virtchnl_rss_hena *vrh =
+ (struct virtchnl_rss_hena *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
i40e_status aq_ret = 0;
@@ -2530,170 +2528,7 @@
/* send the response to the VF */
err:
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_SET_RSS_HENA,
- aq_ret);
-}
-
-/**
- * i40e_vc_validate_vf_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- * @msghndl: msg handle
- *
- * validate msg
- **/
-static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
- u32 v_retval, u8 *msg, u16 msglen)
-{
- bool err_msg_format = false;
- int valid_len = 0;
-
- /* Check if VF is disabled. */
- if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
- return I40E_ERR_PARAM;
-
- /* Validate message length. */
- switch (v_opcode) {
- case I40E_VIRTCHNL_OP_VERSION:
- valid_len = sizeof(struct i40e_virtchnl_version_info);
- break;
- case I40E_VIRTCHNL_OP_RESET_VF:
- break;
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
- if (VF_IS_V11(vf))
- valid_len = sizeof(u32);
- break;
- case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
- valid_len = sizeof(struct i40e_virtchnl_txq_info);
- break;
- case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
- valid_len = sizeof(struct i40e_virtchnl_rxq_info);
- break;
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
- if (msglen >= valid_len) {
- struct i40e_virtchnl_vsi_queue_config_info *vqc =
- (struct i40e_virtchnl_vsi_queue_config_info *)msg;
- valid_len += (vqc->num_queue_pairs *
- sizeof(struct
- i40e_virtchnl_queue_pair_info));
- if (vqc->num_queue_pairs == 0)
- err_msg_format = true;
- }
- break;
- case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
- valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
- if (msglen >= valid_len) {
- struct i40e_virtchnl_irq_map_info *vimi =
- (struct i40e_virtchnl_irq_map_info *)msg;
- valid_len += (vimi->num_vectors *
- sizeof(struct i40e_virtchnl_vector_map));
- if (vimi->num_vectors == 0)
- err_msg_format = true;
- }
- break;
- case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
- case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
- valid_len = sizeof(struct i40e_virtchnl_queue_select);
- break;
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
- valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
- if (msglen >= valid_len) {
- struct i40e_virtchnl_ether_addr_list *veal =
- (struct i40e_virtchnl_ether_addr_list *)msg;
- valid_len += veal->num_elements *
- sizeof(struct i40e_virtchnl_ether_addr);
- if (veal->num_elements == 0)
- err_msg_format = true;
- }
- break;
- case I40E_VIRTCHNL_OP_ADD_VLAN:
- case I40E_VIRTCHNL_OP_DEL_VLAN:
- valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
- if (msglen >= valid_len) {
- struct i40e_virtchnl_vlan_filter_list *vfl =
- (struct i40e_virtchnl_vlan_filter_list *)msg;
- valid_len += vfl->num_elements * sizeof(u16);
- if (vfl->num_elements == 0)
- err_msg_format = true;
- }
- break;
- case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
- valid_len = sizeof(struct i40e_virtchnl_promisc_info);
- break;
- case I40E_VIRTCHNL_OP_GET_STATS:
- valid_len = sizeof(struct i40e_virtchnl_queue_select);
- break;
- case I40E_VIRTCHNL_OP_IWARP:
- /* These messages are opaque to us and will be validated in
- * the RDMA client code. We just need to check for nonzero
- * length. The firmware will enforce max length restrictions.
- */
- if (msglen)
- valid_len = msglen;
- else
- err_msg_format = true;
- break;
- case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
- valid_len = 0;
- break;
- case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
- valid_len = sizeof(struct i40e_virtchnl_iwarp_qvlist_info);
- if (msglen >= valid_len) {
- struct i40e_virtchnl_iwarp_qvlist_info *qv =
- (struct i40e_virtchnl_iwarp_qvlist_info *)msg;
- if (qv->num_vectors == 0) {
- err_msg_format = true;
- break;
- }
- valid_len += ((qv->num_vectors - 1) *
- sizeof(struct i40e_virtchnl_iwarp_qv_info));
- }
- break;
- case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
- valid_len = sizeof(struct i40e_virtchnl_rss_key);
- if (msglen >= valid_len) {
- struct i40e_virtchnl_rss_key *vrk =
- (struct i40e_virtchnl_rss_key *)msg;
- if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
- err_msg_format = true;
- break;
- }
- valid_len += vrk->key_len - 1;
- }
- break;
- case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
- valid_len = sizeof(struct i40e_virtchnl_rss_lut);
- if (msglen >= valid_len) {
- struct i40e_virtchnl_rss_lut *vrl =
- (struct i40e_virtchnl_rss_lut *)msg;
- if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
- err_msg_format = true;
- break;
- }
- valid_len += vrl->lut_entries - 1;
- }
- break;
- case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
- break;
- case I40E_VIRTCHNL_OP_SET_RSS_HENA:
- valid_len = sizeof(struct i40e_virtchnl_rss_hena);
- break;
- /* These are always errors coming from the VF. */
- case I40E_VIRTCHNL_OP_EVENT:
- case I40E_VIRTCHNL_OP_UNKNOWN:
- default:
- return -EPERM;
- }
- /* few more checks */
- if ((valid_len != msglen) || (err_msg_format)) {
- i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
- return -EINVAL;
- } else {
- return 0;
- }
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
}
/**
@@ -2719,80 +2554,104 @@
if (local_vf_id >= pf->num_alloc_vfs)
return -EINVAL;
vf = &(pf->vf[local_vf_id]);
+
+ /* Check if VF is disabled. */
+ if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
+ return I40E_ERR_PARAM;
+
/* perform basic checks on the msg */
- ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
+ ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
+
+ /* perform additional checks specific to this driver */
+ if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
+ struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
+
+ if (vrk->key_len != I40E_HKEY_ARRAY_SIZE)
+ ret = -EINVAL;
+ } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
+ struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+
+ if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)
+ ret = -EINVAL;
+ }
if (ret) {
+ i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
local_vf_id, v_opcode, msglen);
- return ret;
+ switch (ret) {
+ case VIRTCHNL_ERR_PARAM:
+ return -EPERM;
+ default:
+ return -EINVAL;
+ }
}
switch (v_opcode) {
- case I40E_VIRTCHNL_OP_VERSION:
+ case VIRTCHNL_OP_VERSION:
ret = i40e_vc_get_version_msg(vf, msg);
break;
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
ret = i40e_vc_get_vf_resources_msg(vf, msg);
break;
- case I40E_VIRTCHNL_OP_RESET_VF:
+ case VIRTCHNL_OP_RESET_VF:
i40e_vc_reset_vf_msg(vf);
ret = 0;
break;
- case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
ret = i40e_vc_config_queues_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_ENABLE_QUEUES:
ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
i40e_vc_notify_vf_link_state(vf);
break;
- case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_ADD_VLAN:
ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_DEL_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_GET_STATS:
+ case VIRTCHNL_OP_GET_STATS:
ret = i40e_vc_get_stats_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_IWARP:
+ case VIRTCHNL_OP_IWARP:
ret = i40e_vc_iwarp_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
break;
- case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
break;
- case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
ret = i40e_vc_config_rss_key(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
ret = i40e_vc_config_rss_lut(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
ret = i40e_vc_get_rss_hena(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+ case VIRTCHNL_OP_SET_RSS_HENA:
ret = i40e_vc_set_rss_hena(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_UNKNOWN:
+ case VIRTCHNL_OP_UNKNOWN:
default:
dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
v_opcode, local_vf_id);
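This hunk replaces the driver-private validator deleted above: generic, length-oriented message checks now happen in the shared virtchnl_vc_validate_vf_msg(), and only the limits the common code cannot know about (the device-specific RSS key and LUT sizes) stay in the driver. The resulting control flow, condensed from the code above:

/* 1) protocol-level validation, shared across AVF drivers */
ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);

/* 2) device-specific limits layered on top */
if (!ret && v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY &&
    ((struct virtchnl_rss_key *)msg)->key_len != I40E_HKEY_ARRAY_SIZE)
        ret = -EINVAL;

/* 3) map the validator's error space onto kernel error codes */
if (ret)
        return ret == VIRTCHNL_ERR_PARAM ? -EPERM : -EINVAL;

The VF-disabled check likewise moves out of the validator and is performed before validation, since it concerns VF state rather than message format.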
@@ -3220,7 +3079,7 @@
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
- struct i40e_virtchnl_pf_event pfe;
+ struct virtchnl_pf_event pfe;
struct i40e_hw *hw = &pf->hw;
struct i40e_vf *vf;
int abs_vf_id;
@@ -3236,8 +3095,8 @@
vf = &pf->vf[vf_id];
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
- pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
- pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
+ pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = PF_EVENT_SEVERITY_INFO;
switch (link) {
case IFLA_VF_LINK_STATE_AUTO:
@@ -3245,6 +3104,7 @@
pfe.event_data.link_event.link_status =
pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
pfe.event_data.link_event.link_speed =
+ (enum virtchnl_link_speed)
pf->hw.phy.link_info.link_speed;
break;
case IFLA_VF_LINK_STATE_ENABLE:
@@ -3264,7 +3124,7 @@
goto error_out;
}
/* Notify the VF of its new link state */
- i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
+ i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL);
error_out:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 20d7c81..1f4b0c5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -40,9 +40,6 @@
#define I40E_VLAN_MASK 0xFFF
#define I40E_PRIORITY_MASK 0x7000
-#define VF_IS_V10(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 0))
-#define VF_IS_V11(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 1))
-
/* Various queue ctrls */
enum i40e_queue_ctrl {
I40E_QUEUE_CTRL_UNKNOWN = 0,
@@ -81,13 +78,13 @@
s16 vf_id;
/* all VF vsis connect to the same parent */
enum i40e_switch_element_types parent_type;
- struct i40e_virtchnl_version_info vf_ver;
+ struct virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */
/* VF Port Extender (PE) stag if used */
u16 stag;
- struct i40e_virtchnl_ether_addr default_lan_addr;
+ struct virtchnl_ether_addr default_lan_addr;
u16 port_vlan_id;
bool pf_set_mac; /* The VMM admin set the VF MAC address */
bool trusted;
@@ -115,7 +112,7 @@
u16 num_vlan;
/* RDMA Client */
- struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info;
+ struct virtchnl_iwarp_qvlist_info *qvlist_info;
};
void i40e_free_vfs(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 91d8786..83e63e5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -528,7 +528,7 @@
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
#define I40E_AQC_MC_MAG_EN_VALID 0x100
-#define I40E_AQC_ADDR_VALID_MASK 0x1F0
+#define I40E_AQC_ADDR_VALID_MASK 0x3F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
@@ -586,6 +586,7 @@
__le16 cmd_flags;
#define I40E_AQC_SET_WOL_FILTER 0x8000
#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000
#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
__le16 valid_flags;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 43f1076..1dd1938 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -27,7 +27,7 @@
#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
-#include "i40e_virtchnl.h"
+#include <linux/avf/virtchnl.h>
/**
* i40e_set_mac_type - Sets MAC type
@@ -68,6 +68,7 @@
break;
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
+ case I40E_DEV_ID_ADAPTIVE_VF:
hw->mac.type = I40E_MAC_VF;
break;
default:
@@ -1054,7 +1055,7 @@
* completion before returning.
**/
i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
i40e_status v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details)
@@ -1092,9 +1093,9 @@
* with appropriate information.
**/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
- struct i40e_virtchnl_vf_resource *msg)
+ struct virtchnl_vf_resource *msg)
{
- struct i40e_virtchnl_vsi_resource *vsi_res;
+ struct virtchnl_vsi_resource *vsi_res;
int i;
vsi_res = &msg->vsi_res[0];
@@ -1104,11 +1105,10 @@
hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
hw->dev_caps.dcb = msg->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_L2;
- hw->dev_caps.fcoe = (msg->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
+ VIRTCHNL_VF_OFFLOAD_L2;
+ hw->dev_caps.fcoe = 0;
for (i = 0; i < msg->num_vsis; i++) {
- if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
+ if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
ether_addr_copy(hw->mac.perm_addr,
vsi_res->default_mac_addr);
ether_addr_copy(hw->mac.addr,
@@ -1128,7 +1128,7 @@
**/
i40e_status i40e_vf_reset(struct i40e_hw *hw)
{
- return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
+ return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
0, NULL, 0, NULL);
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
index d76393c..0469e4b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -43,6 +43,7 @@
#define I40E_DEV_ID_25G_SFP28 0x158B
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
+#define I40E_DEV_ID_ADAPTIVE_VF 0x1889
#define I40E_DEV_ID_SFP_X722 0x37D0
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 741223d..c9836bb 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -29,7 +29,7 @@
#include "i40e_type.h"
#include "i40e_alloc.h"
-#include "i40e_virtchnl.h"
+#include <linux/avf/virtchnl.h>
/* Prototypes for shared code functions that are not in
* the standard function pointer structures. These are
@@ -87,10 +87,10 @@
/* i40e_common for VF drivers*/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
- struct i40e_virtchnl_vf_resource *msg);
+ struct virtchnl_vf_resource *msg);
i40e_status i40e_vf_reset(struct i40e_hw *hw);
i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
i40e_status v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
deleted file mode 100644
index c5ad038..0000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ /dev/null
@@ -1,449 +0,0 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
-
-#ifndef _I40E_VIRTCHNL_H_
-#define _I40E_VIRTCHNL_H_
-
-#include "i40e_type.h"
-
-/* Description:
- * This header file describes the VF-PF communication protocol used
- * by the various i40e drivers.
- *
- * Admin queue buffer usage:
- * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
- * flags, retval, datalen, and data addr are all used normally.
- * Firmware copies the cookie fields when sending messages between the PF and
- * VF, but uses all other fields internally. Due to this limitation, we
- * must send all messages as "indirect", i.e. using an external buffer.
- *
- * All the VSI indexes are relative to the VF. Each VF can have a maximum of
- * three VSIs. All the queue indexes are relative to the VSI. Each VF can
- * have a maximum of sixteen queues for all of its VSIs.
- *
- * The PF is required to return a status code in v_retval for all messages
- * except RESET_VF, which does not require any response. The return value is of
- * i40e_status_code type, defined in i40e_type.h.
- *
- * In general, VF driver initialization should roughly follow the order of these
- * opcodes. The VF driver must first validate the API version of the PF driver,
- * then request a reset, then get resources, then configure queues and
- * interrupts. After these operations are complete, the VF driver may start
- * its queues, optionally add MAC and VLAN filters, and process traffic.
- */
-
-/* Opcodes for VF-PF communication. These are placed in the v_opcode field
- * of the virtchnl_msg structure.
- */
-enum i40e_virtchnl_ops {
-/* The PF sends status change events to VFs using
- * the I40E_VIRTCHNL_OP_EVENT opcode.
- * VFs send requests to the PF using the other ops.
- */
- I40E_VIRTCHNL_OP_UNKNOWN = 0,
- I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
- I40E_VIRTCHNL_OP_RESET_VF = 2,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
- I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
- I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
- I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
- I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
- I40E_VIRTCHNL_OP_ADD_VLAN = 12,
- I40E_VIRTCHNL_OP_DEL_VLAN = 13,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
- I40E_VIRTCHNL_OP_GET_STATS = 15,
- I40E_VIRTCHNL_OP_FCOE = 16,
- I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
- I40E_VIRTCHNL_OP_IWARP = 20,
- I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
- I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22,
- I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
- I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
- I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
- I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
-
-};
-
-/* Virtual channel message descriptor. This overlays the admin queue
- * descriptor. All other data is passed in external buffers.
- */
-
-struct i40e_virtchnl_msg {
- u8 pad[8]; /* AQ flags/opcode/len/retval fields */
- enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
- i40e_status v_retval; /* ditto for desc->retval */
- u32 vfid; /* used by PF when sending to VF */
-};
-
-/* Message descriptions and data structures.*/
-
-/* I40E_VIRTCHNL_OP_VERSION
- * VF posts its version number to the PF. PF responds with its version number
- * in the same format, along with a return code.
- * Reply from PF has its major/minor versions also in param0 and param1.
- * If there is a major version mismatch, then the VF cannot operate.
- * If there is a minor version mismatch, then the VF can operate but should
- * add a warning to the system log.
- *
- * This enum element MUST always be specified as == 1, regardless of other
- * changes in the API. The PF must always respond to this message without
- * error regardless of version mismatch.
- */
-#define I40E_VIRTCHNL_VERSION_MAJOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
-
-struct i40e_virtchnl_version_info {
- u32 major;
- u32 minor;
-};
-
-/* I40E_VIRTCHNL_OP_RESET_VF
- * VF sends this request to PF with no parameters
- * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
- * until reset completion is indicated. The admin queue must be reinitialized
- * after this operation.
- *
- * When reset is complete, PF must ensure that all queues in all VSIs associated
- * with the VF are stopped, all queue configurations in the HMC are set to 0,
- * and all MAC and VLAN filters (except the default MAC address) on all VSIs
- * are cleared.
- */
-
-/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * Version 1.0 VF sends this request to PF with no parameters
- * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
- * PF responds with an indirect message containing
- * i40e_virtchnl_vf_resource and one or more
- * i40e_virtchnl_vsi_resource structures.
- */
-
-struct i40e_virtchnl_vsi_resource {
- u16 vsi_id;
- u16 num_queue_pairs;
- enum i40e_vsi_type vsi_type;
- u16 qset_handle;
- u8 default_mac_addr[ETH_ALEN];
-};
-/* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
-#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
-
-#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
-
-struct i40e_virtchnl_vf_resource {
- u16 num_vsis;
- u16 num_queue_pairs;
- u16 max_vectors;
- u16 max_mtu;
-
- u32 vf_offload_flags;
- u32 rss_key_size;
- u32 rss_lut_size;
-
- struct i40e_virtchnl_vsi_resource vsi_res[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
- * VF sends this message to set up parameters for one TX queue.
- * External data buffer contains one instance of i40e_virtchnl_txq_info.
- * PF configures requested queue and returns a status code.
- */
-
-/* Tx queue config info */
-struct i40e_virtchnl_txq_info {
- u16 vsi_id;
- u16 queue_id;
- u16 ring_len; /* number of descriptors, multiple of 8 */
- u16 headwb_enabled;
- u64 dma_ring_addr;
- u64 dma_headwb_addr;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
- * VF sends this message to set up parameters for one RX queue.
- * External data buffer contains one instance of i40e_virtchnl_rxq_info.
- * PF configures requested queue and returns a status code.
- */
-
-/* Rx queue config info */
-struct i40e_virtchnl_rxq_info {
- u16 vsi_id;
- u16 queue_id;
- u32 ring_len; /* number of descriptors, multiple of 32 */
- u16 hdr_size;
- u16 splithdr_enabled;
- u32 databuffer_size;
- u32 max_pkt_size;
- u64 dma_ring_addr;
- enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
- * VF sends this message to set parameters for all active TX and RX queues
- * associated with the specified VSI.
- * PF configures queues and returns status.
- * If the number of queues specified is greater than the number of queues
- * associated with the VSI, an error is returned and no queues are configured.
- */
-struct i40e_virtchnl_queue_pair_info {
- /* NOTE: vsi_id and queue_id should be identical for both queues. */
- struct i40e_virtchnl_txq_info txq;
- struct i40e_virtchnl_rxq_info rxq;
-};
-
-struct i40e_virtchnl_vsi_queue_config_info {
- u16 vsi_id;
- u16 num_queue_pairs;
- struct i40e_virtchnl_queue_pair_info qpair[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
- * VF uses this message to map vectors to queues.
- * The rxq_map and txq_map fields are bitmaps used to indicate which queues
- * are to be associated with the specified vector.
- * The "other" causes are always mapped to vector 0.
- * PF configures interrupt mapping and returns status.
- */
-struct i40e_virtchnl_vector_map {
- u16 vsi_id;
- u16 vector_id;
- u16 rxq_map;
- u16 txq_map;
- u16 rxitr_idx;
- u16 txitr_idx;
-};
-
-struct i40e_virtchnl_irq_map_info {
- u16 num_vectors;
- struct i40e_virtchnl_vector_map vecmap[1];
-};
-
-/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
- * I40E_VIRTCHNL_OP_DISABLE_QUEUES
- * VF sends these messages to enable or disable TX/RX queue pairs.
- * The queues fields are bitmaps indicating which queues to act upon.
- * (Currently, we only support 16 queues per VF, but we make the field
- * u32 to allow for expansion.)
- * PF performs requested action and returns status.
- */
-struct i40e_virtchnl_queue_select {
- u16 vsi_id;
- u16 pad;
- u32 rx_queues;
- u32 tx_queues;
-};
-
-/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
- * VF sends this message in order to add one or more unicast or multicast
- * address filters for the specified VSI.
- * PF adds the filters and returns status.
- */
-
-/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
- * VF sends this message in order to remove one or more unicast or multicast
- * filters for the specified VSI.
- * PF removes the filters and returns status.
- */
-
-struct i40e_virtchnl_ether_addr {
- u8 addr[ETH_ALEN];
- u8 pad[2];
-};
-
-struct i40e_virtchnl_ether_addr_list {
- u16 vsi_id;
- u16 num_elements;
- struct i40e_virtchnl_ether_addr list[1];
-};
-
-/* I40E_VIRTCHNL_OP_ADD_VLAN
- * VF sends this message to add one or more VLAN tag filters for receives.
- * PF adds the filters and returns status.
- * If a port VLAN is configured by the PF, this operation will return an
- * error to the VF.
- */
-
-/* I40E_VIRTCHNL_OP_DEL_VLAN
- * VF sends this message to remove one or more VLAN tag filters for receives.
- * PF removes the filters and returns status.
- * If a port VLAN is configured by the PF, this operation will return an
- * error to the VF.
- */
-
-struct i40e_virtchnl_vlan_filter_list {
- u16 vsi_id;
- u16 num_elements;
- u16 vlan_id[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
- * VF sends VSI id and flags.
- * PF returns status code in retval.
- * Note: we assume that broadcast accept mode is always enabled.
- */
-struct i40e_virtchnl_promisc_info {
- u16 vsi_id;
- u16 flags;
-};
-
-#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
-#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
-
-/* I40E_VIRTCHNL_OP_GET_STATS
- * VF sends this message to request stats for the selected VSI. VF uses
- * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
- * field is ignored by the PF.
- *
- * PF replies with struct i40e_eth_stats in an external buffer.
- */
-
-/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
- * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
- * VF sends these messages to configure RSS. Only supported if both PF
- * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
- * configuration negotiation. If this is the case, then the RSS fields in
- * the VF resource struct are valid.
- * Both the key and LUT are initialized to 0 by the PF, meaning that
- * RSS is effectively disabled until set up by the VF.
- */
-struct i40e_virtchnl_rss_key {
- u16 vsi_id;
- u16 key_len;
- u8 key[1]; /* RSS hash key, packed bytes */
-};
-
-struct i40e_virtchnl_rss_lut {
- u16 vsi_id;
- u16 lut_entries;
- u8 lut[1]; /* RSS lookup table */
-};
-
-/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
- * I40E_VIRTCHNL_OP_SET_RSS_HENA
- * VF sends these messages to get and set the hash filter enable bits for RSS.
- * By default, the PF sets these to all possible traffic types that the
- * hardware supports. The VF can query this value if it wants to change the
- * traffic types that are hashed by the hardware.
- * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
- */
-struct i40e_virtchnl_rss_hena {
- u64 hena;
-};
-
-/* I40E_VIRTCHNL_OP_EVENT
- * PF sends this message to inform the VF driver of events that may affect it.
- * No direct response is expected from the VF, though it may generate other
- * messages in response to this one.
- */
-enum i40e_virtchnl_event_codes {
- I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
- I40E_VIRTCHNL_EVENT_LINK_CHANGE,
- I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
- I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
-};
-#define I40E_PF_EVENT_SEVERITY_INFO 0
-#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
-
-struct i40e_virtchnl_pf_event {
- enum i40e_virtchnl_event_codes event;
- union {
- struct {
- enum i40e_aq_link_speed link_speed;
- bool link_status;
- } link_event;
- } event_data;
-
- int severity;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
- * VF uses this message to request PF to map IWARP vectors to IWARP queues.
- * The request for this originates from the VF IWARP driver through
- * a client interface between VF LAN and VF IWARP driver.
- * A vector could have both an AEQ and a CEQ attached to it. However,
- * there is a single AEQ per VF IWARP instance, so most vectors will
- * have an INVALID_IDX for the AEQ and a valid index for the CEQ.
- * There will never be a case where there will be multiple CEQs attached
- * to a single vector.
- * PF configures interrupt mapping and returns status.
- */
-
-/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
- * In order for us to keep the interface simple, SW will define a
- * unique type value for AEQ.
- */
-#define I40E_QUEUE_TYPE_PE_AEQ 0x80
-#define I40E_QUEUE_INVALID_IDX 0xFFFF
-
-struct i40e_virtchnl_iwarp_qv_info {
- u32 v_idx; /* msix_vector */
- u16 ceq_idx;
- u16 aeq_idx;
- u8 itr_idx;
-};
-
-struct i40e_virtchnl_iwarp_qvlist_info {
- u32 num_vectors;
- struct i40e_virtchnl_iwarp_qv_info qv_info[1];
-};
-
-/* VF reset states - these are written into the RSTAT register:
- * I40E_VFGEN_RSTAT1 on the PF
- * I40E_VFGEN_RSTAT on the VF
- * When the PF initiates a reset, it writes 0
- * When the reset is complete, it writes 1
- * When the PF detects that the VF has recovered, it writes 2
- * VF checks this register periodically to determine if a reset has occurred,
- * then polls it to know when the reset is complete.
- * If either the PF or VF reads the register while the hardware
- * is in a reset state, it will return DEADBEEF, which, when masked,
- * will result in 3.
- */
-enum i40e_vfr_states {
- I40E_VFR_INPROGRESS = 0,
- I40E_VFR_COMPLETED,
- I40E_VFR_VFACTIVE,
- I40E_VFR_UNKNOWN,
-};
-
-#endif /* _I40E_VIRTCHNL_H_ */
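Both the PF-side and VF-side copies of i40e_virtchnl.h are deleted by this patch; their content now lives in include/linux/avf/virtchnl.h, shared by every AVF driver. As a worked illustration of the VERSION handshake the removed comments describe, a sketch against the shared definitions (not code taken from this patch):

i40e_status err;
struct virtchnl_version_info ver = {
        .major = VIRTCHNL_VERSION_MAJOR,        /* 1 */
        .minor = VIRTCHNL_VERSION_MINOR,        /* 1 */
};

/* The VF posts its version; the PF replies in the same format. A major
 * mismatch is fatal, a minor mismatch only warrants a log warning, and
 * the PF must answer this opcode even across version mismatches.
 */
err = i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_VERSION, I40E_SUCCESS,
                             (u8 *)&ver, sizeof(ver), NULL);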
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index b8ada6d..6cc9208 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -43,7 +43,7 @@
#include <net/udp.h>
#include "i40e_type.h"
-#include "i40e_virtchnl.h"
+#include <linux/avf/virtchnl.h>
#include "i40e_txrx.h"
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
@@ -263,26 +263,26 @@
struct work_struct watchdog_task;
bool netdev_registered;
bool link_up;
- enum i40e_aq_link_speed link_speed;
- enum i40e_virtchnl_ops current_op;
+ enum virtchnl_link_speed link_speed;
+ enum virtchnl_ops current_op;
#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \
(_a)->vf_res->vf_offload_flags & \
- I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \
+ VIRTCHNL_VF_OFFLOAD_IWARP : \
0)
#define CLIENT_ENABLED(_a) ((_a)->cinst)
/* RSS by the PF should be preferred over RSS via other methods. */
#define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
+ VIRTCHNL_VF_OFFLOAD_RSS_PF)
#define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+ VIRTCHNL_VF_OFFLOAD_RSS_AQ)
#define RSS_REG(_a) (!((_a)->vf_res->vf_offload_flags & \
- (I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | \
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)))
+ (VIRTCHNL_VF_OFFLOAD_RSS_AQ | \
+ VIRTCHNL_VF_OFFLOAD_RSS_PF)))
#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
- struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
- struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
- struct i40e_virtchnl_version_info pf_version;
+ VIRTCHNL_VF_OFFLOAD_VLAN)
+ struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
+ struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
+ struct virtchnl_version_info pf_version;
#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
((_a)->pf_version.minor == 1))
u16 msg_enable;
@@ -348,7 +348,7 @@
void i40evf_set_rss_key(struct i40evf_adapter *adapter);
void i40evf_set_rss_lut(struct i40evf_adapter *adapter);
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg, u16 msglen);
int i40evf_config_rss(struct i40evf_adapter *adapter);
int i40evf_lan_add_device(struct i40evf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
index ee73768..93cf5fd 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_client.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
@@ -120,7 +120,7 @@
return -EAGAIN;
err = i40e_aq_send_msg_to_pf(&adapter->hw,
- I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+ VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
I40E_SUCCESS, NULL, 0, NULL);
if (err)
@@ -410,7 +410,7 @@
if (adapter->aq_required)
return -EAGAIN;
- err = i40e_aq_send_msg_to_pf(&adapter->hw, I40E_VIRTCHNL_OP_IWARP,
+ err = i40e_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP,
I40E_SUCCESS, msg, len, NULL);
if (err)
dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n",
@@ -431,7 +431,7 @@
struct i40e_client *client,
struct i40e_qvlist_info *qvlist_info)
{
- struct i40e_virtchnl_iwarp_qvlist_info *v_qvlist_info;
+ struct virtchnl_iwarp_qvlist_info *v_qvlist_info;
struct i40evf_adapter *adapter = ldev->vf;
struct i40e_qv_info *qv_info;
i40e_status err;
@@ -453,14 +453,14 @@
return -EINVAL;
}
- v_qvlist_info = (struct i40e_virtchnl_iwarp_qvlist_info *)qvlist_info;
- msg_size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
- (sizeof(struct i40e_virtchnl_iwarp_qv_info) *
+ v_qvlist_info = (struct virtchnl_iwarp_qvlist_info *)qvlist_info;
+ msg_size = sizeof(struct virtchnl_iwarp_qvlist_info) +
+ (sizeof(struct virtchnl_iwarp_qv_info) *
(v_qvlist_info->num_vectors - 1));
- adapter->client_pending |= BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
+ adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
err = i40e_aq_send_msg_to_pf(&adapter->hw,
- I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
I40E_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL);
if (err) {
@@ -474,7 +474,7 @@
for (i = 0; i < 5; i++) {
msleep(100);
if (!(adapter->client_pending &
- BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) {
+ BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) {
err = 0;
break;
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index ea110a7..7c213a3 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -44,9 +44,9 @@
#define DRV_KERN "-k"
-#define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 1
-#define DRV_VERSION_BUILD 14
+#define DRV_VERSION_MAJOR 3
+#define DRV_VERSION_MINOR 0
+#define DRV_VERSION_BUILD 0
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
DRV_KERN
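With the values above, the preprocessor folds DRV_VERSION into a single string literal (assuming __stringify from <linux/stringify.h>, which the driver already uses):

/* __stringify(3) "." __stringify(0) "." __stringify(0) "-k"
 *   => "3" "." "0" "." "0" "-k"  =>  "3.0.0-k"
 */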
@@ -67,6 +67,7 @@
{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_ADAPTIVE_VF), 0},
/* required last entry */
{0, }
};
@@ -1131,7 +1132,7 @@
if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
adapter->state != __I40EVF_RESETTING) {
/* cancel any current operation */
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
/* Schedule operations to close down the HW. Don't wait
* here for this to complete. The watchdog is still running
* and it will take care of this.
@@ -1197,6 +1198,7 @@
{
if (!adapter->vsi_res)
return;
+ adapter->num_active_queues = 0;
kfree(adapter->tx_rings);
adapter->tx_rings = NULL;
kfree(adapter->rx_rings);
@@ -1213,18 +1215,22 @@
**/
static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
{
- int i;
+ int i, num_active_queues;
- adapter->tx_rings = kcalloc(adapter->num_active_queues,
+ num_active_queues = min_t(int,
+ adapter->vsi_res->num_queue_pairs,
+ (int)(num_online_cpus()));
+
+ adapter->tx_rings = kcalloc(num_active_queues,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!adapter->tx_rings)
goto err_out;
- adapter->rx_rings = kcalloc(adapter->num_active_queues,
+ adapter->rx_rings = kcalloc(num_active_queues,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!adapter->rx_rings)
goto err_out;
- for (i = 0; i < adapter->num_active_queues; i++) {
+ for (i = 0; i < num_active_queues; i++) {
struct i40e_ring *tx_ring;
struct i40e_ring *rx_ring;
@@ -1246,6 +1252,8 @@
rx_ring->rx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF);
}
+ adapter->num_active_queues = num_active_queues;
+
return 0;
err_out:
@@ -1311,7 +1319,7 @@
struct i40e_hw *hw = &adapter->hw;
int ret = 0;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
adapter->current_op);
@@ -1410,7 +1418,7 @@
if (!RSS_PF(adapter)) {
/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
if (adapter->vf_res->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
else
adapter->hena = I40E_DEFAULT_RSS_HENA;
@@ -1588,8 +1596,8 @@
if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if ((reg_val == I40E_VFR_VFACTIVE) ||
- (reg_val == I40E_VFR_COMPLETED)) {
+ if ((reg_val == VIRTCHNL_VFR_VFACTIVE) ||
+ (reg_val == VIRTCHNL_VFR_COMPLETED)) {
/* A chance for redemption! */
dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
adapter->state = __I40EVF_STARTUP;
@@ -1605,7 +1613,7 @@
return;
}
adapter->aq_required = 0;
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
goto watchdog_done;
}
@@ -1621,7 +1629,7 @@
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
schedule_work(&adapter->reset_task);
adapter->aq_required = 0;
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
goto watchdog_done;
}
@@ -1707,13 +1715,13 @@
}
if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
- i40evf_set_promiscuous(adapter, I40E_FLAG_VF_UNICAST_PROMISC |
- I40E_FLAG_VF_MULTICAST_PROMISC);
+ i40evf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
+ FLAG_VF_MULTICAST_PROMISC);
goto watchdog_done;
}
if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
- i40evf_set_promiscuous(adapter, I40E_FLAG_VF_MULTICAST_PROMISC);
+ i40evf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
goto watchdog_done;
}
@@ -1854,7 +1862,7 @@
reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if (reg_val == I40E_VFR_VFACTIVE)
+ if (reg_val == VIRTCHNL_VFR_VFACTIVE)
break;
}
@@ -1888,7 +1896,7 @@
/* kill and reinit the admin queue */
i40evf_shutdown_adminq(hw);
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
err = i40evf_init_adminq(hw);
if (err)
dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
@@ -1949,7 +1957,7 @@
container_of(work, struct i40evf_adapter, adminq_task);
struct i40e_hw *hw = &adapter->hw;
struct i40e_arq_event_info event;
- struct i40e_virtchnl_msg *v_msg;
+ struct virtchnl_msg *v_msg;
i40e_status ret;
u32 val, oldval;
u16 pending;
@@ -1962,14 +1970,15 @@
if (!event.msg_buf)
goto out;
- v_msg = (struct i40e_virtchnl_msg *)&event.desc;
+ v_msg = (struct virtchnl_msg *)&event.desc;
do {
ret = i40evf_clean_arq_element(hw, &event, &pending);
if (ret || !v_msg->v_opcode)
break; /* No event to process or error cleaning ARQ */
i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
- v_msg->v_retval, event.msg_buf,
+ (i40e_status)v_msg->v_retval,
+ event.msg_buf,
event.msg_len);
if (pending != 0)
memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
@@ -2347,7 +2356,7 @@
struct i40evf_adapter *adapter = netdev_priv(netdev);
features &= ~I40EVF_VLAN_FEATURES;
- if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
+ if (adapter->vf_res->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
features |= I40EVF_VLAN_FEATURES;
return features;
}
@@ -2384,8 +2393,8 @@
for (i = 0; i < 100; i++) {
rstat = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if ((rstat == I40E_VFR_VFACTIVE) ||
- (rstat == I40E_VFR_COMPLETED))
+ if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
+ (rstat == VIRTCHNL_VFR_COMPLETED))
return 0;
usleep_range(10, 20);
}
@@ -2401,7 +2410,7 @@
**/
int i40evf_process_config(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_vf_resource *vfres = adapter->vf_res;
+ struct virtchnl_vf_resource *vfres = adapter->vf_res;
struct net_device *netdev = adapter->netdev;
struct i40e_vsi *vsi = &adapter->vsi;
int i;
@@ -2410,7 +2419,7 @@
/* got VF config message back from PF, now we can parse it */
for (i = 0; i < vfres->num_vsis; i++) {
- if (vfres->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+ if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
adapter->vsi_res = &vfres->vsi_res[i];
}
if (!adapter->vsi_res) {
@@ -2434,7 +2443,7 @@
/* advertise to stack only if offloads for encapsulated packets are
* supported
*/
- if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP) {
+ if (vfres->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM |
@@ -2445,7 +2454,7 @@
0;
if (!(vfres->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
+ VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
netdev->gso_partial_features |=
NETIF_F_GSO_UDP_TUNNEL_CSUM;
@@ -2472,7 +2481,7 @@
adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
vsi->netdev = adapter->netdev;
vsi->qs_handle = adapter->vsi_res->qset_handle;
- if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ if (vfres->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
adapter->rss_key_size = vfres->rss_key_size;
adapter->rss_lut_size = vfres->rss_lut_size;
} else {
@@ -2558,8 +2567,8 @@
dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
adapter->pf_version.major,
adapter->pf_version.minor,
- I40E_VIRTCHNL_VERSION_MAJOR,
- I40E_VIRTCHNL_VERSION_MINOR);
+ VIRTCHNL_VERSION_MAJOR,
+ VIRTCHNL_VERSION_MINOR);
goto err;
}
err = i40evf_send_vf_config_msg(adapter);
@@ -2573,9 +2582,9 @@
case __I40EVF_INIT_GET_RESOURCES:
/* aq msg sent, awaiting reply */
if (!adapter->vf_res) {
- bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
+ bufsz = sizeof(struct virtchnl_vf_resource) +
(I40E_MAX_VF_VSI *
- sizeof(struct i40e_virtchnl_vsi_resource));
+ sizeof(struct virtchnl_vsi_resource));
adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
if (!adapter->vf_res)
goto err;
@@ -2606,7 +2615,7 @@
if (i40evf_process_config(adapter))
goto err_alloc;
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
@@ -2634,9 +2643,6 @@
adapter->watchdog_timer.data = (unsigned long)adapter;
mod_timer(&adapter->watchdog_timer, jiffies + 1);
- adapter->num_active_queues = min_t(int,
- adapter->vsi_res->num_queue_pairs,
- (int)(num_online_cpus()));
adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
err = i40evf_init_interrupt_scheme(adapter);
@@ -2644,7 +2650,7 @@
goto err_sw_init;
i40evf_map_rings_to_vectors(adapter);
if (adapter->vf_res->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+ VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
err = i40evf_request_misc_irq(adapter);
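The i40evf_alloc_queues() hunks above fold the queue-count computation into the allocator: the active count is capped at the number of online CPUs and is stored in the adapter only after both ring arrays allocate, with the free path zeroing it again. A hedged userspace sketch of that commit-after-success shape (struct layout and names are illustrative, not the driver's):

#include <stdlib.h>
#include <unistd.h>

struct ring { int dummy; };

struct adapter {
	int num_active_queues;
	struct ring *tx_rings;
	struct ring *rx_rings;
};

static int alloc_queues(struct adapter *ad, int num_queue_pairs)
{
	long cpus = sysconf(_SC_NPROCESSORS_ONLN);
	int n = num_queue_pairs < cpus ? num_queue_pairs : (int)cpus;

	ad->tx_rings = calloc(n, sizeof(*ad->tx_rings));
	if (!ad->tx_rings)
		goto err;
	ad->rx_rings = calloc(n, sizeof(*ad->rx_rings));
	if (!ad->rx_rings)
		goto err;

	/* Publish the count only after every allocation succeeded, so a
	 * teardown path keyed on num_active_queues never walks rings
	 * that were never allocated. */
	ad->num_active_queues = n;
	return 0;

err:
	free(ad->tx_rings);
	ad->tx_rings = NULL;
	return -1;
}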
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index deb2cb8..d2bb250 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -42,7 +42,7 @@
* Send message to PF and print status if failure.
**/
static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
- enum i40e_virtchnl_ops op, u8 *msg, u16 len)
+ enum virtchnl_ops op, u8 *msg, u16 len)
{
struct i40e_hw *hw = &adapter->hw;
i40e_status err;
@@ -68,12 +68,12 @@
**/
int i40evf_send_api_ver(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_version_info vvi;
+ struct virtchnl_version_info vvi;
- vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
- vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;
+ vvi.major = VIRTCHNL_VERSION_MAJOR;
+ vvi.minor = VIRTCHNL_VERSION_MINOR;
- return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_VERSION, (u8 *)&vvi,
+ return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
sizeof(vvi));
}
@@ -88,10 +88,10 @@
**/
int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_version_info *pf_vvi;
+ struct virtchnl_version_info *pf_vvi;
struct i40e_hw *hw = &adapter->hw;
struct i40e_arq_event_info event;
- enum i40e_virtchnl_ops op;
+ enum virtchnl_ops op;
i40e_status err;
event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
@@ -109,8 +109,8 @@
if (err)
goto out_alloc;
op =
- (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
- if (op == I40E_VIRTCHNL_OP_VERSION)
+ (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
+ if (op == VIRTCHNL_OP_VERSION)
break;
}
@@ -119,19 +119,19 @@
if (err)
goto out_alloc;
- if (op != I40E_VIRTCHNL_OP_VERSION) {
+ if (op != VIRTCHNL_OP_VERSION) {
dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
op);
err = -EIO;
goto out_alloc;
}
- pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
+ pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
adapter->pf_version = *pf_vvi;
- if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
- ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
- (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR)))
+ if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
+ ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
+ (pf_vvi->minor > VIRTCHNL_VERSION_MINOR)))
err = -EIO;
out_alloc:
@@ -152,26 +152,25 @@
{
u32 caps;
- adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
- caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
- I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
- I40E_VIRTCHNL_VF_OFFLOAD_ENCAP |
- I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+ caps = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_RSS_PF |
+ VIRTCHNL_VF_OFFLOAD_RSS_AQ |
+ VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ VIRTCHNL_VF_OFFLOAD_VLAN |
+ VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
+ VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
+ VIRTCHNL_VF_OFFLOAD_ENCAP |
+ VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
- adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
+ adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
if (PF_IS_V11(adapter))
return i40evf_send_pf_msg(adapter,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ VIRTCHNL_OP_GET_VF_RESOURCES,
(u8 *)&caps, sizeof(caps));
else
return i40evf_send_pf_msg(adapter,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ VIRTCHNL_OP_GET_VF_RESOURCES,
NULL, 0);
}
@@ -189,12 +188,12 @@
{
struct i40e_hw *hw = &adapter->hw;
struct i40e_arq_event_info event;
- enum i40e_virtchnl_ops op;
+ enum virtchnl_ops op;
i40e_status err;
u16 len;
- len = sizeof(struct i40e_virtchnl_vf_resource) +
- I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
+ len = sizeof(struct virtchnl_vf_resource) +
+ I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
event.buf_len = len;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf) {
@@ -210,8 +209,8 @@
if (err)
goto out_alloc;
op =
- (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
- if (op == I40E_VIRTCHNL_OP_GET_VF_RESOURCES)
+ (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
+ if (op == VIRTCHNL_OP_GET_VF_RESOURCES)
break;
}
@@ -233,20 +232,20 @@
**/
void i40evf_configure_queues(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_vsi_queue_config_info *vqci;
- struct i40e_virtchnl_queue_pair_info *vqpi;
+ struct virtchnl_vsi_queue_config_info *vqci;
+ struct virtchnl_queue_pair_info *vqpi;
int pairs = adapter->num_active_queues;
int i, len, max_frame = I40E_MAX_RXBUFFER;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
adapter->current_op);
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
- len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
- (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
+ adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+ len = sizeof(struct virtchnl_vsi_queue_config_info) +
+ (sizeof(struct virtchnl_queue_pair_info) * pairs);
vqci = kzalloc(len, GFP_KERNEL);
if (!vqci)
return;
@@ -279,7 +278,7 @@
}
adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
(u8 *)vqci, len);
kfree(vqci);
}
@@ -292,20 +291,20 @@
**/
void i40evf_enable_queues(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_queue_select vqs;
+ struct virtchnl_queue_select vqs;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
adapter->current_op);
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
+ adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
vqs.vsi_id = adapter->vsi_res->vsi_id;
vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues;
adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
(u8 *)&vqs, sizeof(vqs));
}
@@ -317,20 +316,20 @@
**/
void i40evf_disable_queues(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_queue_select vqs;
+ struct virtchnl_queue_select vqs;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
adapter->current_op);
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
+ adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
vqs.vsi_id = adapter->vsi_res->vsi_id;
vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues;
adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
(u8 *)&vqs, sizeof(vqs));
}
@@ -343,23 +342,23 @@
**/
void i40evf_map_queues(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_irq_map_info *vimi;
+ struct virtchnl_irq_map_info *vimi;
int v_idx, q_vectors, len;
struct i40e_q_vector *q_vector;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
adapter->current_op);
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
+ adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
q_vectors = adapter->num_msix_vectors - NONQ_VECS;
- len = sizeof(struct i40e_virtchnl_irq_map_info) +
+ len = sizeof(struct virtchnl_irq_map_info) +
(adapter->num_msix_vectors *
- sizeof(struct i40e_virtchnl_vector_map));
+ sizeof(struct virtchnl_vector_map));
vimi = kzalloc(len, GFP_KERNEL);
if (!vimi)
return;
@@ -380,7 +379,7 @@
vimi->vecmap[v_idx].rxq_map = 0;
adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
(u8 *)vimi, len);
kfree(vimi);
}
@@ -395,12 +394,12 @@
**/
void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_ether_addr_list *veal;
+ struct virtchnl_ether_addr_list *veal;
int len, i = 0, count = 0;
struct i40evf_mac_filter *f;
bool more = false;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
adapter->current_op);
@@ -414,17 +413,17 @@
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
+ adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
- len = sizeof(struct i40e_virtchnl_ether_addr_list) +
- (count * sizeof(struct i40e_virtchnl_ether_addr));
+ len = sizeof(struct virtchnl_ether_addr_list) +
+ (count * sizeof(struct virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
- sizeof(struct i40e_virtchnl_ether_addr_list)) /
- sizeof(struct i40e_virtchnl_ether_addr);
- len = sizeof(struct i40e_virtchnl_ether_addr_list) +
- (count * sizeof(struct i40e_virtchnl_ether_addr));
+ sizeof(struct virtchnl_ether_addr_list)) /
+ sizeof(struct virtchnl_ether_addr);
+ len = sizeof(struct virtchnl_ether_addr_list) +
+ (count * sizeof(struct virtchnl_ether_addr));
more = true;
}
@@ -445,7 +444,7 @@
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR,
(u8 *)veal, len);
kfree(veal);
}
@@ -460,12 +459,12 @@
**/
void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_ether_addr_list *veal;
+ struct virtchnl_ether_addr_list *veal;
struct i40evf_mac_filter *f, *ftmp;
int len, i = 0, count = 0;
bool more = false;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
adapter->current_op);
@@ -479,17 +478,17 @@
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
+ adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
- len = sizeof(struct i40e_virtchnl_ether_addr_list) +
- (count * sizeof(struct i40e_virtchnl_ether_addr));
+ len = sizeof(struct virtchnl_ether_addr_list) +
+ (count * sizeof(struct virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
- sizeof(struct i40e_virtchnl_ether_addr_list)) /
- sizeof(struct i40e_virtchnl_ether_addr);
- len = sizeof(struct i40e_virtchnl_ether_addr_list) +
- (count * sizeof(struct i40e_virtchnl_ether_addr));
+ sizeof(struct virtchnl_ether_addr_list)) /
+ sizeof(struct virtchnl_ether_addr);
+ len = sizeof(struct virtchnl_ether_addr_list) +
+ (count * sizeof(struct virtchnl_ether_addr));
more = true;
}
veal = kzalloc(len, GFP_KERNEL);
@@ -510,7 +509,7 @@
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR,
(u8 *)veal, len);
kfree(veal);
}
@@ -525,12 +524,12 @@
**/
void i40evf_add_vlans(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_vlan_filter_list *vvfl;
+ struct virtchnl_vlan_filter_list *vvfl;
int len, i = 0, count = 0;
struct i40evf_vlan_filter *f;
bool more = false;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
adapter->current_op);
@@ -545,16 +544,16 @@
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_ADD_VLAN;
+ adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
- len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ len = sizeof(struct virtchnl_vlan_filter_list) +
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
- sizeof(struct i40e_virtchnl_vlan_filter_list)) /
+ sizeof(struct virtchnl_vlan_filter_list)) /
sizeof(u16);
- len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ len = sizeof(struct virtchnl_vlan_filter_list) +
(count * sizeof(u16));
more = true;
}
@@ -575,7 +574,7 @@
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
@@ -589,12 +588,12 @@
**/
void i40evf_del_vlans(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_vlan_filter_list *vvfl;
+ struct virtchnl_vlan_filter_list *vvfl;
struct i40evf_vlan_filter *f, *ftmp;
int len, i = 0, count = 0;
bool more = false;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
adapter->current_op);
@@ -609,16 +608,16 @@
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_DEL_VLAN;
+ adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
- len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ len = sizeof(struct virtchnl_vlan_filter_list) +
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
- sizeof(struct i40e_virtchnl_vlan_filter_list)) /
+ sizeof(struct virtchnl_vlan_filter_list)) /
sizeof(u16);
- len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ len = sizeof(struct virtchnl_vlan_filter_list) +
(count * sizeof(u16));
more = true;
}
@@ -640,7 +639,7 @@
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
@@ -653,25 +652,25 @@
**/
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
{
- struct i40e_virtchnl_promisc_info vpi;
+ struct virtchnl_promisc_info vpi;
int promisc_all;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
adapter->current_op);
return;
}
- promisc_all = I40E_FLAG_VF_UNICAST_PROMISC |
- I40E_FLAG_VF_MULTICAST_PROMISC;
+ promisc_all = FLAG_VF_UNICAST_PROMISC |
+ FLAG_VF_MULTICAST_PROMISC;
if ((flags & promisc_all) == promisc_all) {
adapter->flags |= I40EVF_FLAG_PROMISC_ON;
adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC;
dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
}
- if (flags & I40E_FLAG_VF_MULTICAST_PROMISC) {
+ if (flags & FLAG_VF_MULTICAST_PROMISC) {
adapter->flags |= I40EVF_FLAG_ALLMULTI_ON;
adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
@@ -683,10 +682,10 @@
dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
}
- adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
vpi.vsi_id = adapter->vsi_res->vsi_id;
vpi.flags = flags;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
(u8 *)&vpi, sizeof(vpi));
}
@@ -698,19 +697,19 @@
**/
void i40evf_request_stats(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_queue_select vqs;
+ struct virtchnl_queue_select vqs;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* no error message, this isn't crucial */
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_GET_STATS;
+ adapter->current_op = VIRTCHNL_OP_GET_STATS;
vqs.vsi_id = adapter->vsi_res->vsi_id;
/* queue maps are ignored for this message - only the vsi is used */
- if (i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_STATS,
+ if (i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS,
(u8 *)&vqs, sizeof(vqs)))
/* if the request failed, don't lock out others */
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
/**
@@ -721,15 +720,15 @@
**/
void i40evf_get_hena(struct i40evf_adapter *adapter)
{
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
adapter->current_op);
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS;
+ adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
NULL, 0);
}
@@ -741,18 +740,18 @@
**/
void i40evf_set_hena(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_rss_hena vrh;
+ struct virtchnl_rss_hena vrh;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
adapter->current_op);
return;
}
vrh.hena = adapter->hena;
- adapter->current_op = I40E_VIRTCHNL_OP_SET_RSS_HENA;
+ adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA,
(u8 *)&vrh, sizeof(vrh));
}
@@ -764,16 +763,16 @@
**/
void i40evf_set_rss_key(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_rss_key *vrk;
+ struct virtchnl_rss_key *vrk;
int len;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
adapter->current_op);
return;
}
- len = sizeof(struct i40e_virtchnl_rss_key) +
+ len = sizeof(struct virtchnl_rss_key) +
(adapter->rss_key_size * sizeof(u8)) - 1;
vrk = kzalloc(len, GFP_KERNEL);
if (!vrk)
@@ -782,9 +781,9 @@
vrk->key_len = adapter->rss_key_size;
memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
- adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_KEY;
+ adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY,
(u8 *)vrk, len);
kfree(vrk);
}
@@ -797,16 +796,16 @@
**/
void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_rss_lut *vrl;
+ struct virtchnl_rss_lut *vrl;
int len;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
adapter->current_op);
return;
}
- len = sizeof(struct i40e_virtchnl_rss_lut) +
+ len = sizeof(struct virtchnl_rss_lut) +
(adapter->rss_lut_size * sizeof(u8)) - 1;
vrl = kzalloc(len, GFP_KERNEL);
if (!vrl)
@@ -814,9 +813,9 @@
vrl->vsi_id = adapter->vsi.id;
vrl->lut_entries = adapter->rss_lut_size;
memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
- adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_LUT;
+ adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT,
(u8 *)vrl, len);
kfree(vrl);
}
@@ -872,8 +871,8 @@
void i40evf_request_reset(struct i40evf_adapter *adapter)
{
/* Don't check CURRENT_OP - this is always higher priority */
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
/**
@@ -889,17 +888,17 @@
* This function handles the reply messages.
**/
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
i40e_status v_retval,
u8 *msg, u16 msglen)
{
struct net_device *netdev = adapter->netdev;
- if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
- struct i40e_virtchnl_pf_event *vpe =
- (struct i40e_virtchnl_pf_event *)msg;
+ if (v_opcode == VIRTCHNL_OP_EVENT) {
+ struct virtchnl_pf_event *vpe =
+ (struct virtchnl_pf_event *)msg;
switch (vpe->event) {
- case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+ case VIRTCHNL_EVENT_LINK_CHANGE:
adapter->link_speed =
vpe->event_data.link_event.link_speed;
if (adapter->link_up !=
@@ -916,7 +915,7 @@
i40evf_print_link_message(adapter);
}
break;
- case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
+ case VIRTCHNL_EVENT_RESET_IMPENDING:
dev_info(&adapter->pdev->dev, "PF reset warning received\n");
if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
adapter->flags |= I40EVF_FLAG_RESET_PENDING;
@@ -933,19 +932,19 @@
}
if (v_retval) {
switch (v_opcode) {
- case I40E_VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_ADD_VLAN:
dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
i40evf_stat_str(&adapter->hw, v_retval));
break;
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
i40evf_stat_str(&adapter->hw, v_retval));
break;
- case I40E_VIRTCHNL_OP_DEL_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
i40evf_stat_str(&adapter->hw, v_retval));
break;
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
i40evf_stat_str(&adapter->hw, v_retval));
break;
@@ -957,7 +956,7 @@
}
}
switch (v_opcode) {
- case I40E_VIRTCHNL_OP_GET_STATS: {
+ case VIRTCHNL_OP_GET_STATS: {
struct i40e_eth_stats *stats =
(struct i40e_eth_stats *)msg;
netdev->stats.rx_packets = stats->rx_unicast +
@@ -974,10 +973,10 @@
adapter->current_stats = *stats;
}
break;
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: {
- u16 len = sizeof(struct i40e_virtchnl_vf_resource) +
+ case VIRTCHNL_OP_GET_VF_RESOURCES: {
+ u16 len = sizeof(struct virtchnl_vf_resource) +
I40E_MAX_VF_VSI *
- sizeof(struct i40e_virtchnl_vsi_resource);
+ sizeof(struct virtchnl_vsi_resource);
memcpy(adapter->vf_res, msg, min(msglen, len));
i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
/* restore current mac address */
@@ -985,18 +984,18 @@
i40evf_process_config(adapter);
}
break;
- case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_ENABLE_QUEUES:
/* enable transmits */
i40evf_irq_enable(adapter, true);
break;
- case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
i40evf_free_all_tx_resources(adapter);
i40evf_free_all_rx_resources(adapter);
if (adapter->state == __I40EVF_DOWN_PENDING)
adapter->state = __I40EVF_DOWN;
break;
- case I40E_VIRTCHNL_OP_VERSION:
- case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ case VIRTCHNL_OP_VERSION:
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
/* Don't display an error if we get these out of sequence.
* If the firmware needed to get kicked, we'll get these and
* it's no problem.
@@ -1004,7 +1003,7 @@
if (v_opcode != adapter->current_op)
return;
break;
- case I40E_VIRTCHNL_OP_IWARP:
+ case VIRTCHNL_OP_IWARP:
/* Gobble zero-length replies from the PF. They indicate that
* a previous message was received OK, and the client doesn't
* care about that.
@@ -1014,13 +1013,12 @@
msg, msglen);
break;
- case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
adapter->client_pending &=
- ~(BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
+ ~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
break;
- case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
- struct i40e_virtchnl_rss_hena *vrh =
- (struct i40e_virtchnl_rss_hena *)msg;
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
+ struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
if (msglen == sizeof(*vrh))
adapter->hena = vrh->hena;
else
@@ -1034,5 +1032,5 @@
adapter->current_op, v_opcode);
break;
} /* switch v_opcode */
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
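Several of the filter hunks above size a virtchnl message as a list header plus count trailing elements, clamp count so the buffer stays within I40EVF_MAX_AQ_BUF_SIZE, and set a more flag so the remainder goes out in a follow-up request. A userspace sketch of just that sizing arithmetic (MAX_BUF and the list layout are stand-ins, not the virtchnl structures):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define MAX_BUF 4096 /* stand-in for I40EVF_MAX_AQ_BUF_SIZE */

struct addr { uint8_t mac[6]; };

struct addr_list {
	uint16_t vsi_id;
	uint16_t num_elements;
	struct addr list[1]; /* header already carries one slot */
};

/* Compute the buffer length for *count entries, shrinking *count when
 * the full list would overflow MAX_BUF; *more tells the caller that a
 * second request is needed for the leftovers. Because the header
 * declares one trailing slot, adding count full elements leaves one
 * element of slack, matching the driver's arithmetic. */
static size_t list_len(size_t *count, bool *more)
{
	size_t len = sizeof(struct addr_list) + *count * sizeof(struct addr);

	if (len > MAX_BUF) {
		*count = (MAX_BUF - sizeof(struct addr_list)) /
			 sizeof(struct addr);
		len = sizeof(struct addr_list) + *count * sizeof(struct addr);
		*more = true;
	}
	return len;
}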
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index ee44398..4a50870 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -257,6 +257,7 @@
}
/* Set phy->phy_addr and phy->id. */
+ igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
ret_val = igb_get_phy_id_82575(hw);
if (ret_val)
return ret_val;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index bf9bf90..06ffb2b 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -563,6 +563,7 @@
struct cyclecounter cc;
struct timecounter tc;
u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_skipped;
u32 rx_hwtstamp_cleared;
bool pps_sys_wrap_on;
@@ -666,7 +667,7 @@
void igb_setup_rctl(struct igb_adapter *);
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
void igb_alloc_rx_buffers(struct igb_ring *, u16);
-void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
+void igb_update_stats(struct igb_adapter *);
bool igb_has_link(struct igb_adapter *adapter);
void igb_set_ethtool_ops(struct net_device *);
void igb_power_up_link(struct igb_adapter *);
@@ -676,6 +677,7 @@
void igb_ptp_reset(struct igb_adapter *adapter);
void igb_ptp_suspend(struct igb_adapter *adapter);
void igb_ptp_rx_hang(struct igb_adapter *adapter);
+void igb_ptp_tx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
struct sk_buff *skb);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 0efb62d..d06a8db 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -90,6 +90,7 @@
IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ IGB_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
};
@@ -2315,7 +2316,7 @@
char *p;
spin_lock(&adapter->stats64_lock);
- igb_update_stats(adapter, net_stats);
+ igb_update_stats(adapter);
for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 1cf74aa..ec62410 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -191,10 +191,7 @@
static int igb_pci_disable_sriov(struct pci_dev *dev);
#endif
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *);
-#endif
static int igb_resume(struct device *);
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
@@ -204,7 +201,6 @@
SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
igb_runtime_idle)
};
-#endif
static void igb_shutdown(struct pci_dev *);
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
#ifdef CONFIG_IGB_DCA
@@ -1822,7 +1818,7 @@
/* record the stats before reset*/
spin_lock(&adapter->stats64_lock);
- igb_update_stats(adapter, &adapter->stats64);
+ igb_update_stats(adapter);
spin_unlock(&adapter->stats64_lock);
adapter->link_speed = 0;
@@ -4690,7 +4686,7 @@
}
spin_lock(&adapter->stats64_lock);
- igb_update_stats(adapter, &adapter->stats64);
+ igb_update_stats(adapter);
spin_unlock(&adapter->stats64_lock);
for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -4726,6 +4722,7 @@
igb_spoof_check(adapter);
igb_ptp_rx_hang(adapter);
+ igb_ptp_tx_hang(adapter);
/* Check LVMMC register on i350/i354 only */
if ((adapter->hw.mac.type == e1000_i350) ||
@@ -5201,9 +5198,9 @@
return __igb_maybe_stop_tx(tx_ring, size);
}
-static void igb_tx_map(struct igb_ring *tx_ring,
- struct igb_tx_buffer *first,
- const u8 hdr_len)
+static int igb_tx_map(struct igb_ring *tx_ring,
+ struct igb_tx_buffer *first,
+ const u8 hdr_len)
{
struct sk_buff *skb = first->skb;
struct igb_tx_buffer *tx_buffer;
@@ -5314,7 +5311,7 @@
*/
mmiowb();
}
- return;
+ return 0;
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
@@ -5345,6 +5342,8 @@
tx_buffer->skb = NULL;
tx_ring->next_to_use = i;
+
+ return -1;
}
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
@@ -5390,6 +5389,8 @@
adapter->ptp_tx_start = jiffies;
if (adapter->hw.mac.type == e1000_82576)
schedule_work(&adapter->ptp_tx_work);
+ } else {
+ adapter->tx_hwtstamp_skipped++;
}
}
@@ -5410,13 +5411,24 @@
else if (!tso)
igb_tx_csum(tx_ring, first);
- igb_tx_map(tx_ring, first, hdr_len);
+ if (igb_tx_map(tx_ring, first, hdr_len))
+ goto cleanup_tx_tstamp;
return NETDEV_TX_OK;
out_drop:
dev_kfree_skb_any(first->skb);
first->skb = NULL;
+cleanup_tx_tstamp:
+ if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) {
+ struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
+
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ if (adapter->hw.mac.type == e1000_82576)
+ cancel_work_sync(&adapter->ptp_tx_work);
+ clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
+ }
return NETDEV_TX_OK;
}
@@ -5487,7 +5499,7 @@
struct igb_adapter *adapter = netdev_priv(netdev);
spin_lock(&adapter->stats64_lock);
- igb_update_stats(adapter, &adapter->stats64);
+ igb_update_stats(adapter);
memcpy(stats, &adapter->stats64, sizeof(*stats));
spin_unlock(&adapter->stats64_lock);
}
@@ -5536,9 +5548,9 @@
* igb_update_stats - Update the board statistics counters
* @adapter: board private structure
**/
-void igb_update_stats(struct igb_adapter *adapter,
- struct rtnl_link_stats64 *net_stats)
+void igb_update_stats(struct igb_adapter *adapter)
{
+ struct rtnl_link_stats64 *net_stats = &adapter->stats64;
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
u32 reg, mpc;
@@ -6457,8 +6469,8 @@
igb_rar_set_index(adapter, 0);
}
-int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
- const u8 queue)
+static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
+ const u8 queue)
{
struct e1000_hw *hw = &adapter->hw;
int rar_entries = hw->mac.rar_entry_count -
@@ -6487,8 +6499,8 @@
return -ENOSPC;
}
-int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
- const u8 queue)
+static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
+ const u8 queue)
{
struct e1000_hw *hw = &adapter->hw;
int rar_entries = hw->mac.rar_entry_count -
@@ -6540,8 +6552,8 @@
return 0;
}
-int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
- const u32 info, const u8 *addr)
+static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
+ const u32 info, const u8 *addr)
{
struct pci_dev *pdev = adapter->pdev;
struct vf_data_storage *vf_data = &adapter->vf_data[vf];
@@ -8015,9 +8027,7 @@
netif_rx(skb);
}
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
-static int igb_suspend(struct device *dev)
+static int __maybe_unused igb_suspend(struct device *dev)
{
int retval;
bool wake;
@@ -8036,9 +8046,8 @@
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-static int igb_resume(struct device *dev)
+static int __maybe_unused igb_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -8092,7 +8101,7 @@
return err;
}
-static int igb_runtime_idle(struct device *dev)
+static int __maybe_unused igb_runtime_idle(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -8104,7 +8113,7 @@
return -EBUSY;
}
-static int igb_runtime_suspend(struct device *dev)
+static int __maybe_unused igb_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
int retval;
@@ -8124,11 +8133,10 @@
return 0;
}
-static int igb_runtime_resume(struct device *dev)
+static int __maybe_unused igb_runtime_resume(struct device *dev)
{
return igb_resume(dev);
}
-#endif /* CONFIG_PM */
static void igb_shutdown(struct pci_dev *pdev)
{
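The igb_main.c hunks above drop the CONFIG_PM/CONFIG_PM_SLEEP guards around the suspend/resume handlers and mark them __maybe_unused instead, so every configuration compiles the same code and unreferenced handlers are quietly discarded. A small sketch of the idea (CONFIG_PM standing in for whichever symbol gates the ops table):

/* In the kernel, __maybe_unused expands to __attribute__((__unused__));
 * it suppresses -Wunused-function in configurations where nothing
 * references the handler. */
#define __maybe_unused __attribute__((__unused__))

static int __maybe_unused my_suspend(void *dev) { (void)dev; return 0; }
static int __maybe_unused my_resume(void *dev) { (void)dev; return 0; }

#ifdef CONFIG_PM
static int (*const pm_ops[])(void *) = { my_suspend, my_resume };
#endif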
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 7a3fd4d..841c2a0 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -712,6 +712,35 @@
}
/**
+ * igb_ptp_tx_hang - detect error case where Tx timestamp never finishes
+ * @adapter: private network adapter structure
+ */
+void igb_ptp_tx_hang(struct igb_adapter *adapter)
+{
+ bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
+ IGB_PTP_TX_TIMEOUT);
+
+ if (!adapter->ptp_tx_skb)
+ return;
+
+ if (!test_bit(__IGB_PTP_TX_IN_PROGRESS, &adapter->state))
+ return;
+
+ /* If no timestamp has arrived within the timeout, assume it never
+ * will, drop the pending timestamp and release the lock bit so new
+ * requests can proceed.
+ */
+ if (timeout) {
+ cancel_work_sync(&adapter->ptp_tx_work);
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
+ adapter->tx_hwtstamp_timeouts++;
+ dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
+ }
+}
+
+/**
* igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp
* @adapter: Board private structure.
*
@@ -721,6 +750,7 @@
**/
static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
{
+ struct sk_buff *skb = adapter->ptp_tx_skb;
struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
u64 regval;
@@ -748,10 +778,17 @@
shhwtstamps.hwtstamp =
ktime_add_ns(shhwtstamps.hwtstamp, adjust);
- skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
- dev_kfree_skb_any(adapter->ptp_tx_skb);
+ /* Clear the lock bit before calling skb_tstamp_tx so that
+ * applications are not woken up while the bit is still set. We keep
+ * a local copy of the skb pointer so other threads can't change it
+ * while we're notifying the stack.
+ */
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
+
+ /* Notify the stack and free the skb after we've unlocked */
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
}
/**
@@ -941,6 +978,7 @@
is_l4 = true;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
/* 82576 cannot timestamp all packets, which it needs to do to
* support both V1 Sync and Delay_Req messages
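The igb_ptp_tx_hwtstamp() hunk above (and its ixgbe twin later in this series) reorders completion so the in-progress bit is released before the stack is notified; an application woken by skb_tstamp_tx() can then immediately arm the next timestamp instead of racing a still-held bit. A hedged sketch of that ordering using C11 atomics rather than the kernel's clear_bit_unlock():

#include <stdatomic.h>
#include <stddef.h>

struct sk_buff; /* opaque here */
extern void notify_stack(struct sk_buff *skb);
extern void free_skb(struct sk_buff *skb);

struct ptp_state {
	struct sk_buff *ptp_tx_skb;
	atomic_flag tx_in_progress; /* stands in for the lock bit */
};

static void complete_tx_timestamp(struct ptp_state *st)
{
	/* Take a local copy, drop ownership, then release the flag with
	 * release semantics (clear_bit_unlock() gives the same barrier)
	 * before any waiter can run inside notify_stack(). */
	struct sk_buff *skb = st->ptp_tx_skb;

	st->ptp_tx_skb = NULL;
	atomic_flag_clear_explicit(&st->tx_in_progress,
				   memory_order_release);

	notify_stack(skb); /* may wake the next timestamp requester */
	free_skb(skb);
}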
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 7626376..dd55787 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -733,6 +733,7 @@
struct timecounter hw_tc;
u32 base_incval;
u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_skipped;
u32 rx_hwtstamp_cleared;
void (*ptp_setup_sdp)(struct ixgbe_adapter *);
@@ -960,6 +961,7 @@
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *);
void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb);
static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index c8ac460..d602637 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1589,15 +1589,17 @@
switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) {
case 0x0000:
- /* mask VLAN ID, fall through to mask VLAN priority */
+ /* mask VLAN ID */
fdirm |= IXGBE_FDIRM_VLANID;
+ /* fall through */
case 0x0FFF:
/* mask VLAN priority */
fdirm |= IXGBE_FDIRM_VLANP;
break;
case 0xE000:
- /* mask VLAN ID only, fall through */
+ /* mask VLAN ID only */
fdirm |= IXGBE_FDIRM_VLANID;
+ /* fall through */
case 0xEFFF:
/* no VLAN fields masked */
break;
@@ -1608,8 +1610,9 @@
switch (input_mask->formatted.flex_bytes & 0xFFFF) {
case 0x0000:
- /* Mask Flex Bytes, fall through */
+ /* Mask Flex Bytes */
fdirm |= IXGBE_FDIRM_FLEX;
+ /* fall through */
case 0xFFFF:
break;
default:
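The switch rewrites above split each old "mask X, fall through to mask Y" comment so a bare fall-through marker sits directly before the next case label, the position where -Wimplicit-fallthrough and static checkers expect it. A tiny self-contained illustration:

/* The comment must be the last thing before the next label for the
 * compiler to treat the fall-through as intentional. */
static unsigned int vlan_mask(unsigned int vlan)
{
	unsigned int fdirm = 0;

	switch (vlan) {
	case 0x0000:
		fdirm |= 0x1; /* mask VLAN ID */
		/* fall through */
	case 0x0FFF:
		fdirm |= 0x2; /* mask VLAN priority */
		break;
	default:
		break;
	}
	return fdirm;
}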
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index c38d50c..4e35e70 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -155,7 +155,7 @@
if (ret_val)
return ret_val;
- /* only backplane uses autoc so fall though */
+ /* fall through - only backplane uses autoc */
case ixgbe_media_type_fiber:
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
@@ -395,7 +395,8 @@
}
/* Initialize the LED link active for LED blink support */
- hw->mac.ops.init_led_link_act(hw);
+ if (hw->mac.ops.init_led_link_act)
+ hw->mac.ops.init_led_link_act(hw);
return status;
}
@@ -3548,7 +3549,7 @@
rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
for (; i < (num_pb / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
- /* Fall through to configure remaining packet buffers */
+ /* fall through - configure remaining packet buffers */
case (PBA_STRATEGY_EQUAL):
/* Divide the remaining Rx packet buffer evenly among the TCs */
rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
@@ -4120,15 +4121,6 @@
speedcnt++;
highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
- /* If we already have link at this speed, just jump out */
- status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
- false);
- if (status)
- return status;
-
- if (link_speed == IXGBE_LINK_SPEED_10GB_FULL && link_up)
- goto out;
-
/* Set the module link speed */
switch (hw->phy.media_type) {
case ixgbe_media_type_fiber:
@@ -4180,15 +4172,6 @@
if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
- /* If we already have link at this speed, just jump out */
- status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
- false);
- if (status)
- return status;
-
- if (link_speed == IXGBE_LINK_SPEED_1GB_FULL && link_up)
- goto out;
-
/* Set the module link speed */
switch (hw->phy.media_type) {
case ixgbe_media_type_fiber:
@@ -4295,4 +4278,23 @@
hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
return;
}
+
+ /* Set RS1 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ hw_dbg(hw, "Failed to read Rx Rate Select RS1\n");
+ return;
+ }
+
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ hw_dbg(hw, "Failed to write Rx Rate Select RS1\n");
+ return;
+ }
}
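The ixgbe_common.c hunk above wraps the init_led_link_act() call in a NULL check because not every MAC backend populates that slot in its ops table. A generic sketch of guarding optional function pointers (the ops layout is illustrative):

struct mac_ops {
	int (*init_led_link_act)(void *hw); /* optional; may be NULL */
};

/* Invoke an optional backend hook only when the implementation
 * provides it; absent hooks are treated as a successful no-op. */
static int init_hw(void *hw, const struct mac_ops *ops)
{
	if (ops->init_led_link_act)
		return ops->init_led_link_act(hw);
	return 0;
}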
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 7e5e336..72c5657 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -111,6 +111,9 @@
{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
+ {"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
+ {"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
+ {"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
#ifdef IXGBE_FCOE
{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
@@ -1274,7 +1277,7 @@
u8 *data)
{
char *p = (char *)data;
- int i;
+ unsigned int i;
switch (stringset) {
case ETH_SS_TEST:
@@ -2254,6 +2257,9 @@
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
+ return -EOPNOTSUPP;
+
switch (state) {
case ETHTOOL_ID_ACTIVE:
adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
@@ -2665,6 +2671,7 @@
*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
break;
}
+ /* fall through */
default:
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 2a653ec..a23c2b5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -491,7 +491,7 @@
if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
(fctl & FC_FC_END_SEQ)) {
skb_linearize(skb);
- crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
+ crc = skb_put(skb, sizeof(*crc));
crc->fcoe_eof = FC_EOF_T;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d39cba2..f1dbdf2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -58,6 +58,7 @@
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/vxlan.h>
+#include <net/mpls.h>
#include "ixgbe.h"
#include "ixgbe_common.h"
@@ -75,7 +76,7 @@
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "5.0.0-k"
+#define DRV_VERSION "5.1.0-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2016 Intel Corporation.";
@@ -1451,7 +1452,7 @@
IXGBE_DCA_CTRL_DCA_MODE_CB2);
break;
}
- /* Fall Through since DCA is disabled. */
+ /* fall through - DCA is disabled. */
case DCA_PROVIDER_REMOVE:
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
dca_remove_requester(dev);
@@ -2232,6 +2233,7 @@
break;
default:
bpf_warn_invalid_xdp_action(act);
+ /* fallthrough */
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
/* fallthrough -- handle aborts by dropping packet */
@@ -2638,8 +2640,7 @@
if (test_bit(__IXGBE_DOWN, &adapter->state))
return;
- if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
- !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
+ if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
return;
adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
@@ -3105,23 +3106,23 @@
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ unsigned int ri = 0, ti = 0;
int vector, err;
- int ri = 0, ti = 0;
for (vector = 0; vector < adapter->num_q_vectors; vector++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
struct msix_entry *entry = &adapter->msix_entries[vector];
if (q_vector->tx.ring && q_vector->rx.ring) {
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-%s-%d", netdev->name, "TxRx", ri++);
+ snprintf(q_vector->name, sizeof(q_vector->name),
+ "%s-TxRx-%u", netdev->name, ri++);
ti++;
} else if (q_vector->rx.ring) {
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-%s-%d", netdev->name, "rx", ri++);
+ snprintf(q_vector->name, sizeof(q_vector->name),
+ "%s-rx-%u", netdev->name, ri++);
} else if (q_vector->tx.ring) {
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-%s-%d", netdev->name, "tx", ti++);
+ snprintf(q_vector->name, sizeof(q_vector->name),
+ "%s-tx-%u", netdev->name, ti++);
} else {
/* skip this unused q_vector */
continue;
@@ -3802,6 +3803,9 @@
mrqc = IXGBE_MRQC_VMDQRSS32EN;
else
mrqc = IXGBE_MRQC_VMDQRSS64EN;
+
+ /* Enable L3/L4 for Tx Switched packets */
+ mrqc |= IXGBE_MRQC_L3L4TXSWEN;
} else {
if (tcs > 4)
mrqc = IXGBE_MRQC_RTRSS8TCEN;
@@ -4174,7 +4178,7 @@
case ixgbe_mac_x550em_a:
if (adapter->num_vfs)
rdrxctl |= IXGBE_RDRXCTL_PSP;
- /* fall through for older HW */
+ /* fall through */
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
/* Disable RSC for ACK packets */
@@ -6183,8 +6187,6 @@
if (!tx_ring->tx_buffer_info)
goto err;
- u64_stats_init(&tx_ring->syncp);
-
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
@@ -6278,8 +6280,6 @@
if (!rx_ring->rx_buffer_info)
goto err;
- u64_stats_init(&rx_ring->syncp);
-
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
@@ -6886,6 +6886,7 @@
hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
+ /* fall through */
case ixgbe_mac_82599EB:
for (i = 0; i < 16; i++)
adapter->hw_rx_no_dma_resources +=
@@ -7634,6 +7635,7 @@
if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
ixgbe_ptp_overflow_check(adapter);
ixgbe_ptp_rx_hang(adapter);
+ ixgbe_ptp_tx_hang(adapter);
}
ixgbe_service_event_complete(adapter);
@@ -7667,7 +7669,10 @@
if (err < 0)
return err;
- ip.hdr = skb_network_header(skb);
+ if (eth_p_mpls(first->protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
l4.hdr = skb_checksum_start(skb);
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
@@ -7871,9 +7876,9 @@
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
IXGBE_TXD_CMD_RS)
-static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
- struct ixgbe_tx_buffer *first,
- const u8 hdr_len)
+static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *first,
+ const u8 hdr_len)
{
struct sk_buff *skb = first->skb;
struct ixgbe_tx_buffer *tx_buffer;
@@ -8000,7 +8005,7 @@
mmiowb();
}
- return;
+ return 0;
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
tx_buffer = &tx_ring->tx_buffer_info[i];
@@ -8030,6 +8035,8 @@
first->skb = NULL;
tx_ring->next_to_use = i;
+
+ return -1;
}
static void ixgbe_atr(struct ixgbe_ring *ring,
@@ -8205,6 +8212,7 @@
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
break;
+ /* fall through */
default:
return fallback(dev, skb);
}
@@ -8330,16 +8338,19 @@
protocol = vlan_get_protocol(skb);
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- adapter->ptp_clock &&
- !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
- &adapter->state)) {
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
+ adapter->ptp_clock) {
+ if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
+ &adapter->state)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
- /* schedule check for Tx timestamp */
- adapter->ptp_tx_skb = skb_get(skb);
- adapter->ptp_tx_start = jiffies;
- schedule_work(&adapter->ptp_tx_work);
+ /* schedule check for Tx timestamp */
+ adapter->ptp_tx_skb = skb_get(skb);
+ adapter->ptp_tx_start = jiffies;
+ schedule_work(&adapter->ptp_tx_work);
+ } else {
+ adapter->tx_hwtstamp_skipped++;
+ }
}
skb_tx_timestamp(skb);
@@ -8402,13 +8413,21 @@
#ifdef IXGBE_FCOE
xmit_fcoe:
#endif /* IXGBE_FCOE */
- ixgbe_tx_map(tx_ring, first, hdr_len);
+ if (ixgbe_tx_map(tx_ring, first, hdr_len))
+ goto cleanup_tx_timestamp;
return NETDEV_TX_OK;
out_drop:
dev_kfree_skb_any(first->skb);
first->skb = NULL;
+cleanup_tx_timestamp:
+ if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ cancel_work_sync(&adapter->ptp_tx_work);
+ clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
+ }
return NETDEV_TX_OK;
}
@@ -9195,11 +9214,14 @@
return err;
}
-static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc)
+static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *tc)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
+ if (chain_index)
+ return -EOPNOTSUPP;
+
if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
tc->type == TC_SETUP_CLSU32) {
switch (tc->cls_u32->command) {
@@ -9793,6 +9815,8 @@
return ixgbe_xdp_setup(dev, xdp->prog);
case XDP_QUERY_PROG:
xdp->prog_attached = !!(adapter->xdp_prog);
+ xdp->prog_id = adapter->xdp_prog ?
+ adapter->xdp_prog->aux->id : 0;
return 0;
default:
return -EINVAL;
@@ -9929,6 +9953,7 @@
/* only support first port */
if (hw->bus.func != 0)
break;
+ /* fall through */
case IXGBE_SUBDEV_ID_82599_SP_560FLR:
case IXGBE_SUBDEV_ID_82599_SFP:
case IXGBE_SUBDEV_ID_82599_RNDC:
@@ -10191,7 +10216,11 @@
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->hw_enc_features |= netdev->vlan_features;
- netdev->mpls_features |= NETIF_F_HW_CSUM;
+ netdev->mpls_features |= NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_HW_CSUM;
+ netdev->mpls_features |= IXGBE_GSO_PARTIAL_FEATURES;
/* set this bit last since it cannot be part of vlan_features */
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
@@ -10275,6 +10304,10 @@
if (err)
goto err_sw_init;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ u64_stats_init(&adapter->rx_ring[i]->syncp);
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ u64_stats_init(&adapter->tx_ring[i]->syncp);
for (i = 0; i < adapter->num_xdp_queues; i++)
u64_stats_init(&adapter->xdp_ring[i]->syncp);
@@ -10341,11 +10374,11 @@
"hardware.\n");
}
strcpy(netdev->name, "eth%d");
+ pci_set_drvdata(pdev, adapter);
err = register_netdev(netdev);
if (err)
goto err_register;
- pci_set_drvdata(pdev, adapter);
/* power down the optics for 82599 SFP+ fiber */
if (hw->mac.ops.disable_tx_laser)
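The transmit-path hunks above make ixgbe_tx_map() (like igb_tx_map() earlier) report DMA-mapping failure so the caller can unwind a timestamp it armed before mapping; without the unwind, the single in-flight PTP slot would stay locked with an orphaned skb. A compact sketch of that goto-cleanup shape (names are illustrative):

#include <stdbool.h>

struct frame { bool want_tstamp; };

extern int map_for_dma(struct frame *f); /* 0 on success */
extern void drop_deferred_timestamp(void); /* free skb, clear lock bit */

static int xmit(struct frame *f)
{
	if (map_for_dma(f))
		goto cleanup_tx_timestamp;
	return 0;

cleanup_tx_timestamp:
	/* Undo the timestamp armed earlier in this function; the frame
	 * itself is dropped, which the stack still reports as handled. */
	if (f->want_tstamp)
		drop_deferred_timestamp();
	return 0;
}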
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 5aa2c3c..b0cac96 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -84,8 +84,9 @@
#define IXGBE_CS4227_GLOBAL_ID_LSB 0
#define IXGBE_CS4227_GLOBAL_ID_MSB 1
#define IXGBE_CS4227_SCRATCH 2
-#define IXGBE_CS4223_PHY_ID 0x7003 /* Quad port */
-#define IXGBE_CS4227_PHY_ID 0x3003 /* Dual port */
+#define IXGBE_CS4227_EFUSE_PDF_SKU 0x19F
+#define IXGBE_CS4223_SKU_ID 0x0010 /* Quad port */
+#define IXGBE_CS4227_SKU_ID 0x0014 /* Dual port */
#define IXGBE_CS4227_RESET_PENDING 0x1357
#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5
#define IXGBE_CS4227_RETRIES 15
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index ef0635e..86d6924 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -663,6 +663,33 @@
}
/**
+ * ixgbe_ptp_tx_hang - detect error case where Tx timestamp never finishes
+ * @adapter: private network adapter structure
+ */
+void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter)
+{
+ bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
+ IXGBE_PTP_TX_TIMEOUT);
+
+ if (!adapter->ptp_tx_skb)
+ return;
+
+ if (!test_bit(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state))
+ return;
+
+	/* If we haven't received a timestamp within the timeout, it is
+	 * reasonable to assume it never will arrive, so unlock the
+	 * timestamp bit here.
+	 */
+ if (timeout) {
+ cancel_work_sync(&adapter->ptp_tx_work);
+ ixgbe_ptp_clear_tx_timestamp(adapter);
+ adapter->tx_hwtstamp_timeouts++;
+ e_warn(drv, "clearing Tx timestamp hang\n");
+ }
+}
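The hang check above is a stamp-plus-timeout comparison on jiffies. A kernel-style sketch of just that comparison, assuming a hypothetical one-second timeout (IXGBE_PTP_TX_TIMEOUT is the driver's real constant; TX_TIMEOUT below is illustrative):

#include <linux/jiffies.h>
#include <linux/types.h>

#define TX_TIMEOUT HZ	/* one second, illustrative */

/* True once "start + TX_TIMEOUT" lies in the past; the
 * time_is_before_jiffies() macro is wraparound-safe. */
static bool tx_timestamp_timed_out(unsigned long start)
{
	return time_is_before_jiffies(start + TX_TIMEOUT);
}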
+
+/**
* ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
* @adapter: the private adapter struct
*
@@ -672,17 +699,26 @@
*/
static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter)
{
+ struct sk_buff *skb = adapter->ptp_tx_skb;
struct ixgbe_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
u64 regval = 0;
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32;
-
ixgbe_ptp_convert_to_hwtstamp(adapter, &shhwtstamps, regval);
- skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
- ixgbe_ptp_clear_tx_timestamp(adapter);
+ /* Handle cleanup of the ptp_tx_skb ourselves, and unlock the state
+ * bit prior to notifying the stack via skb_tstamp_tx(). This prevents
+ * well behaved applications from attempting to timestamp again prior
+ * to the lock bit being clear.
+ */
+ adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
+
+ /* Notify the stack and then free the skb after we've unlocked */
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
}
/**
@@ -883,6 +919,7 @@
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
/* The X550 controller is capable of timestamping all packets,
* which allows it to accept any filter.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 8baf298..0760bd7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -540,16 +540,15 @@
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
- /*
- * Version 1.1 supports jumbo frames on VFs if PF has
+ /* Version 1.1 supports jumbo frames on VFs if PF has
* jumbo frames enabled which means legacy VFs are
* disabled
*/
if (pf_max_frame > ETH_FRAME_LEN)
break;
+ /* fall through */
default:
- /*
- * If the PF or VF are running w/ jumbo frames enabled
+ /* If the PF or VF are running w/ jumbo frames enabled
* we need to shut down the VF Rx path as we cannot
* support jumbo frames on legacy VFs
*/
@@ -778,11 +777,17 @@
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
int vf, unsigned char *mac_addr)
{
- ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
- memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
- ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
+ s32 retval;
- return 0;
+ ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
+ retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
+ if (retval >= 0)
+ memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr,
+ ETH_ALEN);
+ else
+ memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN);
+
+ return retval;
}
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
@@ -813,7 +818,7 @@
IXGBE_WRITE_FLUSH(hw);
/* indicate to hardware that we want to set drop enable */
- reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE;
+ reg = IXGBE_QDE_WRITE | qde;
reg |= i << IXGBE_QDE_IDX_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
}
@@ -1347,27 +1352,49 @@
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ s32 retval;
if (vf >= adapter->num_vfs)
return -EINVAL;
- if (is_zero_ether_addr(mac)) {
- adapter->vfinfo[vf].pf_set_mac = false;
- dev_info(&adapter->pdev->dev, "removing MAC on VF %d\n", vf);
- } else if (is_valid_ether_addr(mac)) {
- adapter->vfinfo[vf].pf_set_mac = true;
+ if (is_valid_ether_addr(mac)) {
dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
mac, vf);
dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective.");
- if (test_bit(__IXGBE_DOWN, &adapter->state)) {
- dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n");
- dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n");
+
+ retval = ixgbe_set_vf_mac(adapter, vf, mac);
+ if (retval >= 0) {
+ adapter->vfinfo[vf].pf_set_mac = true;
+
+ if (test_bit(__IXGBE_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n");
+ }
+ } else {
+ dev_warn(&adapter->pdev->dev, "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n");
+ }
+ } else if (is_zero_ether_addr(mac)) {
+ unsigned char *vf_mac_addr =
+ adapter->vfinfo[vf].vf_mac_addresses;
+
+ /* nothing to do */
+ if (is_zero_ether_addr(vf_mac_addr))
+ return 0;
+
+ dev_info(&adapter->pdev->dev, "removing MAC on VF %d\n", vf);
+
+ retval = ixgbe_del_mac_filter(adapter, vf_mac_addr, vf);
+ if (retval >= 0) {
+ adapter->vfinfo[vf].pf_set_mac = false;
+ memcpy(vf_mac_addr, mac, ETH_ALEN);
+ } else {
+ dev_warn(&adapter->pdev->dev, "Could NOT remove the VF MAC address.\n");
}
} else {
- return -EINVAL;
+ retval = -EINVAL;
}
- return ixgbe_set_vf_mac(adapter, vf, mac);
+ return retval;
}
static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 2ba024b..72d84a0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1750,14 +1750,14 @@
if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
return 0;
- if (!ret_val)
+ if (ret_val)
return ret_val;
/* Configure internal PHY for native SFI based on module type */
ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
- if (!ret_val)
+ if (ret_val)
return ret_val;
reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
@@ -1767,7 +1767,7 @@
ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
- if (!ret_val)
+ if (ret_val)
return ret_val;
/* Setup SFI internal link. */
@@ -1798,7 +1798,7 @@
if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
return 0;
- if (!ret_val)
+ if (ret_val)
return ret_val;
/* Configure internal PHY for KR/KX. */
@@ -1807,16 +1807,16 @@
if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE)
return IXGBE_ERR_PHY_ADDR_INVALID;
- /* Get external PHY device id */
- ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB,
- IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
+ /* Get external PHY SKU id */
+ ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
if (ret_val)
return ret_val;
/* When configuring quad port CS4223, the MAC instance is part
* of the slice offset.
*/
- if (reg_phy_ext == IXGBE_CS4223_PHY_ID)
+ if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
slice_offset = (hw->bus.lan_id +
(hw->bus.instance_id << 1)) << 12;
else
@@ -1824,12 +1824,28 @@
/* Configure CS4227/CS4223 LINE side to proper mode. */
reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
+
+ ret_val = hw->phy.ops.read_reg(hw, reg_slice,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
+ if (ret_val)
+ return ret_val;
+
+ reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
+ (IXGBE_CS4227_EDC_MODE_SR << 1));
+
if (setup_linear)
reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1;
else
reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1;
- return hw->phy.ops.write_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE,
- reg_phy_ext);
+
+ ret_val = hw->phy.ops.write_reg(hw, reg_slice,
+ IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
+ if (ret_val)
+ return ret_val;
+
+ /* Flush previous write with a read */
+ return hw->phy.ops.read_reg(hw, reg_slice,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
}
/**
@@ -3206,6 +3222,7 @@
phy->ops.setup_link = NULL;
phy->ops.read_reg = NULL;
phy->ops.write_reg = NULL;
+ phy->ops.reset = NULL;
break;
default:
break;
@@ -3819,6 +3836,28 @@
.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550,
};
+static const struct ixgbe_mac_operations mac_ops_X550EM_x_fw = {
+ X550_COMMON_MAC
+ .led_on = NULL,
+ .led_off = NULL,
+ .init_led_link_act = NULL,
+ .reset_hw = &ixgbe_reset_hw_X550em,
+ .get_media_type = &ixgbe_get_media_type_X550em,
+ .get_san_mac_addr = NULL,
+ .get_wwn_prefix = NULL,
+ .setup_link = &ixgbe_setup_mac_link_X540,
+ .get_link_capabilities = &ixgbe_get_link_capabilities_X550em,
+ .get_bus_info = &ixgbe_get_bus_info_X550em,
+ .setup_sfp = ixgbe_setup_sfp_modules_X550em,
+ .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em,
+ .release_swfw_sync = &ixgbe_release_swfw_sync_X550em,
+ .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
+ .setup_fc = NULL,
+ .fc_autoneg = ixgbe_fc_autoneg,
+ .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550,
+ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550,
+};
+
static struct ixgbe_mac_operations mac_ops_x550em_a = {
X550_COMMON_MAC
.led_on = ixgbe_led_on_t_x550em,
@@ -3986,7 +4025,7 @@
const struct ixgbe_info ixgbe_x550em_x_fw_info = {
.mac = ixgbe_mac_X550EM_x,
.get_invariants = ixgbe_get_invariants_X550_x_fw,
- .mac_ops = &mac_ops_X550EM_x,
+ .mac_ops = &mac_ops_X550EM_x_fw,
.eeprom_ops = &eeprom_ops_X550EM_x,
.phy_ops = &phy_ops_x550em_x_fw,
.mbx_ops = &mbx_ops_generic,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index eee29bd..084c535 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -49,6 +49,7 @@
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
+#include <net/mpls.h>
#include "ixgbevf.h"
@@ -56,7 +57,7 @@
static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
-#define DRV_VERSION "3.2.2-k"
+#define DRV_VERSION "4.1.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2015 Intel Corporation.";
@@ -1350,23 +1351,23 @@
{
struct net_device *netdev = adapter->netdev;
int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ unsigned int ri = 0, ti = 0;
int vector, err;
- int ri = 0, ti = 0;
for (vector = 0; vector < q_vectors; vector++) {
struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
struct msix_entry *entry = &adapter->msix_entries[vector];
if (q_vector->tx.ring && q_vector->rx.ring) {
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-%s-%d", netdev->name, "TxRx", ri++);
+ snprintf(q_vector->name, sizeof(q_vector->name),
+ "%s-TxRx-%u", netdev->name, ri++);
ti++;
} else if (q_vector->rx.ring) {
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-%s-%d", netdev->name, "rx", ri++);
+ snprintf(q_vector->name, sizeof(q_vector->name),
+ "%s-rx-%u", netdev->name, ri++);
} else if (q_vector->tx.ring) {
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-%s-%d", netdev->name, "tx", ti++);
+ snprintf(q_vector->name, sizeof(q_vector->name),
+ "%s-tx-%u", netdev->name, ti++);
} else {
/* skip this unused q_vector */
continue;
@@ -3321,7 +3322,10 @@
if (err < 0)
return err;
- ip.hdr = skb_network_header(skb);
+ if (eth_p_mpls(first->protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
l4.hdr = skb_checksum_start(skb);
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
@@ -4075,7 +4079,11 @@
netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
- netdev->mpls_features |= NETIF_F_HW_CSUM;
+ netdev->mpls_features |= NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_HW_CSUM;
+ netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES;
netdev->hw_enc_features |= netdev->vlan_features;
/* set this bit last since it cannot be part of vlan_features */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index b6d0c01..0c25006 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -335,6 +335,7 @@
case ixgbe_mbox_api_12:
if (hw->mac.type < ixgbe_mac_X550_vf)
break;
+ /* fall through */
default:
return -EOPNOTSUPP;
}
@@ -401,6 +402,7 @@
case ixgbe_mbox_api_12:
if (hw->mac.type < ixgbe_mac_X550_vf)
break;
+ /* fall through */
default:
return -EOPNOTSUPP;
}
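These /* fall through */ markers (here and in the ixgbe hunks above) document that dropping into the next case is intentional, which is exactly what GCC's -Wimplicit-fallthrough warning checks for. A standalone example of the guarded-fall-through shape these switches use (names illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool has_wol(int subdev, bool is_first_port)
{
	switch (subdev) {
	case 0:
		/* only the first port of subdevice 0 supports WoL */
		if (!is_first_port)
			break;
		/* fall through */
	case 1:
	case 2:
		return true;
	}
	return false;
}

int main(void)
{
	printf("%d %d\n", has_wol(0, true), has_wol(0, false)); /* 1 0 */
	return 0;
}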
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index f580b49..62d848d 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -691,17 +691,6 @@
}
static inline void
-jme_restart_tx_engine(struct jme_adapter *jme)
-{
- /*
- * Restart TX Engine
- */
- jwrite32(jme, JME_TXCS, jme->reg_txcs |
- TXCS_SELECT_QUEUE0 |
- TXCS_ENABLE);
-}
-
-static inline void
jme_disable_tx_engine(struct jme_adapter *jme)
{
int i;
@@ -2382,37 +2371,6 @@
jme_reset_link(jme);
}
-static inline void jme_pause_rx(struct jme_adapter *jme)
-{
- atomic_dec(&jme->link_changing);
-
- jme_set_rx_pcc(jme, PCC_OFF);
- if (test_bit(JME_FLAG_POLL, &jme->flags)) {
- JME_NAPI_DISABLE(jme);
- } else {
- tasklet_disable(&jme->rxclean_task);
- tasklet_disable(&jme->rxempty_task);
- }
-}
-
-static inline void jme_resume_rx(struct jme_adapter *jme)
-{
- struct dynpcc_info *dpi = &(jme->dpi);
-
- if (test_bit(JME_FLAG_POLL, &jme->flags)) {
- JME_NAPI_ENABLE(jme);
- } else {
- tasklet_enable(&jme->rxclean_task);
- tasklet_enable(&jme->rxempty_task);
- }
- dpi->cur = PCC_P1;
- dpi->attempt = PCC_P1;
- dpi->cnt = 0;
- jme_set_rx_pcc(jme, PCC_P1);
-
- atomic_inc(&jme->link_changing);
-}
-
static void
jme_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
@@ -2652,12 +2610,11 @@
struct ethtool_link_ksettings *cmd)
{
struct jme_adapter *jme = netdev_priv(netdev);
- int rc;
spin_lock_bh(&jme->phy_lock);
- rc = mii_ethtool_get_link_ksettings(&jme->mii_if, cmd);
+ mii_ethtool_get_link_ksettings(&jme->mii_if, cmd);
spin_unlock_bh(&jme->phy_lock);
- return rc;
+ return 0;
}
static int
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 9fae98c..3c0a645 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -699,13 +699,12 @@
struct ethtool_link_ksettings *cmd)
{
struct korina_private *lp = netdev_priv(dev);
- int rc;
spin_lock_irq(&lp->lock);
- rc = mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
+ mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
spin_unlock_irq(&lp->lock);
- return rc;
+ return 0;
}
static int netdev_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 25642de..5794d98 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1501,10 +1501,9 @@
struct ethtool_link_ksettings *cmd)
{
struct net_device *dev = mp->dev;
- int err;
u32 supported, advertising;
- err = phy_ethtool_ksettings_get(dev->phydev, cmd);
+ phy_ethtool_ksettings_get(dev->phydev, cmd);
/*
* The MAC does not support 1000baseT_Half.
@@ -1520,7 +1519,7 @@
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
advertising);
- return err;
+ return 0;
}
static int
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 90a60b9..c979821 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -17,41 +17,52 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/phy.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/clk.h>
+#include <linux/of_device.h>
#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/wait.h>
-#define MVMDIO_SMI_DATA_SHIFT 0
-#define MVMDIO_SMI_PHY_ADDR_SHIFT 16
-#define MVMDIO_SMI_PHY_REG_SHIFT 21
-#define MVMDIO_SMI_READ_OPERATION BIT(26)
-#define MVMDIO_SMI_WRITE_OPERATION 0
-#define MVMDIO_SMI_READ_VALID BIT(27)
-#define MVMDIO_SMI_BUSY BIT(28)
-#define MVMDIO_ERR_INT_CAUSE 0x007C
-#define MVMDIO_ERR_INT_SMI_DONE 0x00000010
-#define MVMDIO_ERR_INT_MASK 0x0080
+#define MVMDIO_SMI_DATA_SHIFT 0
+#define MVMDIO_SMI_PHY_ADDR_SHIFT 16
+#define MVMDIO_SMI_PHY_REG_SHIFT 21
+#define MVMDIO_SMI_READ_OPERATION BIT(26)
+#define MVMDIO_SMI_WRITE_OPERATION 0
+#define MVMDIO_SMI_READ_VALID BIT(27)
+#define MVMDIO_SMI_BUSY BIT(28)
+#define MVMDIO_ERR_INT_CAUSE 0x007C
+#define MVMDIO_ERR_INT_SMI_DONE 0x00000010
+#define MVMDIO_ERR_INT_MASK 0x0080
+
+#define MVMDIO_XSMI_MGNT_REG 0x0
+#define MVMDIO_XSMI_PHYADDR_SHIFT 16
+#define MVMDIO_XSMI_DEVADDR_SHIFT 21
+#define MVMDIO_XSMI_WRITE_OPERATION (0x5 << 26)
+#define MVMDIO_XSMI_READ_OPERATION (0x7 << 26)
+#define MVMDIO_XSMI_READ_VALID BIT(29)
+#define MVMDIO_XSMI_BUSY BIT(30)
+#define MVMDIO_XSMI_ADDR_REG 0x8
/*
* SMI Timeout measurements:
* - Kirkwood 88F6281 (Globalscale Dreamplug): 45us to 95us (Interrupt)
* - Armada 370 (Globalscale Mirabox): 41us to 43us (Polled)
*/
-#define MVMDIO_SMI_TIMEOUT 1000 /* 1000us = 1ms */
-#define MVMDIO_SMI_POLL_INTERVAL_MIN 45
-#define MVMDIO_SMI_POLL_INTERVAL_MAX 55
+#define MVMDIO_SMI_TIMEOUT 1000 /* 1000us = 1ms */
+#define MVMDIO_SMI_POLL_INTERVAL_MIN 45
+#define MVMDIO_SMI_POLL_INTERVAL_MAX 55
+
+#define MVMDIO_XSMI_POLL_INTERVAL_MIN 150
+#define MVMDIO_XSMI_POLL_INTERVAL_MAX 160
struct orion_mdio_dev {
- struct mutex lock;
void __iomem *regs;
struct clk *clk[3];
/*
@@ -64,14 +75,21 @@
wait_queue_head_t smi_busy_wait;
};
-static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev)
-{
- return !(readl(dev->regs) & MVMDIO_SMI_BUSY);
-}
+enum orion_mdio_bus_type {
+ BUS_TYPE_SMI,
+ BUS_TYPE_XSMI
+};
+
+struct orion_mdio_ops {
+ int (*is_done)(struct orion_mdio_dev *);
+ unsigned int poll_interval_min;
+ unsigned int poll_interval_max;
+};
/* Wait for the SMI unit to be ready for another operation
*/
-static int orion_mdio_wait_ready(struct mii_bus *bus)
+static int orion_mdio_wait_ready(const struct orion_mdio_ops *ops,
+ struct mii_bus *bus)
{
struct orion_mdio_dev *dev = bus->priv;
unsigned long timeout = usecs_to_jiffies(MVMDIO_SMI_TIMEOUT);
@@ -79,14 +97,14 @@
int timedout = 0;
while (1) {
- if (orion_mdio_smi_is_done(dev))
+ if (ops->is_done(dev))
return 0;
else if (timedout)
break;
if (dev->err_interrupt <= 0) {
- usleep_range(MVMDIO_SMI_POLL_INTERVAL_MIN,
- MVMDIO_SMI_POLL_INTERVAL_MAX);
+ usleep_range(ops->poll_interval_min,
+ ops->poll_interval_max);
if (time_is_before_jiffies(end))
++timedout;
@@ -98,8 +116,7 @@
if (timeout < 2)
timeout = 2;
wait_event_timeout(dev->smi_busy_wait,
- orion_mdio_smi_is_done(dev),
- timeout);
+ ops->is_done(dev), timeout);
++timedout;
}
@@ -109,52 +126,61 @@
return -ETIMEDOUT;
}
-static int orion_mdio_read(struct mii_bus *bus, int mii_id,
- int regnum)
+static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev)
+{
+ return !(readl(dev->regs) & MVMDIO_SMI_BUSY);
+}
+
+static const struct orion_mdio_ops orion_mdio_smi_ops = {
+ .is_done = orion_mdio_smi_is_done,
+ .poll_interval_min = MVMDIO_SMI_POLL_INTERVAL_MIN,
+ .poll_interval_max = MVMDIO_SMI_POLL_INTERVAL_MAX,
+};
+
+static int orion_mdio_smi_read(struct mii_bus *bus, int mii_id,
+ int regnum)
{
struct orion_mdio_dev *dev = bus->priv;
u32 val;
int ret;
- mutex_lock(&dev->lock);
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
- ret = orion_mdio_wait_ready(bus);
+ ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus);
if (ret < 0)
- goto out;
+ return ret;
writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
(regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
MVMDIO_SMI_READ_OPERATION),
dev->regs);
- ret = orion_mdio_wait_ready(bus);
+ ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus);
if (ret < 0)
- goto out;
+ return ret;
val = readl(dev->regs);
if (!(val & MVMDIO_SMI_READ_VALID)) {
dev_err(bus->parent, "SMI bus read not valid\n");
- ret = -ENODEV;
- goto out;
+ return -ENODEV;
}
- ret = val & 0xFFFF;
-out:
- mutex_unlock(&dev->lock);
- return ret;
+ return val & GENMASK(15, 0);
}
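The return above trades the bare 0xFFFF literal for GENMASK(15, 0): GENMASK(h, l) expands to a mask with bits l..h set, making the field width explicit. A standalone check, with the macro reproduced here for illustration:

#include <stdio.h>

#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (8 * sizeof(unsigned long) - 1 - (h))))

int main(void)
{
	printf("GENMASK(15, 0) = 0x%lx\n", GENMASK(15, 0)); /* 0xffff */
	printf("GENMASK(4, 0)  = 0x%lx\n", GENMASK(4, 0));  /* 0x1f   */
	return 0;
}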
-static int orion_mdio_write(struct mii_bus *bus, int mii_id,
- int regnum, u16 value)
+static int orion_mdio_smi_write(struct mii_bus *bus, int mii_id,
+ int regnum, u16 value)
{
struct orion_mdio_dev *dev = bus->priv;
int ret;
- mutex_lock(&dev->lock);
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
- ret = orion_mdio_wait_ready(bus);
+ ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus);
if (ret < 0)
- goto out;
+ return ret;
writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
(regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
@@ -162,9 +188,74 @@
(value << MVMDIO_SMI_DATA_SHIFT)),
dev->regs);
-out:
- mutex_unlock(&dev->lock);
- return ret;
+ return 0;
+}
+
+static int orion_mdio_xsmi_is_done(struct orion_mdio_dev *dev)
+{
+ return !(readl(dev->regs + MVMDIO_XSMI_MGNT_REG) & MVMDIO_XSMI_BUSY);
+}
+
+static const struct orion_mdio_ops orion_mdio_xsmi_ops = {
+ .is_done = orion_mdio_xsmi_is_done,
+ .poll_interval_min = MVMDIO_XSMI_POLL_INTERVAL_MIN,
+ .poll_interval_max = MVMDIO_XSMI_POLL_INTERVAL_MAX,
+};
+
+static int orion_mdio_xsmi_read(struct mii_bus *bus, int mii_id,
+ int regnum)
+{
+ struct orion_mdio_dev *dev = bus->priv;
+ u16 dev_addr = (regnum >> 16) & GENMASK(4, 0);
+ int ret;
+
+ if (!(regnum & MII_ADDR_C45))
+ return -EOPNOTSUPP;
+
+ ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus);
+ if (ret < 0)
+ return ret;
+
+ writel(regnum & GENMASK(15, 0), dev->regs + MVMDIO_XSMI_ADDR_REG);
+ writel((mii_id << MVMDIO_XSMI_PHYADDR_SHIFT) |
+ (dev_addr << MVMDIO_XSMI_DEVADDR_SHIFT) |
+ MVMDIO_XSMI_READ_OPERATION,
+ dev->regs + MVMDIO_XSMI_MGNT_REG);
+
+ ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus);
+ if (ret < 0)
+ return ret;
+
+ if (!(readl(dev->regs + MVMDIO_XSMI_MGNT_REG) &
+ MVMDIO_XSMI_READ_VALID)) {
+ dev_err(bus->parent, "XSMI bus read not valid\n");
+ return -ENODEV;
+ }
+
+ return readl(dev->regs + MVMDIO_XSMI_MGNT_REG) & GENMASK(15, 0);
+}
+
+static int orion_mdio_xsmi_write(struct mii_bus *bus, int mii_id,
+ int regnum, u16 value)
+{
+ struct orion_mdio_dev *dev = bus->priv;
+ u16 dev_addr = (regnum >> 16) & GENMASK(4, 0);
+ int ret;
+
+ if (!(regnum & MII_ADDR_C45))
+ return -EOPNOTSUPP;
+
+ ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus);
+ if (ret < 0)
+ return ret;
+
+ writel(regnum & GENMASK(15, 0), dev->regs + MVMDIO_XSMI_ADDR_REG);
+ writel((mii_id << MVMDIO_XSMI_PHYADDR_SHIFT) |
+ (dev_addr << MVMDIO_XSMI_DEVADDR_SHIFT) |
+ MVMDIO_XSMI_WRITE_OPERATION | value,
+ dev->regs + MVMDIO_XSMI_MGNT_REG);
+
+ return 0;
}
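The XSMI accessors only speak Clause 45, where the regnum handed down by the MDIO core packs a flag bit plus the MMD device address on top of the 16-bit register number; the shifts and masks above unpack it. A standalone decode of that layout (MII_ADDR_C45 is bit 30 in the kernel's mdio headers; the example register values are arbitrary):

#include <stdio.h>

#define MII_ADDR_C45 (1 << 30)

int main(void)
{
	int regnum = MII_ADDR_C45 | (1 /* PMA/PMD MMD */ << 16) | 0x0007;

	unsigned int dev_addr = (regnum >> 16) & 0x1f;	/* GENMASK(4, 0)  */
	unsigned int reg      = regnum & 0xffff;	/* GENMASK(15, 0) */

	printf("c45=%d dev=%u reg=0x%04x\n",
	       !!(regnum & MII_ADDR_C45), dev_addr, reg);
	return 0;
}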
static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id)
@@ -184,11 +275,14 @@
static int orion_mdio_probe(struct platform_device *pdev)
{
+ enum orion_mdio_bus_type type;
struct resource *r;
struct mii_bus *bus;
struct orion_mdio_dev *dev;
int i, ret;
+ type = (enum orion_mdio_bus_type)of_device_get_match_data(&pdev->dev);
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
dev_err(&pdev->dev, "No SMI register address given\n");
@@ -200,9 +294,18 @@
if (!bus)
return -ENOMEM;
+ switch (type) {
+ case BUS_TYPE_SMI:
+ bus->read = orion_mdio_smi_read;
+ bus->write = orion_mdio_smi_write;
+ break;
+ case BUS_TYPE_XSMI:
+ bus->read = orion_mdio_xsmi_read;
+ bus->write = orion_mdio_xsmi_write;
+ break;
+ }
+
bus->name = "orion_mdio_bus";
- bus->read = orion_mdio_read;
- bus->write = orion_mdio_write;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii",
dev_name(&pdev->dev));
bus->parent = &pdev->dev;
@@ -244,8 +347,6 @@
return -EPROBE_DEFER;
}
- mutex_init(&dev->lock);
-
if (pdev->dev.of_node)
ret = of_mdiobus_register(bus, pdev->dev.of_node);
else
@@ -294,7 +395,8 @@
}
static const struct of_device_id orion_mdio_match[] = {
- { .compatible = "marvell,orion-mdio" },
+ { .compatible = "marvell,orion-mdio", .data = (void *)BUS_TYPE_SMI },
+ { .compatible = "marvell,xmdio", .data = (void *)BUS_TYPE_XSMI },
{ }
};
MODULE_DEVICE_TABLE(of, orion_mdio_match);
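The probe path recovers the bus type from the matched entry via of_device_get_match_data() and selects the accessors accordingly. A userspace analogue of that table-carries-data pattern (names illustrative):

#include <stdio.h>
#include <string.h>

enum bus_type { BUS_TYPE_SMI, BUS_TYPE_XSMI };

struct of_match { const char *compatible; enum bus_type data; };

static const struct of_match match[] = {
	{ "marvell,orion-mdio", BUS_TYPE_SMI },
	{ "marvell,xmdio",      BUS_TYPE_XSMI },
};

static int probe(const char *compat)
{
	for (size_t i = 0; i < sizeof(match) / sizeof(match[0]); i++) {
		if (!strcmp(compat, match[i].compatible)) {
			printf("%s -> %s accessors\n", compat,
			       match[i].data == BUS_TYPE_SMI ? "SMI" : "XSMI");
			return 0;
		}
	}
	return -1;	/* no match */
}

int main(void)
{
	probe("marvell,xmdio");
	return 0;
}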
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d297011..0aab74c 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1976,9 +1976,8 @@
MVNETA_MH_SIZE + NET_SKB_PAD,
rx_bytes,
DMA_FROM_DEVICE);
- memcpy(skb_put(skb, rx_bytes),
- data + MVNETA_MH_SIZE + NET_SKB_PAD,
- rx_bytes);
+ skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
+ rx_bytes);
skb->protocol = eth_type_trans(skb, dev);
mvneta_rx_csum(pp, rx_status, skb);
@@ -2103,9 +2102,8 @@
MVNETA_MH_SIZE + NET_SKB_PAD,
rx_bytes,
DMA_FROM_DEVICE);
- memcpy(skb_put(skb, rx_bytes),
- data + MVNETA_MH_SIZE + NET_SKB_PAD,
- rx_bytes);
+ skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
+ rx_bytes);
skb->protocol = eth_type_trans(skb, dev);
mvneta_rx_csum(pp, rx_status, skb);
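Both mvneta hunks fold memcpy(skb_put(skb, len), data, len) into a single skb_put_data() call. The helper is roughly the following (a sketch of the shape in include/linux/skbuff.h, not a verbatim copy):

#include <linux/skbuff.h>
#include <linux/string.h>

static inline void *skb_put_data(struct sk_buff *skb, const void *data,
				 unsigned int len)
{
	void *tmp = skb_put(skb, len);	/* reserve len bytes at the tail */

	memcpy(tmp, data, len);
	return tmp;
}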
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 33c9016..ca4b55c 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -345,9 +345,15 @@
/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
* relative to port->base.
*/
+#define MVPP22_XLG_CTRL0_REG 0x100
+#define MVPP22_XLG_CTRL0_PORT_EN BIT(0)
+#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1)
+#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14)
+
#define MVPP22_XLG_CTRL3_REG 0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
+#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13)
/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
#define MVPP22_SMI_MISC_CFG_REG 0x1204
@@ -4186,7 +4192,13 @@
if (port->gop_id == 0) {
val = readl(port->base + MVPP22_XLG_CTRL3_REG);
val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
- val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
+
+ if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR)
+ val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
+ else
+ val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
+
writel(val, port->base + MVPP22_XLG_CTRL3_REG);
}
@@ -4236,19 +4248,40 @@
{
u32 val;
- val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
- val |= MVPP2_GMAC_PORT_EN_MASK;
- val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
- writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+ /* Only GOP port 0 has an XLG MAC */
+ if (port->gop_id == 0 &&
+ (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ val |= MVPP22_XLG_CTRL0_PORT_EN |
+ MVPP22_XLG_CTRL0_MAC_RESET_DIS;
+ val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+ } else {
+ val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+ val |= MVPP2_GMAC_PORT_EN_MASK;
+ val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
+ writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+ }
}
static void mvpp2_port_disable(struct mvpp2_port *port)
{
u32 val;
- val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
- val &= ~(MVPP2_GMAC_PORT_EN_MASK);
- writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+ /* Only GOP port 0 has an XLG MAC */
+ if (port->gop_id == 0 &&
+ (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ val &= ~(MVPP22_XLG_CTRL0_PORT_EN |
+ MVPP22_XLG_CTRL0_MAC_RESET_DIS);
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+ } else {
+ val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+ val &= ~(MVPP2_GMAC_PORT_EN_MASK);
+ writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+ }
}
/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 16f9755..adaaafc 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -221,6 +221,9 @@
netif_carrier_on(dev);
else
netif_carrier_off(dev);
+
+ if (!of_phy_is_fixed_link(mac->of_node))
+ phy_print_status(dev->phydev);
}
static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
@@ -369,28 +372,48 @@
mdiobus_unregister(eth->mii_bus);
}
-static inline void mtk_irq_disable(struct mtk_eth *eth,
- unsigned reg, u32 mask)
+static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
unsigned long flags;
u32 val;
- spin_lock_irqsave(&eth->irq_lock, flags);
- val = mtk_r32(eth, reg);
- mtk_w32(eth, val & ~mask, reg);
- spin_unlock_irqrestore(&eth->irq_lock, flags);
+ spin_lock_irqsave(&eth->tx_irq_lock, flags);
+ val = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
+ spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
-static inline void mtk_irq_enable(struct mtk_eth *eth,
- unsigned reg, u32 mask)
+static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
unsigned long flags;
u32 val;
- spin_lock_irqsave(&eth->irq_lock, flags);
- val = mtk_r32(eth, reg);
- mtk_w32(eth, val | mask, reg);
- spin_unlock_irqrestore(&eth->irq_lock, flags);
+ spin_lock_irqsave(&eth->tx_irq_lock, flags);
+ val = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
+ spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
+}
+
+static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&eth->rx_irq_lock, flags);
+ val = mtk_r32(eth, MTK_PDMA_INT_MASK);
+ mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
+ spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
+}
+
+static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&eth->rx_irq_lock, flags);
+ val = mtk_r32(eth, MTK_PDMA_INT_MASK);
+ mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
+ spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}
static int mtk_set_mac_address(struct net_device *dev, void *p)
@@ -969,6 +992,7 @@
RX_DMA_VID(trxd.rxd3))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
RX_DMA_VID(trxd.rxd3));
+ skb_record_rx_queue(skb, 0);
napi_gro_receive(napi, skb);
ring->data[idx] = new_data;
@@ -1098,7 +1122,7 @@
return budget;
napi_complete(napi);
- mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
return tx_done;
}
@@ -1132,7 +1156,7 @@
goto poll_again;
}
napi_complete(napi);
- mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+ mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
return rx_done + budget - remain_budget;
}
@@ -1667,7 +1691,7 @@
if (likely(napi_schedule_prep(&eth->rx_napi))) {
__napi_schedule(&eth->rx_napi);
- mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+ mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
}
return IRQ_HANDLED;
@@ -1679,7 +1703,7 @@
if (likely(napi_schedule_prep(&eth->tx_napi))) {
__napi_schedule(&eth->tx_napi);
- mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+ mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
}
return IRQ_HANDLED;
@@ -1691,11 +1715,11 @@
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
- mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+ mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+ mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
mtk_handle_irq_rx(eth->irq[2], dev);
- mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
- mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+ mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
}
#endif
@@ -1736,8 +1760,8 @@
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
- mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
- mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+ mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
}
atomic_inc(&eth->dma_refcnt);
@@ -1782,8 +1806,8 @@
if (!atomic_dec_and_test(&eth->dma_refcnt))
return 0;
- mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
- mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+ mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+ mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
@@ -1858,11 +1882,13 @@
/* Enable RX VLan Offloading */
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
+ /* enable interrupt delay for RX */
+ mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
+
/* disable delay and normal interrupt */
mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
- mtk_w32(eth, 0, MTK_PDMA_DELAY_INT);
- mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
- mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
+ mtk_tx_irq_disable(eth, ~0);
+ mtk_rx_irq_disable(eth, ~0);
mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
mtk_w32(eth, 0, MTK_RST_GL);
@@ -1933,8 +1959,8 @@
phy_disconnect(dev->phydev);
if (of_phy_is_fixed_link(mac->of_node))
of_phy_deregister_fixed_link(mac->of_node);
- mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
- mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
+ mtk_tx_irq_disable(eth, ~0);
+ mtk_rx_irq_disable(eth, ~0);
}
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -2056,7 +2082,9 @@
if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
return -EBUSY;
- return phy_ethtool_ksettings_get(ndev->phydev, cmd);
+ phy_ethtool_ksettings_get(ndev->phydev, cmd);
+
+ return 0;
}
static int mtk_set_link_ksettings(struct net_device *ndev,
@@ -2392,7 +2420,8 @@
return PTR_ERR(eth->base);
spin_lock_init(ð->page_lock);
- spin_lock_init(&eth->irq_lock);
+ spin_lock_init(&eth->tx_irq_lock);
+ spin_lock_init(&eth->rx_irq_lock);
eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"mediatek,ethsys");
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 3c46a3b..5868a09 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -125,7 +125,14 @@
#define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x))
/* PDMA Delay Interrupt Register */
-#define MTK_PDMA_DELAY_INT 0xa0c
+#define MTK_PDMA_DELAY_INT 0xa0c
+#define MTK_PDMA_DELAY_RX_EN BIT(15)
+#define MTK_PDMA_DELAY_RX_PINT 4
+#define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
+#define MTK_PDMA_DELAY_RX_PTIME 4
+#define MTK_PDMA_DELAY_RX_DELAY \
+ (MTK_PDMA_DELAY_RX_EN | MTK_PDMA_DELAY_RX_PTIME | \
+ (MTK_PDMA_DELAY_RX_PINT << MTK_PDMA_DELAY_RX_PINT_SHIFT))
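MTK_PDMA_DELAY_RX_DELAY composes to 0x8404: the enable bit (15), a pending-interrupt count of 4 in bits 8..14, and a delay time of 4 in the low byte. A standalone check of that arithmetic (macro names shortened for illustration):

#include <stdio.h>

#define BIT(n) (1u << (n))
#define DELAY_RX_EN         BIT(15)
#define DELAY_RX_PINT       4
#define DELAY_RX_PINT_SHIFT 8
#define DELAY_RX_PTIME      4
#define DELAY_RX_DELAY \
	(DELAY_RX_EN | DELAY_RX_PTIME | (DELAY_RX_PINT << DELAY_RX_PINT_SHIFT))

int main(void)
{
	printf("0x%04x\n", DELAY_RX_DELAY);	/* prints 0x8404 */
	return 0;
}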
/* PDMA Interrupt Status Register */
#define MTK_PDMA_INT_STATUS 0xa20
@@ -206,6 +213,7 @@
/* QDMA Interrupt Status Register */
#define MTK_QMTK_INT_STATUS 0x1A18
+#define MTK_RX_DONE_DLY BIT(30)
#define MTK_RX_DONE_INT3 BIT(19)
#define MTK_RX_DONE_INT2 BIT(18)
#define MTK_RX_DONE_INT1 BIT(17)
@@ -214,8 +222,7 @@
#define MTK_TX_DONE_INT2 BIT(2)
#define MTK_TX_DONE_INT1 BIT(1)
#define MTK_TX_DONE_INT0 BIT(0)
-#define MTK_RX_DONE_INT (MTK_RX_DONE_INT0 | MTK_RX_DONE_INT1 | \
- MTK_RX_DONE_INT2 | MTK_RX_DONE_INT3)
+#define MTK_RX_DONE_INT MTK_RX_DONE_DLY
#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
@@ -512,6 +519,8 @@
* @dev: The device pointer
* @base: The mapped register i/o base
* @page_lock: Make sure that register operations are atomic
+ * @tx_irq_lock: Make sure that IRQ register operations are atomic
+ * @rx_irq_lock: Make sure that IRQ register operations are atomic
* @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a
* dummy for NAPI to work
* @netdev: The netdev instances
@@ -540,7 +549,8 @@
struct device *dev;
void __iomem *base;
spinlock_t page_lock;
- spinlock_t irq_lock;
+ spinlock_t tx_irq_lock;
+ spinlock_t rx_irq_lock;
struct net_device dummy_dev;
struct net_device *netdev[MTK_MAX_DEVS];
struct mtk_mac *mac[MTK_MAX_DEVS];
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
index d547010..84a2007 100644
--- a/drivers/net/ethernet/mellanox/Kconfig
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -19,5 +19,6 @@
source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig"
source "drivers/net/ethernet/mellanox/mlxsw/Kconfig"
+source "drivers/net/ethernet/mellanox/mlxfw/Kconfig"
endif # NET_VENDOR_MELLANOX
diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile
index 2e2a5ec..016aa26 100644
--- a/drivers/net/ethernet/mellanox/Makefile
+++ b/drivers/net/ethernet/mellanox/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_MLX4_CORE) += mlx4/
obj-$(CONFIG_MLX5_CORE) += mlx5/core/
obj-$(CONFIG_MLXSW_CORE) += mlxsw/
+obj-$(CONFIG_MLXFW) += mlxfw/
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 09dd377..85fe17e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -146,16 +146,25 @@
if (err)
goto free_eq;
- cq->mcq.comp = cq->type != RX ? mlx4_en_tx_irq : mlx4_en_rx_irq;
cq->mcq.event = mlx4_en_cq_event;
- if (cq->type != RX)
+ switch (cq->type) {
+ case TX:
+ cq->mcq.comp = mlx4_en_tx_irq;
netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
NAPI_POLL_WEIGHT);
- else
+ napi_enable(&cq->napi);
+ break;
+ case RX:
+ cq->mcq.comp = mlx4_en_rx_irq;
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
-
- napi_enable(&cq->napi);
+ napi_enable(&cq->napi);
+ break;
+ case TX_XDP:
+ /* nothing regarding napi, it's shared with rx ring */
+ cq->xdp_busy = false;
+ break;
+ }
return 0;
@@ -184,8 +193,10 @@
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
- napi_disable(&cq->napi);
- netif_napi_del(&cq->napi);
+ if (cq->type != TX_XDP) {
+ napi_disable(&cq->napi);
+ netif_napi_del(&cq->napi);
+ }
mlx4_cq_free(priv->mdev->dev, &cq->mcq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ffbcb27..e97fbf3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -89,7 +89,7 @@
struct mlx4_en_dev *mdev = priv->mdev;
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
+ strlcpy(drvinfo->version, DRV_VERSION,
sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d",
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 36a7a54..56cdf38 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -46,11 +46,11 @@
MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin");
MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")");
+MODULE_VERSION(DRV_VERSION);
static const char mlx4_en_version[] =
DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
- DRV_VERSION " (" DRV_RELDATE ")\n";
+ DRV_VERSION "\n";
#define MLX4_EN_PARM_INT(X, def_val, desc) \
static unsigned int X = def_val;\
@@ -125,9 +125,9 @@
priv->flags |= MLX4_EN_FLAG_ENABLE_HW_LOOPBACK;
mutex_lock(&priv->mdev->state_lock);
- if (priv->mdev->dev->caps.flags2 &
- MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB &&
- priv->rss_map.indir_qp.qpn) {
+ if ((priv->mdev->dev->caps.flags2 &
+ MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB) &&
+ priv->rss_map.indir_qp && priv->rss_map.indir_qp->qpn) {
int i;
int err = 0;
int loopback = !!(features & NETIF_F_LOOPBACK);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 94fab20..18252a7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -86,7 +86,8 @@
return 0;
}
-static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle,
+ u32 chain_index, __be16 proto,
struct tc_to_netdev *tc)
{
if (tc->type != TC_SETUP_MQPRIO)
@@ -595,6 +596,8 @@
return err;
}
+ en_info(priv, "Steering Mode %d\n", dev->caps.steering_mode);
+
if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
int base_qpn = mlx4_get_base_qpn(dev, priv->port);
*qpn = base_qpn + index;
@@ -1009,7 +1012,7 @@
memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
mc_list[5] = priv->port;
err = mlx4_multicast_detach(mdev->dev,
- &priv->rss_map.indir_qp,
+ priv->rss_map.indir_qp,
mc_list,
MLX4_PROT_ETH,
mclist->reg_id);
@@ -1031,7 +1034,7 @@
/* needed for B0 steering support */
mc_list[5] = priv->port;
err = mlx4_multicast_attach(mdev->dev,
- &priv->rss_map.indir_qp,
+ priv->rss_map.indir_qp,
mc_list,
priv->port, 0,
MLX4_PROT_ETH,
@@ -1676,13 +1679,15 @@
if (t != TX_XDP) {
tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
tx_ring->recycle_ring = NULL;
+
+ /* Arm CQ for TX completions */
+ mlx4_en_arm_cq(priv, cq);
+
} else {
mlx4_en_init_recycle_ring(priv, i);
+ /* XDP TX CQ should never be armed */
}
- /* Arm CQ for TX completions */
- mlx4_en_arm_cq(priv, cq);
-
/* Set initial ownership of all Tx TXBBs to SW (1) */
for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
*((u32 *)(tx_ring->buf + j)) = 0xffffffff;
@@ -1741,7 +1746,7 @@
/* Attach rx QP to broadcast address */
eth_broadcast_addr(&mc_list[10]);
mc_list[5] = priv->port; /* needed for B0 steering support */
- if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
+ if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list,
priv->port, 0, MLX4_PROT_ETH,
&priv->broadcast_id))
mlx4_warn(mdev, "Failed Attaching Broadcast\n");
@@ -1865,12 +1870,12 @@
/* Detach All multicasts */
eth_broadcast_addr(&mc_list[10]);
mc_list[5] = priv->port; /* needed for B0 steering support */
- mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
+ mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp, mc_list,
MLX4_PROT_ETH, priv->broadcast_id);
list_for_each_entry(mclist, &priv->curr_list, list) {
memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
mc_list[5] = priv->port;
- mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
+ mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp,
mc_list, MLX4_PROT_ETH, mclist->reg_id);
if (mclist->tunnel_reg_id)
mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
@@ -2375,6 +2380,7 @@
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
@@ -2819,11 +2825,25 @@
return err;
}
-static bool mlx4_xdp_attached(struct net_device *dev)
+static u32 mlx4_xdp_query(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ const struct bpf_prog *xdp_prog;
+ u32 prog_id = 0;
- return !!priv->tx_ring_num[TX_XDP];
+ if (!priv->tx_ring_num[TX_XDP])
+ return prog_id;
+
+ mutex_lock(&mdev->state_lock);
+ xdp_prog = rcu_dereference_protected(
+ priv->rx_ring[0]->xdp_prog,
+ lockdep_is_held(&mdev->state_lock));
+ if (xdp_prog)
+ prog_id = xdp_prog->aux->id;
+ mutex_unlock(&mdev->state_lock);
+
+ return prog_id;
}
static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
@@ -2832,7 +2852,8 @@
case XDP_SETUP_PROG:
return mlx4_xdp_set(dev, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = mlx4_xdp_attached(dev);
+ xdp->prog_id = mlx4_xdp_query(dev);
+ xdp->prog_attached = !!xdp->prog_id;
return 0;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 77abd18..e5fb895 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -134,10 +134,11 @@
struct mlx4_en_rx_ring *ring, int index,
gfp_t gfp)
{
- struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
+ struct mlx4_en_rx_desc *rx_desc = ring->buf +
+ (index << ring->log_stride);
struct mlx4_en_rx_alloc *frags = ring->rx_info +
(index << priv->log_rx_info);
- if (ring->page_cache.index > 0) {
+ if (likely(ring->page_cache.index > 0)) {
/* XDP uses a single page per frame */
if (!frags->page) {
ring->page_cache.index--;
@@ -178,6 +179,7 @@
}
}
+/* Function not in fast-path */
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
struct mlx4_en_rx_ring *ring;
@@ -539,14 +541,14 @@
priv->loopback_ok = 1;
}
-static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
+static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
u32 missing = ring->actual_size - (ring->prod - ring->cons);
/* Try to batch allocations, but not too much. */
if (missing < 8)
- return false;
+ return;
do {
if (mlx4_en_prepare_rx_desc(priv, ring,
ring->prod & ring->size_mask,
@@ -554,9 +556,9 @@
__GFP_MEMALLOC))
break;
ring->prod++;
- } while (--missing);
+ } while (likely(--missing));
- return true;
+ mlx4_en_update_rx_prod_db(ring);
}
/* When hardware doesn't strip the vlan, we need to calculate the checksum
@@ -637,21 +639,14 @@
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
- struct mlx4_cqe *cqe;
- struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
- struct mlx4_en_rx_alloc *frags;
- struct bpf_prog *xdp_prog;
- int doorbell_pending;
- struct sk_buff *skb;
- int index;
- int nr;
- unsigned int length;
- int polled = 0;
- int ip_summed;
int factor = priv->cqe_factor;
- u64 timestamp;
- bool l2_tunnel;
+ struct mlx4_en_rx_ring *ring;
+ struct bpf_prog *xdp_prog;
+ int cq_ring = cq->ring;
+ bool doorbell_pending;
+ struct mlx4_cqe *cqe;
+ int polled = 0;
+ int index;
if (unlikely(!priv->port_up))
return 0;
@@ -659,6 +654,8 @@
if (unlikely(budget <= 0))
return polled;
+ ring = priv->rx_ring[cq_ring];
+
/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
rcu_read_lock();
xdp_prog = rcu_dereference(ring->xdp_prog);
@@ -673,10 +670,17 @@
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
cq->mcq.cons_index & cq->size)) {
+ struct mlx4_en_rx_alloc *frags;
+ enum pkt_hash_types hash_type;
+ struct sk_buff *skb;
+ unsigned int length;
+ int ip_summed;
void *va;
+ int nr;
frags = ring->rx_info + (index << priv->log_rx_info);
va = page_address(frags[0].page) + frags[0].page_offset;
+ prefetchw(va);
/*
* make sure we read the CQE after we read the ownership bit
*/
@@ -768,7 +772,7 @@
break;
case XDP_TX:
if (likely(!mlx4_en_xmit_frame(ring, frags, dev,
- length, cq->ring,
+ length, cq_ring,
&doorbell_pending))) {
frags[0].page = NULL;
goto next;
@@ -790,24 +794,27 @@
ring->packets++;
skb = napi_get_frags(&cq->napi);
- if (!skb)
+ if (unlikely(!skb))
goto next;
if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
- timestamp = mlx4_en_get_cqe_ts(cqe);
- mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
+ u64 timestamp = mlx4_en_get_cqe_ts(cqe);
+
+ mlx4_en_fill_hwtstamps(priv->mdev, skb_hwtstamps(skb),
timestamp);
}
- skb_record_rx_queue(skb, cq->ring);
+ skb_record_rx_queue(skb, cq_ring);
if (likely(dev->features & NETIF_F_RXCSUM)) {
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
MLX4_CQE_STATUS_UDP)) {
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
cqe->checksum == cpu_to_be16(0xffff)) {
- ip_summed = CHECKSUM_UNNECESSARY;
- l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
+ bool l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
+
+ ip_summed = CHECKSUM_UNNECESSARY;
+ hash_type = PKT_HASH_TYPE_L4;
if (l2_tunnel)
skb->csum_level = 1;
ring->csum_ok++;
@@ -822,6 +829,7 @@
goto csum_none;
} else {
ip_summed = CHECKSUM_COMPLETE;
+ hash_type = PKT_HASH_TYPE_L3;
ring->csum_complete++;
}
} else {
@@ -831,16 +839,14 @@
} else {
csum_none:
ip_summed = CHECKSUM_NONE;
+ hash_type = PKT_HASH_TYPE_L3;
ring->csum_none++;
}
skb->ip_summed = ip_summed;
if (dev->features & NETIF_F_RXHASH)
skb_set_hash(skb,
be32_to_cpu(cqe->immed_rss_invalid),
- (ip_summed == CHECKSUM_UNNECESSARY) ?
- PKT_HASH_TYPE_L4 :
- PKT_HASH_TYPE_L3);
-
+ hash_type);
if ((cqe->vlan_my_qpn &
cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
@@ -867,15 +873,17 @@
++cq->mcq.cons_index;
index = (cq->mcq.cons_index) & ring->size_mask;
cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
- if (++polled == budget)
+ if (unlikely(++polled == budget))
break;
}
rcu_read_unlock();
- if (polled) {
- if (doorbell_pending)
- mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq->ring]);
+ if (likely(polled)) {
+ if (doorbell_pending) {
+ priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true;
+ mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq_ring]);
+ }
mlx4_cq_set_ci(&cq->mcq);
wmb(); /* ensure HW sees CQ consumer before we post new buffers */
@@ -883,8 +891,7 @@
}
AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
- if (mlx4_en_refill_rx_buffers(priv, ring))
- mlx4_en_update_rx_prod_db(ring);
+ mlx4_en_refill_rx_buffers(priv, ring);
return polled;
}
@@ -907,16 +914,30 @@
struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_cq *xdp_tx_cq = NULL;
+ bool clean_complete = true;
int done;
+ if (priv->tx_ring_num[TX_XDP]) {
+ xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
+ if (xdp_tx_cq->xdp_busy) {
+ clean_complete = mlx4_en_process_tx_cq(dev, xdp_tx_cq,
+ budget);
+ xdp_tx_cq->xdp_busy = !clean_complete;
+ }
+ }
+
done = mlx4_en_process_rx_cq(dev, cq, budget);
/* If we used up all the quota - we're probably not done yet... */
- if (done == budget) {
+ if (done == budget || !clean_complete) {
const struct cpumask *aff;
struct irq_data *idata;
int cpu_curr;
+ /* in case we got here because of !clean_complete */
+ done = budget;
+
INC_PERF_COUNTER(priv->pstats.napi_quota);
cpu_curr = smp_processor_id();
@@ -936,7 +957,7 @@
done--;
}
/* Done for now */
- if (napi_complete_done(napi, done))
+ if (likely(napi_complete_done(napi, done)))
mlx4_en_arm_cq(priv, cq);
return done;
}
@@ -1099,11 +1120,14 @@
int i, qpn;
int err = 0;
int good_qps = 0;
+ u8 flags;
en_dbg(DRV, priv, "Configuring rss steering\n");
+
+ flags = priv->rx_ring_num == 1 ? MLX4_RESERVE_A0_QP : 0;
err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
priv->rx_ring_num,
- &rss_map->base_qpn, 0);
+ &rss_map->base_qpn, flags);
if (err) {
en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
return err;
@@ -1120,13 +1144,28 @@
++good_qps;
}
+ if (priv->rx_ring_num == 1) {
+ rss_map->indir_qp = &rss_map->qps[0];
+ priv->base_qpn = rss_map->indir_qp->qpn;
+ en_info(priv, "Optimized Non-RSS steering\n");
+ return 0;
+ }
+
+ rss_map->indir_qp = kzalloc(sizeof(*rss_map->indir_qp), GFP_KERNEL);
+ if (!rss_map->indir_qp) {
+ err = -ENOMEM;
+ goto rss_err;
+ }
+
/* Configure RSS indirection qp */
- err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp, GFP_KERNEL);
+ err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp,
+ GFP_KERNEL);
if (err) {
en_err(priv, "Failed to allocate RSS indirection QP\n");
goto rss_err;
}
- rss_map->indir_qp.event = mlx4_en_sqp_event;
+
+ rss_map->indir_qp->event = mlx4_en_sqp_event;
mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
priv->rx_ring[0]->cqn, -1, &context);
@@ -1164,8 +1203,9 @@
err = -EINVAL;
goto indir_err;
}
+
err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
- &rss_map->indir_qp, &rss_map->indir_state);
+ rss_map->indir_qp, &rss_map->indir_state);
if (err)
goto indir_err;
@@ -1173,9 +1213,11 @@
indir_err:
mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
- MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
- mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
- mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
+ MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
+ mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
+ mlx4_qp_free(mdev->dev, rss_map->indir_qp);
+ kfree(rss_map->indir_qp);
+ rss_map->indir_qp = NULL;
rss_err:
for (i = 0; i < good_qps; i++) {
mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
@@ -1193,10 +1235,15 @@
struct mlx4_en_rss_map *rss_map = &priv->rss_map;
int i;
- mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
- MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
- mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
- mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
+ if (priv->rx_ring_num > 1) {
+ mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
+ MLX4_QP_STATE_RST, NULL, 0, 0,
+ rss_map->indir_qp);
+ mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
+ mlx4_qp_free(mdev->dev, rss_map->indir_qp);
+ kfree(rss_map->indir_qp);
+ rss_map->indir_qp = NULL;
+ }
for (i = 0; i < priv->rx_ring_num; i++) {
mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 17112fa..88699b1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -63,8 +63,8 @@
skb_reserve(skb, NET_IP_ALIGN);
- ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
- packet = (unsigned char *)skb_put(skb, packet_size);
+ ethh = skb_put(skb, sizeof(struct ethhdr));
+ packet = skb_put(skb, packet_size);
memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
eth_zero_addr(ethh->h_source);
ethh->h_proto = htons(ETH_P_ARP);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 6ffd184..7d69d93 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -234,23 +234,24 @@
u8 owner)
{
__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
- struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
+ struct mlx4_en_tx_desc *tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
void *end = ring->buf + ring->buf_size;
__be32 *ptr = (__be32 *)tx_desc;
int i;
/* Optimize the common case when there are no wraparounds */
- if (likely((void *)tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
+ if (likely((void *)tx_desc +
+ (tx_info->nr_txbb << LOG_TXBB_SIZE) <= end)) {
/* Stamp the freed descriptor */
- for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE;
+ for (i = 0; i < tx_info->nr_txbb << LOG_TXBB_SIZE;
i += STAMP_STRIDE) {
*ptr = stamp;
ptr += STAMP_DWORDS;
}
} else {
/* Stamp the freed descriptor */
- for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE;
+ for (i = 0; i < tx_info->nr_txbb << LOG_TXBB_SIZE;
i += STAMP_STRIDE) {
*ptr = stamp;
ptr += STAMP_DWORDS;
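All the "index * TXBB_SIZE" to "index << LOG_TXBB_SIZE" conversions in this file rely on TXBB_SIZE being a power of two (64 in mlx4), so the shift is simply a strength-reduced multiply. A standalone check, assuming those values:

#include <assert.h>
#include <stdio.h>

#define TXBB_SIZE     64
#define LOG_TXBB_SIZE 6	/* 64 == 1 << 6 */

int main(void)
{
	for (unsigned int index = 0; index < 1024; index++)
		assert(index * TXBB_SIZE == (index << LOG_TXBB_SIZE));
	printf("ok\n");
	return 0;
}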
@@ -265,11 +266,11 @@
u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
- int index, u8 owner, u64 timestamp,
+ int index, u64 timestamp,
int napi_mode)
{
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
- struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
+ struct mlx4_en_tx_desc *tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
void *end = ring->buf + ring->buf_size;
struct sk_buff *skb = tx_info->skb;
@@ -288,19 +289,20 @@
skb_tstamp_tx(skb, &hwts);
}
- /* Optimize the common case when there are no wraparounds */
- if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
- if (!tx_info->inl) {
- if (tx_info->linear)
- dma_unmap_single(priv->ddev,
- tx_info->map0_dma,
- tx_info->map0_byte_count,
- PCI_DMA_TODEVICE);
- else
- dma_unmap_page(priv->ddev,
- tx_info->map0_dma,
- tx_info->map0_byte_count,
- PCI_DMA_TODEVICE);
+ if (!tx_info->inl) {
+ if (tx_info->linear)
+ dma_unmap_single(priv->ddev,
+ tx_info->map0_dma,
+ tx_info->map0_byte_count,
+ PCI_DMA_TODEVICE);
+ else
+ dma_unmap_page(priv->ddev,
+ tx_info->map0_dma,
+ tx_info->map0_byte_count,
+ PCI_DMA_TODEVICE);
+ /* Optimize the common case when there are no wraparounds */
+ if (likely((void *)tx_desc +
+ (tx_info->nr_txbb << LOG_TXBB_SIZE) <= end)) {
for (i = 1; i < nr_maps; i++) {
data++;
dma_unmap_page(priv->ddev,
@@ -308,23 +310,10 @@
be32_to_cpu(data->byte_count),
PCI_DMA_TODEVICE);
}
- }
- } else {
- if (!tx_info->inl) {
- if ((void *) data >= end) {
+ } else {
+ if ((void *)data >= end)
data = ring->buf + ((void *)data - end);
- }
- if (tx_info->linear)
- dma_unmap_single(priv->ddev,
- tx_info->map0_dma,
- tx_info->map0_byte_count,
- PCI_DMA_TODEVICE);
- else
- dma_unmap_page(priv->ddev,
- tx_info->map0_dma,
- tx_info->map0_byte_count,
- PCI_DMA_TODEVICE);
for (i = 1; i < nr_maps; i++) {
data++;
/* Check for wraparound before unmapping */
@@ -344,7 +333,7 @@
u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
- int index, u8 owner, u64 timestamp,
+ int index, u64 timestamp,
int napi_mode)
{
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
@@ -381,8 +370,7 @@
while (ring->cons != ring->prod) {
ring->last_nr_txbb = ring->free_tx_desc(priv, ring,
ring->cons & ring->size_mask,
- !!(ring->cons & ring->size), 0,
- 0 /* Non-NAPI caller */);
+ 0, 0 /* Non-NAPI caller */);
ring->cons += ring->last_nr_txbb;
cnt++;
}
@@ -396,15 +384,14 @@
return cnt;
}
-static bool mlx4_en_process_tx_cq(struct net_device *dev,
- struct mlx4_en_cq *cq, int napi_budget)
+bool mlx4_en_process_tx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq, int napi_budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->type][cq->ring];
struct mlx4_cqe *cqe;
- u16 index;
- u16 new_index, ring_index, stamp_index;
+ u16 index, ring_index, stamp_index;
u32 txbbs_skipped = 0;
u32 txbbs_stamp = 0;
u32 cons_index = mcq->cons_index;
@@ -419,7 +406,7 @@
u32 last_nr_txbb;
u32 ring_cons;
- if (!priv->port_up)
+ if (unlikely(!priv->port_up))
return true;
netdev_txq_bql_complete_prefetchw(ring->tx_queue);
@@ -434,6 +421,8 @@
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
cons_index & size) && (done < budget)) {
+ u16 new_index;
+
/*
* make sure we read the CQE after we read the
* ownership bit
@@ -464,8 +453,7 @@
/* free next descriptor */
last_nr_txbb = ring->free_tx_desc(
priv, ring, ring_index,
- !!((ring_cons + txbbs_skipped) &
- ring->size), timestamp, napi_budget);
+ timestamp, napi_budget);
mlx4_en_stamp_wqe(priv, ring, stamp_index,
!!((ring_cons + txbbs_stamp) &
@@ -481,7 +469,6 @@
cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
}
-
/*
* To prevent CQ overflow we first update CQ consumer and only then
* the ring consumer.
@@ -494,7 +481,7 @@
ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
- if (ring->free_tx_desc == mlx4_en_recycle_tx_desc)
+ if (cq->type == TX_XDP)
return done < budget;
netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
@@ -506,6 +493,7 @@
netif_tx_wake_queue(ring->tx_queue);
ring->wake_queue++;
}
+
return done < budget;
}
@@ -526,7 +514,7 @@
struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
- int clean_complete;
+ bool clean_complete;
clean_complete = mlx4_en_process_tx_cq(dev, cq, budget);
if (!clean_complete)
@@ -543,7 +531,7 @@
u32 index,
unsigned int desc_size)
{
- u32 copy = (ring->size - index) * TXBB_SIZE;
+ u32 copy = (ring->size - index) << LOG_TXBB_SIZE;
int i;
for (i = desc_size - copy - 4; i >= 0; i -= 4) {
@@ -558,12 +546,12 @@
if ((i & (TXBB_SIZE - 1)) == 0)
wmb();
- *((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
+ *((u32 *)(ring->buf + (index << LOG_TXBB_SIZE) + i)) =
*((u32 *) (ring->bounce_buf + i));
}
/* Return real descriptor location */
- return ring->buf + index * TXBB_SIZE;
+ return ring->buf + (index << LOG_TXBB_SIZE);
}
/* Decide if skb can be inlined in tx descriptor to avoid dma mapping
@@ -775,37 +763,101 @@
}
}
+static bool mlx4_en_build_dma_wqe(struct mlx4_en_priv *priv,
+ struct skb_shared_info *shinfo,
+ struct mlx4_wqe_data_seg *data,
+ struct sk_buff *skb,
+ int lso_header_size,
+ __be32 mr_key,
+ struct mlx4_en_tx_info *tx_info)
+{
+ struct device *ddev = priv->ddev;
+ dma_addr_t dma = 0;
+ u32 byte_count = 0;
+ int i_frag;
+
+ /* Map fragments if any */
+ for (i_frag = shinfo->nr_frags - 1; i_frag >= 0; i_frag--) {
+ const struct skb_frag_struct *frag;
+
+ frag = &shinfo->frags[i_frag];
+ byte_count = skb_frag_size(frag);
+ dma = skb_frag_dma_map(ddev, frag,
+ 0, byte_count,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, dma))
+ goto tx_drop_unmap;
+
+ data->addr = cpu_to_be64(dma);
+ data->lkey = mr_key;
+ dma_wmb();
+ data->byte_count = cpu_to_be32(byte_count);
+ --data;
+ }
+
+ /* Map linear part if needed */
+ if (tx_info->linear) {
+ byte_count = skb_headlen(skb) - lso_header_size;
+
+ dma = dma_map_single(ddev, skb->data +
+ lso_header_size, byte_count,
+ PCI_DMA_TODEVICE);
+ if (dma_mapping_error(ddev, dma))
+ goto tx_drop_unmap;
+
+ data->addr = cpu_to_be64(dma);
+ data->lkey = mr_key;
+ dma_wmb();
+ data->byte_count = cpu_to_be32(byte_count);
+ }
+ /* tx completion can avoid cache line miss for common cases */
+ tx_info->map0_dma = dma;
+ tx_info->map0_byte_count = byte_count;
+
+ return true;
+
+tx_drop_unmap:
+ en_err(priv, "DMA mapping error\n");
+
+ while (++i_frag < shinfo->nr_frags) {
+ ++data;
+ dma_unmap_page(ddev, (dma_addr_t)be64_to_cpu(data->addr),
+ be32_to_cpu(data->byte_count),
+ PCI_DMA_TODEVICE);
+ }
+
+ return false;
+}
+
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
struct mlx4_en_priv *priv = netdev_priv(dev);
union mlx4_wqe_qpn_vlan qpn_vlan = {};
- struct device *ddev = priv->ddev;
struct mlx4_en_tx_ring *ring;
struct mlx4_en_tx_desc *tx_desc;
struct mlx4_wqe_data_seg *data;
struct mlx4_en_tx_info *tx_info;
- int tx_ind = 0;
+ int tx_ind;
int nr_txbb;
int desc_size;
int real_size;
u32 index, bf_index;
__be32 op_own;
- u16 vlan_proto = 0;
- int i_frag;
int lso_header_size;
void *fragptr = NULL;
bool bounce = false;
bool send_doorbell;
bool stop_queue;
bool inline_ok;
+ u8 data_offset;
u32 ring_cons;
bool bf_ok;
tx_ind = skb_get_queue_mapping(skb);
ring = priv->tx_ring[TX][tx_ind];
- if (!priv->port_up)
+ if (unlikely(!priv->port_up))
goto tx_drop;
/* fetch ring->cons far ahead before needing it to avoid stall */
@@ -818,7 +870,7 @@
/* Align descriptor to TXBB size */
desc_size = ALIGN(real_size, TXBB_SIZE);
- nr_txbb = desc_size / TXBB_SIZE;
+ nr_txbb = desc_size >> LOG_TXBB_SIZE;
if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
if (netif_msg_tx_err(priv))
en_warn(priv, "Oversized header or SG list\n");
@@ -827,6 +879,8 @@
bf_ok = ring->bf_enabled;
if (skb_vlan_tag_present(skb)) {
+ u16 vlan_proto;
+
qpn_vlan.vlan_tag = cpu_to_be16(skb_vlan_tag_get(skb));
vlan_proto = be16_to_cpu(skb->vlan_proto);
if (vlan_proto == ETH_P_8021AD)
@@ -851,7 +905,7 @@
/* See if we have enough space for whole descriptor TXBB for setting
* SW ownership on next descriptor; if not, use a bounce buffer. */
if (likely(index + nr_txbb <= ring->size))
- tx_desc = ring->buf + index * TXBB_SIZE;
+ tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
else {
tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
bounce = true;
@@ -863,64 +917,31 @@
tx_info->skb = skb;
tx_info->nr_txbb = nr_txbb;
- data = &tx_desc->data;
- if (lso_header_size)
- data = ((void *)&tx_desc->lso + ALIGN(lso_header_size + 4,
- DS_SIZE));
+ if (!lso_header_size) {
+ data = &tx_desc->data;
+ data_offset = offsetof(struct mlx4_en_tx_desc, data);
+ } else {
+ int lso_align = ALIGN(lso_header_size + 4, DS_SIZE);
+
+ data = (void *)&tx_desc->lso + lso_align;
+ data_offset = offsetof(struct mlx4_en_tx_desc, lso) + lso_align;
+ }
/* valid only for none inline segments */
- tx_info->data_offset = (void *)data - (void *)tx_desc;
+ tx_info->data_offset = data_offset;
tx_info->inl = inline_ok;
- tx_info->linear = (lso_header_size < skb_headlen(skb) &&
- !inline_ok) ? 1 : 0;
+ tx_info->linear = lso_header_size < skb_headlen(skb) && !inline_ok;
tx_info->nr_maps = shinfo->nr_frags + tx_info->linear;
data += tx_info->nr_maps - 1;
- if (!tx_info->inl) {
- dma_addr_t dma = 0;
- u32 byte_count = 0;
-
- /* Map fragments if any */
- for (i_frag = shinfo->nr_frags - 1; i_frag >= 0; i_frag--) {
- const struct skb_frag_struct *frag;
-
- frag = &shinfo->frags[i_frag];
- byte_count = skb_frag_size(frag);
- dma = skb_frag_dma_map(ddev, frag,
- 0, byte_count,
- DMA_TO_DEVICE);
- if (dma_mapping_error(ddev, dma))
- goto tx_drop_unmap;
-
- data->addr = cpu_to_be64(dma);
- data->lkey = ring->mr_key;
- dma_wmb();
- data->byte_count = cpu_to_be32(byte_count);
- --data;
- }
-
- /* Map linear part if needed */
- if (tx_info->linear) {
- byte_count = skb_headlen(skb) - lso_header_size;
-
- dma = dma_map_single(ddev, skb->data +
- lso_header_size, byte_count,
- PCI_DMA_TODEVICE);
- if (dma_mapping_error(ddev, dma))
- goto tx_drop_unmap;
-
- data->addr = cpu_to_be64(dma);
- data->lkey = ring->mr_key;
- dma_wmb();
- data->byte_count = cpu_to_be32(byte_count);
- }
- /* tx completion can avoid cache line miss for common cases */
- tx_info->map0_dma = dma;
- tx_info->map0_byte_count = byte_count;
- }
+ if (!tx_info->inl)
+ if (!mlx4_en_build_dma_wqe(priv, shinfo, data, skb,
+ lso_header_size, ring->mr_key,
+ tx_info))
+ goto tx_drop_count;
/*
* For timestamping add flag to skb_shinfo and
@@ -1056,16 +1077,6 @@
}
return NETDEV_TX_OK;
-tx_drop_unmap:
- en_err(priv, "DMA mapping error\n");
-
- while (++i_frag < shinfo->nr_frags) {
- ++data;
- dma_unmap_page(ddev, (dma_addr_t) be64_to_cpu(data->addr),
- be32_to_cpu(data->byte_count),
- PCI_DMA_TODEVICE);
- }
-
tx_drop_count:
ring->tx_dropped++;
tx_drop:
@@ -1073,52 +1084,41 @@
return NETDEV_TX_OK;
}
+#define MLX4_EN_XDP_TX_NRTXBB 1
+#define MLX4_EN_XDP_TX_REAL_SZ (((CTRL_SIZE + MLX4_EN_XDP_TX_NRTXBB * DS_SIZE) \
+ / 16) & 0x3f)
+
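Worked arithmetic for MLX4_EN_XDP_TX_REAL_SZ, assuming the usual mlx4 segment sizes (CTRL_SIZE == DS_SIZE == 16 bytes): (16 + 1 * 16) / 16 = 2, and 2 & 0x3f = 2, so fence_size advertises a 2 * 16 = 32-byte descriptor, which fits in the single 64-byte TXBB that MLX4_EN_XDP_TX_NRTXBB reserves.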
netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_rx_alloc *frame,
struct net_device *dev, unsigned int length,
- int tx_ind, int *doorbell_pending)
+ int tx_ind, bool *doorbell_pending)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
union mlx4_wqe_qpn_vlan qpn_vlan = {};
- struct mlx4_en_tx_ring *ring;
struct mlx4_en_tx_desc *tx_desc;
- struct mlx4_wqe_data_seg *data;
struct mlx4_en_tx_info *tx_info;
- int index, bf_index;
- bool send_doorbell;
- int nr_txbb = 1;
- bool stop_queue;
+ struct mlx4_wqe_data_seg *data;
+ struct mlx4_en_tx_ring *ring;
dma_addr_t dma;
- int real_size;
__be32 op_own;
- u32 ring_cons;
- bool bf_ok;
+ int index;
- BUILD_BUG_ON_MSG(ALIGN(CTRL_SIZE + DS_SIZE, TXBB_SIZE) != TXBB_SIZE,
- "mlx4_en_xmit_frame requires minimum size tx desc");
+ if (unlikely(!priv->port_up))
+ goto tx_drop;
ring = priv->tx_ring[TX_XDP][tx_ind];
- if (!priv->port_up)
- goto tx_drop;
-
- if (mlx4_en_is_tx_ring_full(ring))
+ if (unlikely(mlx4_en_is_tx_ring_full(ring)))
goto tx_drop_count;
- /* fetch ring->cons far ahead before needing it to avoid stall */
- ring_cons = READ_ONCE(ring->cons);
-
index = ring->prod & ring->size_mask;
tx_info = &ring->tx_info[index];
- bf_ok = ring->bf_enabled;
-
/* Track current inflight packets for performance analysis */
AVG_PERF_COUNTER(priv->pstats.inflight_avg,
- (u32)(ring->prod - ring_cons - 1));
+ (u32)(ring->prod - READ_ONCE(ring->cons) - 1));
- bf_index = ring->prod;
- tx_desc = ring->buf + index * TXBB_SIZE;
+ tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
data = &tx_desc->data;
dma = frame->dma;
@@ -1127,9 +1127,9 @@
frame->page = NULL;
tx_info->map0_dma = dma;
tx_info->map0_byte_count = PAGE_SIZE;
- tx_info->nr_txbb = nr_txbb;
+ tx_info->nr_txbb = MLX4_EN_XDP_TX_NRTXBB;
tx_info->nr_bytes = max_t(unsigned int, length, ETH_ZLEN);
- tx_info->data_offset = (void *)data - (void *)tx_desc;
+ tx_info->data_offset = offsetof(struct mlx4_en_tx_desc, data);
tx_info->ts_requested = 0;
tx_info->nr_maps = 1;
tx_info->linear = 1;
@@ -1153,28 +1153,19 @@
rx_ring->xdp_tx++;
AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, length);
- ring->prod += nr_txbb;
+ ring->prod += MLX4_EN_XDP_TX_NRTXBB;
- stop_queue = mlx4_en_is_tx_ring_full(ring);
- send_doorbell = stop_queue ||
- *doorbell_pending > MLX4_EN_DOORBELL_BUDGET;
- bf_ok &= send_doorbell;
+ qpn_vlan.fence_size = MLX4_EN_XDP_TX_REAL_SZ;
- real_size = ((CTRL_SIZE + nr_txbb * DS_SIZE) / 16) & 0x3f;
-
- if (bf_ok)
- qpn_vlan.bf_qpn = ring->doorbell_qpn | cpu_to_be32(real_size);
- else
- qpn_vlan.fence_size = real_size;
-
- mlx4_en_tx_write_desc(ring, tx_desc, qpn_vlan, TXBB_SIZE, bf_index,
- op_own, bf_ok, send_doorbell);
- *doorbell_pending = send_doorbell ? 0 : *doorbell_pending + 1;
+ mlx4_en_tx_write_desc(ring, tx_desc, qpn_vlan, TXBB_SIZE, 0,
+ op_own, false, false);
+ *doorbell_pending = true;
return NETDEV_TX_OK;
tx_drop_count:
rx_ring->xdp_tx_full++;
+ *doorbell_pending = true;
tx_drop:
return NETDEV_TX_BUSY;
}
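The rewritten mlx4_en_xmit_frame() no longer rings the doorbell itself; it only marks *doorbell_pending. A sketch of the caller-side contract this implies (illustrative, assuming the RX NAPI loop in en_rx.c flushes once per batch):
	bool doorbell_pending = false;

	/* ... each XDP_TX verdict calls mlx4_en_xmit_frame(), which
	 * queues one descriptor and sets doorbell_pending ...
	 */

	if (doorbell_pending)
		mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq->ring]);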
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 83aab1e..457e070 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -119,7 +119,7 @@
static char mlx4_version[] =
DRV_NAME ": Mellanox ConnectX core driver v"
- DRV_VERSION " (" DRV_RELDATE ")\n";
+ DRV_VERSION "\n";
static struct mlx4_profile default_profile = {
.num_qp = 1 << 18,
@@ -2356,8 +2356,8 @@
MLX4_A0_STEERING_TABLE_SIZE;
}
- mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n",
- dmfs_high_rate_steering_mode_str(
+ mlx4_info(dev, "DMFS high rate steer mode is: %s\n",
+ dmfs_high_rate_steering_mode_str(
dev->caps.dmfs_high_steer_mode));
}
} else {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index b4f1bc5..6ea2b7a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -56,8 +56,7 @@
#define DRV_NAME "mlx4_core"
#define PFX DRV_NAME ": "
-#define DRV_VERSION "2.2-1"
-#define DRV_RELDATE "Feb, 2014"
+#define DRV_VERSION "4.0-0"
#define MLX4_FS_UDP_UC_EN (1 << 1)
#define MLX4_FS_TCP_UC_EN (1 << 2)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 39f401a..963b77d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -58,8 +58,7 @@
#include "mlx4_stats.h"
#define DRV_NAME "mlx4_en"
-#define DRV_VERSION "2.2-1"
-#define DRV_RELDATE "Feb 2014"
+#define DRV_VERSION "4.0-0"
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
@@ -73,7 +72,8 @@
#define DEF_RX_RINGS 16
#define MAX_RX_RINGS 128
#define MIN_RX_RINGS 4
-#define TXBB_SIZE 64
+#define LOG_TXBB_SIZE 6
+#define TXBB_SIZE BIT(LOG_TXBB_SIZE)
#define HEADROOM (2048 / TXBB_SIZE + 1)
#define STAMP_STRIDE 64
#define STAMP_DWORDS (STAMP_STRIDE / 4)
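With TXBB_SIZE now derived from LOG_TXBB_SIZE, the many index * TXBB_SIZE computations elsewhere in this patch become shifts. A minimal user-space sketch of the equivalence (illustration only, not driver code):
	#include <assert.h>

	#define LOG_TXBB_SIZE 6
	#define TXBB_SIZE (1U << LOG_TXBB_SIZE)	/* 64, as before */

	int main(void)
	{
		unsigned int index;

		/* The two forms agree because TXBB_SIZE is an exact
		 * power of two.
		 */
		for (index = 0; index < 1024; index++)
			assert(index * TXBB_SIZE ==
			       (index << LOG_TXBB_SIZE));
		return 0;
	}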
@@ -116,13 +116,12 @@
#define MLX4_EN_MIN_TX_RING_P_UP 1
#define MLX4_EN_MAX_TX_RING_P_UP 32
#define MLX4_EN_NUM_UP 8
-#define MLX4_EN_DEF_TX_RING_SIZE 512
#define MLX4_EN_DEF_RX_RING_SIZE 1024
+#define MLX4_EN_DEF_TX_RING_SIZE MLX4_EN_DEF_RX_RING_SIZE
#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \
MLX4_EN_NUM_UP)
#define MLX4_EN_DEFAULT_TX_WORK 256
-#define MLX4_EN_DOORBELL_BUDGET 8
/* Target number of packets to coalesce with interrupt moderation */
#define MLX4_EN_RX_COAL_TARGET 44
@@ -277,7 +276,7 @@
struct netdev_queue *tx_queue;
u32 (*free_tx_desc)(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
- int index, u8 owner,
+ int index,
u64 timestamp, int napi_mode);
struct mlx4_en_rx_ring *recycle_ring;
@@ -360,7 +359,10 @@
struct mlx4_hwq_resources wqres;
int ring;
struct net_device *dev;
- struct napi_struct napi;
+ union {
+ struct napi_struct napi;
+ bool xdp_busy;
+ };
int size;
int buf_size;
int vector;
@@ -432,7 +434,7 @@
int base_qpn;
struct mlx4_qp qps[MAX_RX_RINGS];
enum mlx4_qp_state state[MAX_RX_RINGS];
- struct mlx4_qp indir_qp;
+ struct mlx4_qp *indir_qp;
enum mlx4_qp_state indir_state;
};
@@ -690,7 +692,7 @@
netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_rx_alloc *frame,
struct net_device *dev, unsigned int length,
- int tx_ind, int *doorbell_pending);
+ int tx_ind, bool *doorbell_pending);
void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring);
bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
struct mlx4_en_rx_alloc *frame);
@@ -722,13 +724,15 @@
int budget);
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
+bool mlx4_en_process_tx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq, int napi_budget);
u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
- int index, u8 owner, u64 timestamp,
+ int index, u64 timestamp,
int napi_mode);
u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
- int index, u8 owner, u64 timestamp,
+ int index, u64 timestamp,
int napi_mode);
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int is_tx, int rss, int qpn, int cqn, int user_prio,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 27251a7..cf1ef48 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -11,6 +11,16 @@
Core driver for low level functionality of the ConnectX-4 and
Connect-IB cards by Mellanox Technologies.
+config MLX5_FPGA
+ bool "Mellanox Technologies Innova support"
+ depends on MLX5_CORE
+ ---help---
+ Build support for the Innova family of network cards by Mellanox
+ Technologies. Innova network cards are comprised of a ConnectX chip
+ and an FPGA chip on one board. If you select this option, the
+ mlx5_core driver will include the Innova FPGA core and allow building
+ sandbox-specific client drivers.
+
config MLX5_CORE_EN
bool "Mellanox Technologies ConnectX-4 Ethernet support"
depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 9e64461..5ad093a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -1,10 +1,13 @@
obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
+subdir-ccflags-y += -I$(src)
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
fs_counters.o rl.o lag.o dev.o
+mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o
+
mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \
en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \
@@ -12,4 +15,4 @@
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
-mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib.o
+mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 66bd213..3c95f7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -274,7 +274,6 @@
}
EXPORT_SYMBOL_GPL(mlx5_db_free);
-
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
{
u64 addr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 10d2828..4d5bd01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -217,7 +217,6 @@
kfree(ent);
}
-
static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
struct mlx5_cmd_mailbox *next = ent->out->next;
@@ -786,6 +785,8 @@
struct mlx5_cmd_layout *lay;
struct semaphore *sem;
unsigned long flags;
+ bool poll_cmd = ent->polling;
+
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
@@ -846,7 +847,7 @@
iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
mmiowb();
/* if not in polling don't use ent after this point */
- if (cmd->mode == CMD_MODE_POLLING) {
+ if (cmd->mode == CMD_MODE_POLLING || poll_cmd) {
poll_timeout(ent);
/* make sure we read the descriptor after ownership is SW */
rmb();
@@ -874,7 +875,7 @@
case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
return "command input length error";
case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
- return "command ouput length error";
+ return "command output length error";
case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
return "reserved fields not cleared";
case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
@@ -890,7 +891,7 @@
struct mlx5_cmd *cmd = &dev->cmd;
int err;
- if (cmd->mode == CMD_MODE_POLLING) {
+ if (cmd->mode == CMD_MODE_POLLING || ent->polling) {
wait_for_completion(&ent->done);
} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
ent->ret = -ETIMEDOUT;
@@ -918,7 +919,7 @@
struct mlx5_cmd_msg *out, void *uout, int uout_size,
mlx5_cmd_cbk_t callback,
void *context, int page_queue, u8 *status,
- u8 token)
+ u8 token, bool force_polling)
{
struct mlx5_cmd *cmd = &dev->cmd;
struct mlx5_cmd_work_ent *ent;
@@ -936,6 +937,7 @@
return PTR_ERR(ent);
ent->token = token;
+ ent->polling = force_polling;
if (!callback)
init_completion(&ent->done);
@@ -1001,7 +1003,6 @@
return err ? err : count;
}
-
static const struct file_operations fops = {
.owner = THIS_MODULE,
.open = simple_open,
@@ -1153,7 +1154,7 @@
}
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
- struct mlx5_cmd_msg *msg)
+ struct mlx5_cmd_msg *msg)
{
struct mlx5_cmd_mailbox *head = msg->next;
struct mlx5_cmd_mailbox *next;
@@ -1537,7 +1538,8 @@
}
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
- int out_size, mlx5_cmd_cbk_t callback, void *context)
+ int out_size, mlx5_cmd_cbk_t callback, void *context,
+ bool force_polling)
{
struct mlx5_cmd_msg *inb;
struct mlx5_cmd_msg *outb;
@@ -1582,7 +1584,7 @@
}
err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
- pages_queue, &status, token);
+ pages_queue, &status, token, force_polling);
if (err)
goto out_out;
@@ -1610,7 +1612,7 @@
{
int err;
- err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
+ err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
@@ -1619,10 +1621,22 @@
void *out, int out_size, mlx5_cmd_cbk_t callback,
void *context)
{
- return cmd_exec(dev, in, in_size, out, out_size, callback, context);
+ return cmd_exec(dev, in, in_size, out, out_size, callback, context,
+ false);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
+int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
+ void *out, int out_size)
+{
+ int err;
+
+ err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
+
+ return err ? : mlx5_cmd_check(dev, in, out);
+}
+EXPORT_SYMBOL(mlx5_cmd_exec_polling);
+
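A hedged usage sketch for the new export (the opcode and IFC field names here are chosen for illustration from structures used elsewhere in this patch): callers that cannot rely on EQE-driven completions can force a polled command.
	static int query_q_counter_polled(struct mlx5_core_dev *dev,
					  u32 counter_id)
	{
		u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {0};
		u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};

		MLX5_SET(query_q_counter_in, in, opcode,
			 MLX5_CMD_OP_QUERY_Q_COUNTER);
		MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
		/* Completes by polling even when cmd->mode is not
		 * CMD_MODE_POLLING.
		 */
		return mlx5_cmd_exec_polling(dev, in, sizeof(in),
					     out, sizeof(out));
	}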
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
struct cmd_msg_cache *ch;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index e94a953..7ecadb5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -168,7 +168,6 @@
return ret;
}
-
static ssize_t average_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
@@ -405,7 +404,7 @@
u32 *out;
int err;
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return param;
@@ -466,7 +465,6 @@
return -EINVAL;
}
-
if (is_str)
ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
else
@@ -562,7 +560,6 @@
rem_res_tree(qp->dbg);
}
-
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 944fc17..5d9ace4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -52,8 +52,10 @@
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
-#define MLX5E_HW2SW_MTU(hwmtu) ((hwmtu) - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
-#define MLX5E_SW2HW_MTU(swmtu) ((swmtu) + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+
+#define MLX5E_HW2SW_MTU(priv, hwmtu) ((hwmtu) - ((priv)->hard_mtu))
+#define MLX5E_SW2HW_MTU(priv, swmtu) ((swmtu) + ((priv)->hard_mtu))
#define MLX5E_MAX_NUM_TC 8
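Worked example of the parametrized conversion (Ethernet profile, where priv->hard_mtu is MLX5E_ETH_HARD_MTU = 14 + 4 + 4 = 22 bytes); presumably profiles with different framing overhead can now supply their own hard_mtu:
	u16 sw_mtu = 1500;
	u16 hw_mtu = MLX5E_SW2HW_MTU(priv, sw_mtu);	/* 1522 */
	u16 back   = MLX5E_HW2SW_MTU(priv, hw_mtu);	/* 1500 again */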
@@ -625,6 +627,8 @@
struct rhashtable_params ht_params;
struct rhashtable ht;
+
+ DECLARE_HASHTABLE(mod_hdr_tbl, 8);
};
struct mlx5e_vlan_table {
@@ -745,6 +749,7 @@
struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
u32 tx_rates[MLX5E_MAX_NUM_SQS];
+ int hard_mtu;
struct mlx5e_flow_steering fs;
struct mlx5e_vxlan_db vxlan;
@@ -780,6 +785,7 @@
void (*enable)(struct mlx5e_priv *priv);
void (*disable)(struct mlx5e_priv *priv);
void (*update_stats)(struct mlx5e_priv *priv);
+ void (*update_carrier)(struct mlx5e_priv *priv);
int (*max_nch)(struct mlx5_core_dev *mdev);
struct {
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
@@ -820,7 +826,7 @@
void mlx5e_rx_am_work(struct work_struct *work);
struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode);
-void mlx5e_update_stats(struct mlx5e_priv *priv);
+void mlx5e_update_stats(struct mlx5e_priv *priv, bool full);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
@@ -847,8 +853,8 @@
void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
struct ptp_clock_event *event);
-int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
-int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
+int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
+int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -1019,6 +1025,29 @@
void mlx5e_update_stats_work(struct work_struct *work);
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
+/* ethtool helpers */
+void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
+ struct ethtool_drvinfo *drvinfo);
+void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
+ uint32_t stringset, uint8_t *data);
+int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
+void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
+ struct ethtool_stats *stats, u64 *data);
+void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
+ struct ethtool_ringparam *param);
+int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
+ struct ethtool_ringparam *param);
+void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
+ struct ethtool_channels *ch);
+int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
+ struct ethtool_channels *ch);
+int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
+ struct ethtool_coalesce *coal);
+int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
+ struct ethtool_coalesce *coal);
+int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
+ struct ethtool_ts_info *info);
+
/* mlx5e generic netdev management API */
struct net_device*
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index c8a0053..12d3ced 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -178,34 +178,26 @@
struct mlx5_flow_destination dest;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
+ enum mlx5e_traffic_types tt;
int err = 0;
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
- netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
err = -ENOMEM;
goto out;
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- switch (type) {
- case ARFS_IPV4_TCP:
- dest.tir_num = tir[MLX5E_TT_IPV4_TCP].tirn;
- break;
- case ARFS_IPV4_UDP:
- dest.tir_num = tir[MLX5E_TT_IPV4_UDP].tirn;
- break;
- case ARFS_IPV6_TCP:
- dest.tir_num = tir[MLX5E_TT_IPV6_TCP].tirn;
- break;
- case ARFS_IPV6_UDP:
- dest.tir_num = tir[MLX5E_TT_IPV6_UDP].tirn;
- break;
- default:
+ tt = arfs_get_tt(type);
+ if (tt == -EINVAL) {
+ netdev_err(priv->netdev, "%s: bad arfs_type: %d\n",
+ __func__, type);
err = -EINVAL;
goto out;
}
+ dest.tir_num = tir[tt].tirn;
+
arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec,
&flow_act,
&dest, 1);
@@ -237,7 +229,7 @@
ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
sizeof(*ft->g), GFP_KERNEL);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in || !ft->g) {
kvfree(ft->g);
kvfree(in);
@@ -481,9 +473,8 @@
struct mlx5_flow_table *ft;
int err = 0;
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
- netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
err = -ENOMEM;
goto out;
}
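The mlx5_vzalloc() -> kvzalloc() conversions in this and the following files are mechanical. For reference, the old wrapper was essentially the open-coded kmalloc-then-vmalloc fallback that kvzalloc(size, GFP_KERNEL) now provides in common code (a sketch reconstructed from its header definition, not quoted from this patch):
	static inline void *mlx5_vzalloc(unsigned long size)
	{
		void *rtn;

		rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
		if (!rtn)
			rtn = vzalloc(size);
		return rtn;
	}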
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index e706a87..66f4323 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -86,9 +86,8 @@
schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
}
-int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
+int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
struct hwtstamp_config config;
int err;
@@ -128,11 +127,12 @@
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
/* Disable CQE compression */
- netdev_warn(dev, "Disabling cqe compression");
+ netdev_warn(priv->netdev, "Disabling cqe compression");
err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
if (err) {
- netdev_err(dev, "Failed disabling cqe compression err=%d\n", err);
+ netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
mutex_unlock(&priv->state_lock);
return err;
}
@@ -150,9 +150,8 @@
sizeof(config)) ? -EFAULT : 0;
}
-int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr)
+int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
struct hwtstamp_config *cfg = &priv->tstamp.hwtstamp_config;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index f1f17f7..ece3fb1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -65,7 +65,7 @@
u32 *in;
int err;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -145,9 +145,8 @@
int inlen;
void *in;
-
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 16486df..169435d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -32,14 +32,13 @@
#include "en.h"
-static void mlx5e_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *drvinfo)
+void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
+ struct ethtool_drvinfo *drvinfo)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRIVER_VERSION " (" DRIVER_RELDATE ")",
+ strlcpy(drvinfo->version, DRIVER_VERSION,
sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)",
@@ -49,6 +48,14 @@
sizeof(drvinfo->bus_info));
}
+static void mlx5e_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ mlx5e_ethtool_get_drvinfo(priv, drvinfo);
+}
+
struct ptys2ethtool_config {
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
@@ -135,6 +142,9 @@
u8 pfc_en_rx;
int err;
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return 0;
+
err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
return err ? 0 : pfc_en_tx | pfc_en_rx;
@@ -147,6 +157,9 @@
u32 tx_pause;
int err;
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return false;
+
err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
return err ? false : rx_pause | tx_pause;
@@ -160,9 +173,8 @@
((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
NUM_PPORT_PER_PRIO_PFC_COUNTERS)
-static int mlx5e_get_sset_count(struct net_device *dev, int sset)
+int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
switch (sset) {
case ETH_SS_STATS:
@@ -186,6 +198,13 @@
}
}
+static int mlx5e_get_sset_count(struct net_device *dev, int sset)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_get_sset_count(priv, sset);
+}
+
static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
{
int i, j, tc, prio, idx = 0;
@@ -273,10 +292,9 @@
priv->channel_tc2txq[i][tc]);
}
-static void mlx5e_get_strings(struct net_device *dev,
- uint32_t stringset, uint8_t *data)
+void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
+ uint32_t stringset, uint8_t *data)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
int i;
switch (stringset) {
@@ -297,10 +315,17 @@
}
}
-static void mlx5e_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
+static void mlx5e_get_strings(struct net_device *dev,
+ uint32_t stringset, uint8_t *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+
+ mlx5e_ethtool_get_strings(priv, stringset, data);
+}
+
+void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
+ struct ethtool_stats *stats, u64 *data)
+{
struct mlx5e_channels *channels;
struct mlx5_priv *mlx5_priv;
int i, j, tc, prio, idx = 0;
@@ -311,7 +336,7 @@
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_update_stats(priv);
+ mlx5e_update_stats(priv, true);
channels = &priv->channels;
mutex_unlock(&priv->state_lock);
@@ -395,6 +420,15 @@
sq_stats_desc, j);
}
+static void mlx5e_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
+}
+
static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
int num_wqe)
{
@@ -439,10 +473,9 @@
return 1 << (order_base_2(num_wqes));
}
-static void mlx5e_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
+ struct ethtool_ringparam *param)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
int rq_wq_type = priv->channels.params.rq_wq_type;
param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
@@ -453,10 +486,17 @@
param->tx_pending = 1 << priv->channels.params.log_sq_size;
}
-static int mlx5e_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+static void mlx5e_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+
+ mlx5e_ethtool_get_ringparam(priv, param);
+}
+
+int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
+ struct ethtool_ringparam *param)
+{
int rq_wq_type = priv->channels.params.rq_wq_type;
struct mlx5e_channels new_channels = {};
u32 rx_pending_wqes;
@@ -468,12 +508,12 @@
int err = 0;
if (param->rx_jumbo_pending) {
- netdev_info(dev, "%s: rx_jumbo_pending not supported\n",
+ netdev_info(priv->netdev, "%s: rx_jumbo_pending not supported\n",
__func__);
return -EINVAL;
}
if (param->rx_mini_pending) {
- netdev_info(dev, "%s: rx_mini_pending not supported\n",
+ netdev_info(priv->netdev, "%s: rx_mini_pending not supported\n",
__func__);
return -EINVAL;
}
@@ -486,13 +526,13 @@
param->rx_pending);
if (param->rx_pending < min_rq_size) {
- netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
+ netdev_info(priv->netdev, "%s: rx_pending (%d) < min (%d)\n",
__func__, param->rx_pending,
min_rq_size);
return -EINVAL;
}
if (param->rx_pending > max_rq_size) {
- netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
+ netdev_info(priv->netdev, "%s: rx_pending (%d) > max (%d)\n",
__func__, param->rx_pending,
max_rq_size);
return -EINVAL;
@@ -501,19 +541,19 @@
num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
!MLX5E_VALID_NUM_MTTS(num_mtts)) {
- netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
+ netdev_info(priv->netdev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
__func__, param->rx_pending);
return -EINVAL;
}
if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
- netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
+ netdev_info(priv->netdev, "%s: tx_pending (%d) < min (%d)\n",
__func__, param->tx_pending,
1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
return -EINVAL;
}
if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) {
- netdev_info(dev, "%s: tx_pending (%d) > max (%d)\n",
+ netdev_info(priv->netdev, "%s: tx_pending (%d) > max (%d)\n",
__func__, param->tx_pending,
1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE);
return -EINVAL;
@@ -549,26 +589,39 @@
return err;
}
+static int mlx5e_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_set_ringparam(priv, param);
+}
+
+void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
+ struct ethtool_channels *ch)
+{
+ ch->max_combined = priv->profile->max_nch(priv->mdev);
+ ch->combined_count = priv->channels.params.num_channels;
+}
+
static void mlx5e_get_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- ch->max_combined = priv->profile->max_nch(priv->mdev);
- ch->combined_count = priv->channels.params.num_channels;
+ mlx5e_ethtool_get_channels(priv, ch);
}
-static int mlx5e_set_channels(struct net_device *dev,
- struct ethtool_channels *ch)
+int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
+ struct ethtool_channels *ch)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
unsigned int count = ch->combined_count;
struct mlx5e_channels new_channels = {};
bool arfs_enabled;
int err = 0;
if (!count) {
- netdev_info(dev, "%s: combined_count=0 not supported\n",
+ netdev_info(priv->netdev, "%s: combined_count=0 not supported\n",
__func__);
return -EINVAL;
}
@@ -593,7 +646,7 @@
if (err)
goto out;
- arfs_enabled = dev->features & NETIF_F_NTUPLE;
+ arfs_enabled = priv->netdev->features & NETIF_F_NTUPLE;
if (arfs_enabled)
mlx5e_arfs_disable(priv);
@@ -603,7 +656,7 @@
if (arfs_enabled) {
err = mlx5e_arfs_enable(priv);
if (err)
- netdev_err(dev, "%s: mlx5e_arfs_enable failed: %d\n",
+ netdev_err(priv->netdev, "%s: mlx5e_arfs_enable failed: %d\n",
__func__, err);
}
@@ -613,11 +666,17 @@
return err;
}
-static int mlx5e_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *coal)
+static int mlx5e_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ return mlx5e_ethtool_set_channels(priv, ch);
+}
+
+int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
+ struct ethtool_coalesce *coal)
+{
if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
return -EOPNOTSUPP;
@@ -630,6 +689,14 @@
return 0;
}
+static int mlx5e_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_get_coalesce(priv, coal);
+}
+
static void
mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
{
@@ -653,10 +720,9 @@
}
}
-static int mlx5e_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *coal)
+int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
+ struct ethtool_coalesce *coal)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
int err = 0;
@@ -699,6 +765,14 @@
return err;
}
+static int mlx5e_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_set_coalesce(priv, coal);
+}
+
static void ptys2ethtool_supported_link(unsigned long *supported_modes,
u32 eth_proto_cap)
{
@@ -723,24 +797,81 @@
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static void ptys2ethtool_supported_port(struct ethtool_link_ksettings *link_ksettings,
- u32 eth_proto_cap)
+static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
+ u32 eth_proto_cap,
+ u8 connector_type)
{
- if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
- | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
- | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
- | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
- | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
- | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
- ethtool_link_ksettings_add_link_mode(link_ksettings, supported, FIBRE);
+ if (!connector_type || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
+ if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported,
+ FIBRE);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising,
+ FIBRE);
+ }
+
+ if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
+ | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported,
+ Backplane);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising,
+ Backplane);
+ }
+ return;
}
- if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
- | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
- | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
- | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
- | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
- ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Backplane);
+ switch (connector_type) {
+ case MLX5E_PORT_TP:
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported, TP);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, TP);
+ break;
+ case MLX5E_PORT_AUI:
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported, AUI);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, AUI);
+ break;
+ case MLX5E_PORT_BNC:
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported, BNC);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, BNC);
+ break;
+ case MLX5E_PORT_MII:
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported, MII);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, MII);
+ break;
+ case MLX5E_PORT_FIBRE:
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, FIBRE);
+ break;
+ case MLX5E_PORT_DA:
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported, Backplane);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, Backplane);
+ break;
+ case MLX5E_PORT_NONE:
+ case MLX5E_PORT_OTHER:
+ default:
+ break;
}
}
@@ -791,7 +922,6 @@
{
unsigned long *supported = link_ksettings->link_modes.supported;
- ptys2ethtool_supported_port(link_ksettings, eth_proto_cap);
ptys2ethtool_supported_link(supported, eth_proto_cap);
ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
}
@@ -809,8 +939,23 @@
ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause);
}
-static u8 get_connector_port(u32 eth_proto)
+static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = {
+ [MLX5E_PORT_UNKNOWN] = PORT_OTHER,
+ [MLX5E_PORT_NONE] = PORT_NONE,
+ [MLX5E_PORT_TP] = PORT_TP,
+ [MLX5E_PORT_AUI] = PORT_AUI,
+ [MLX5E_PORT_BNC] = PORT_BNC,
+ [MLX5E_PORT_MII] = PORT_MII,
+ [MLX5E_PORT_FIBRE] = PORT_FIBRE,
+ [MLX5E_PORT_DA] = PORT_DA,
+ [MLX5E_PORT_OTHER] = PORT_OTHER,
+ };
+
+static u8 get_connector_port(u32 eth_proto, u8 connector_type)
{
+ if (connector_type && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
+ return ptys2connector_type[connector_type];
+
if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
| MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
| MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
@@ -856,6 +1001,7 @@
u32 eth_proto_oper;
u8 an_disable_admin;
u8 an_status;
+ u8 connector_type;
int err;
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
@@ -871,6 +1017,7 @@
eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
an_status = MLX5_GET(ptys_reg, out, an_status);
+ connector_type = MLX5_GET(ptys_reg, out, connector_type);
mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
@@ -883,7 +1030,10 @@
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
- link_ksettings->base.port = get_connector_port(eth_proto_oper);
+ link_ksettings->base.port = get_connector_port(eth_proto_oper,
+ connector_type);
+ ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin,
+ connector_type);
get_lp_advertising(eth_proto_lp, link_ksettings);
if (an_status == MLX5_AN_COMPLETE)
@@ -1048,7 +1198,7 @@
(hfunc != ETH_RSS_HASH_TOP))
return -EINVAL;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1222,13 +1372,12 @@
return err;
}
-static int mlx5e_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
+ struct ethtool_ts_info *info)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
int ret;
- ret = ethtool_op_get_ts_info(dev, info);
+ ret = ethtool_op_get_ts_info(priv->netdev, info);
if (ret)
return ret;
@@ -1251,6 +1400,14 @@
return 0;
}
+static int mlx5e_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_get_ts_info(priv, info);
+}
+
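The recurring split in this file — a priv-level mlx5e_ethtool_*() helper plus a thin netdev-facing wrapper — in schematic form (hypothetical op "foo" and struct "foo", shown only to make the pattern explicit); presumably the helpers can then be reused by profiles that are not plain netdevs:
	int mlx5e_ethtool_get_foo(struct mlx5e_priv *priv, struct foo *val);

	static int mlx5e_get_foo(struct net_device *dev, struct foo *val)
	{
		struct mlx5e_priv *priv = netdev_priv(dev);

		return mlx5e_ethtool_get_foo(priv, val);
	}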
static __u32 mlx5e_get_wol_supported(struct mlx5_core_dev *mdev)
{
__u32 ret = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 53ed583..dfccb53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -170,7 +170,6 @@
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
rule_p = &priv->fs.vlan.untagged_rule;
@@ -218,11 +217,9 @@
struct mlx5_flow_spec *spec;
int err = 0;
- spec = mlx5_vzalloc(sizeof(*spec));
- if (!spec) {
- netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
return -ENOMEM;
- }
if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
mlx5e_vport_context_update_vlans(priv);
@@ -660,11 +657,9 @@
struct mlx5_flow_spec *spec;
int err = 0;
- spec = mlx5_vzalloc(sizeof(*spec));
- if (!spec) {
- netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
return ERR_PTR(-ENOMEM);
- }
if (proto) {
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
@@ -742,7 +737,7 @@
sizeof(*ft->g), GFP_KERNEL);
if (!ft->g)
return -ENOMEM;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
return -ENOMEM;
@@ -852,11 +847,9 @@
u8 *mc_dmac;
u8 *mv_dmac;
- spec = mlx5_vzalloc(sizeof(*spec));
- if (!spec) {
- netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
return -ENOMEM;
- }
mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers.dmac_47_16);
@@ -916,7 +909,7 @@
ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
if (!ft->g)
return -ENOMEM;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
return -ENOMEM;
@@ -1071,7 +1064,7 @@
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
int err;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 85bf4a3..bdd82c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -296,7 +296,7 @@
struct mlx5_flow_handle *rule;
int err = 0;
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return ERR_PTR(-ENOMEM);
err = set_flow_attrs(spec->match_criteria, spec->match_value,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 277f4de..167bca5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -124,7 +124,8 @@
u8 port_state;
port_state = mlx5_query_vport_state(mdev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT,
+ 0);
if (port_state == VPORT_STATE_UP) {
netdev_info(priv->netdev, "Link up\n");
@@ -142,7 +143,8 @@
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_update_carrier(priv);
+ if (priv->profile->update_carrier)
+ priv->profile->update_carrier(priv);
mutex_unlock(&priv->state_lock);
}
@@ -243,18 +245,14 @@
mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
-static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
+static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
int prio;
void *out;
- u32 *in;
-
- in = mlx5_vzalloc(sz);
- if (!in)
- goto free_out;
MLX5_SET(ppcnt_reg, in, local_port, 1);
@@ -262,6 +260,9 @@
MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+ if (!full)
+ return;
+
out = pstats->RFC_2863_counters;
MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
@@ -287,53 +288,55 @@
mlx5_core_access_reg(mdev, in, sz, out, sz,
MLX5_REG_PPCNT, 0, 0);
}
-
-free_out:
- kvfree(in);
}
static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
{
struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
+ u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
+ int err;
if (!priv->q_counter)
return;
- mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter,
- &qcnt->rx_out_of_buffer);
+ err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
+ if (err)
+ return;
+
+ qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
}
static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
{
struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
void *out;
- u32 *in;
if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
return;
- in = mlx5_vzalloc(sz);
- if (!in)
- return;
-
out = pcie_stats->pcie_perf_counters;
MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-
- kvfree(in);
}
-void mlx5e_update_stats(struct mlx5e_priv *priv)
+void mlx5e_update_stats(struct mlx5e_priv *priv, bool full)
{
- mlx5e_update_pcie_counters(priv);
- mlx5e_update_pport_counters(priv);
+ if (full)
+ mlx5e_update_pcie_counters(priv);
+ mlx5e_update_pport_counters(priv, full);
mlx5e_update_vport_counters(priv);
mlx5e_update_q_counter(priv);
mlx5e_update_sw_counters(priv);
}
+static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
+{
+ mlx5e_update_stats(priv, false);
+}
+
void mlx5e_update_stats_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
@@ -503,7 +506,7 @@
if (!MLX5E_VALID_NUM_MTTS(npages))
return -EINVAL;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -632,7 +635,7 @@
rq->buff.wqe_sz = params->lro_en ?
params->lro_wqe_sz :
- MLX5E_SW2HW_MTU(c->netdev->mtu);
+ MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu);
byte_count = rq->buff.wqe_sz;
/* calc the required page order */
@@ -711,7 +714,7 @@
inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
sizeof(u64) * rq->wq_ctrl.buf.npages;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -748,7 +751,7 @@
int err;
inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -776,7 +779,7 @@
int err;
inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -805,7 +808,7 @@
int err;
inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1134,7 +1137,7 @@
inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
sizeof(u64) * csp->wq_ctrl->buf.npages;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1182,7 +1185,7 @@
int err;
inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1496,7 +1499,7 @@
inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2091,7 +2094,7 @@
int i;
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2210,7 +2213,7 @@
int err;
inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2433,7 +2436,7 @@
int ix;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2465,7 +2468,7 @@
static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
+ u16 hw_mtu = MLX5E_SW2HW_MTU(priv, mtu);
int err;
err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
@@ -2487,7 +2490,7 @@
if (err || !hw_mtu) /* fallback to port oper mtu */
mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
- *mtu = MLX5E_HW2SW_MTU(hw_mtu);
+ *mtu = MLX5E_HW2SW_MTU(priv, hw_mtu);
}
static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
@@ -2596,9 +2599,10 @@
{
struct net_device *netdev = priv->netdev;
int new_num_txqs;
-
+ int carrier_ok;
new_num_txqs = new_chs->num * new_chs->params.num_tc;
+ carrier_ok = netif_carrier_ok(netdev);
netif_carrier_off(netdev);
if (new_num_txqs < netdev->real_num_tx_queues)
@@ -2616,7 +2620,9 @@
mlx5e_refresh_tirs(priv, false);
mlx5e_activate_priv_channels(priv);
- mlx5e_update_carrier(priv);
+ /* return carrier back if needed */
+ if (carrier_ok)
+ netif_carrier_on(netdev);
}
int mlx5e_open_locked(struct net_device *netdev)
@@ -2632,7 +2638,8 @@
mlx5e_refresh_tirs(priv, false);
mlx5e_activate_priv_channels(priv);
- mlx5e_update_carrier(priv);
+ if (priv->profile->update_carrier)
+ priv->profile->update_carrier(priv);
mlx5e_timestamp_init(priv);
if (priv->profile->update_stats)
@@ -2850,7 +2857,7 @@
int tt;
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2889,7 +2896,7 @@
int ix;
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2992,13 +2999,17 @@
}
static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
- __be16 proto, struct tc_to_netdev *tc)
+ u32 chain_index, __be16 proto,
+ struct tc_to_netdev *tc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
goto mqprio;
+ if (chain_index)
+ return -EOPNOTSUPP;
+
switch (tc->type) {
case TC_SETUP_CLSFLOWER:
switch (tc->cls_flower->command) {
@@ -3064,7 +3075,6 @@
*/
stats->multicast =
VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
-
}
static void mlx5e_set_rx_mode(struct net_device *dev)
@@ -3307,11 +3317,13 @@
static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
switch (cmd) {
case SIOCSHWTSTAMP:
- return mlx5e_hwstamp_set(dev, ifr);
+ return mlx5e_hwstamp_set(priv, ifr);
case SIOCGHWTSTAMP:
- return mlx5e_hwstamp_get(dev, ifr);
+ return mlx5e_hwstamp_get(priv, ifr);
default:
return -EOPNOTSUPP;
}
@@ -3596,11 +3608,19 @@
return err;
}
-static bool mlx5e_xdp_attached(struct net_device *dev)
+static u32 mlx5e_xdp_query(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ const struct bpf_prog *xdp_prog;
+ u32 prog_id = 0;
- return !!priv->channels.params.xdp_prog;
+ mutex_lock(&priv->state_lock);
+ xdp_prog = priv->channels.params.xdp_prog;
+ if (xdp_prog)
+ prog_id = xdp_prog->aux->id;
+ mutex_unlock(&priv->state_lock);
+
+ return prog_id;
}
static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
@@ -3609,7 +3629,8 @@
case XDP_SETUP_PROG:
return mlx5e_xdp_set(dev, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = mlx5e_xdp_attached(dev);
+ xdp->prog_id = mlx5e_xdp_query(dev);
+ xdp->prog_attached = !!xdp->prog_id;
return 0;
default:
return -EINVAL;
@@ -3715,7 +3736,7 @@
if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
if (!MLX5_CAP_GEN(mdev, cq_moderation))
- mlx5_core_warn(mdev, "CQ modiration is not supported\n");
+ mlx5_core_warn(mdev, "CQ moderation is not supported\n");
return 0;
}
@@ -3848,7 +3869,7 @@
/* set CQE compression */
params->rx_cqe_compress_def = false;
if (MLX5_CAP_GEN(mdev, cqe_compression) &&
- MLX5_CAP_GEN(mdev, vport_group_manager))
+ MLX5_CAP_GEN(mdev, vport_group_manager))
params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw);
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
@@ -3857,6 +3878,7 @@
mlx5e_set_rq_params(mdev, params);
/* HW LRO */
+
/* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
params->lro_en = hw_lro_heuristic(link_speed, pci_bw);
@@ -3897,6 +3919,7 @@
priv->netdev = netdev;
priv->profile = profile;
priv->ppriv = ppriv;
+ priv->hard_mtu = MLX5E_ETH_HARD_MTU;
mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
@@ -4142,7 +4165,7 @@
/* MTU range: 68 - hw-specific max */
netdev->min_mtu = ETH_MIN_MTU;
mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
- netdev->max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+ netdev->max_mtu = MLX5E_HW2SW_MTU(priv, max_mtu);
mlx5e_set_dev_port_mtu(priv);
mlx5_lag_add(mdev, netdev);
@@ -4199,8 +4222,9 @@
.cleanup_tx = mlx5e_cleanup_nic_tx,
.enable = mlx5e_nic_enable,
.disable = mlx5e_nic_disable,
- .update_stats = mlx5e_update_stats,
+ .update_stats = mlx5e_update_ndo_stats,
.max_nch = mlx5e_get_max_num_channels,
+ .update_carrier = mlx5e_update_carrier,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = MLX5E_MAX_NUM_TC,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 46984a5..45e60be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -652,7 +652,8 @@
}
static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle,
- __be16 proto, struct tc_to_netdev *tc)
+ u32 chain_index, __be16 proto,
+ struct tc_to_netdev *tc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -664,9 +665,13 @@
struct net_device *uplink_dev = mlx5_eswitch_get_uplink_netdev(esw);
return uplink_dev->netdev_ops->ndo_setup_tc(uplink_dev, handle,
+ chain_index,
proto, tc);
}
+ if (chain_index)
+ return -EOPNOTSUPP;
+
switch (tc->type) {
case TC_SETUP_CLSFLOWER:
switch (tc->cls_flower->command) {
@@ -830,6 +835,9 @@
INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
priv->channels.params.num_channels = profile->max_nch(mdev);
+
+ priv->hard_mtu = MLX5E_ETH_HARD_MTU;
+
mlx5e_build_rep_params(mdev, &priv->channels.params);
mlx5e_build_rep_netdev(netdev);
}
@@ -913,6 +921,7 @@
.cleanup_tx = mlx5e_cleanup_nic_tx,
.update_stats = mlx5e_rep_update_stats,
.max_nch = mlx5e_get_rep_max_num_channels,
+ .update_carrier = NULL,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
.rx_handlers.handle_rx_cqe_mpwqe = NULL /* Not supported */,
.max_tc = 1,
@@ -1016,7 +1025,6 @@
mlx5e_destroy_netdev(netdev_priv(netdev));
kfree(rpriv);
return err;
-
}
static void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 66b5fec..abf6d2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -40,7 +40,7 @@
#include "en_tc.h"
#include "eswitch.h"
#include "en_rep.h"
-#include "ipoib.h"
+#include "ipoib/ipoib.h"
static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
{
@@ -648,7 +648,7 @@
prefetchw(wqe);
if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE ||
- MLX5E_SW2HW_MTU(rq->netdev->mtu) < dma_len)) {
+ MLX5E_SW2HW_MTU(rq->channel->priv, rq->netdev->mtu) < dma_len)) {
rq->stats.xdp_drop++;
mlx5e_page_release(rq, di, true);
return false;
@@ -1038,11 +1038,7 @@
#ifdef CONFIG_MLX5_CORE_IPOIB
#define MLX5_IB_GRH_DGID_OFFSET 24
-#define MLX5_IB_GRH_BYTES 40
-#define MLX5_IPOIB_ENCAP_LEN 4
#define MLX5_GID_SIZE 16
-#define MLX5_IPOIB_PSEUDO_LEN 20
-#define MLX5_IPOIB_HARD_LEN (MLX5_IPOIB_PSEUDO_LEN + MLX5_IPOIB_ENCAP_LEN)
static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
@@ -1050,6 +1046,7 @@
struct sk_buff *skb)
{
struct net_device *netdev = rq->netdev;
+ struct mlx5e_tstamp *tstamp = rq->tstamp;
char *pseudo_header;
u8 *dgid;
u8 g;
@@ -1074,6 +1071,9 @@
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+ if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
+ mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));
+
skb_record_rx_queue(skb, rq->ix);
if (likely(netdev->features & NETIF_F_RXHASH))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 5225f22..898759f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -132,14 +132,14 @@
skb_reserve(skb, NET_IP_ALIGN);
/* Reserve for ethernet and IP header */
- ethh = (struct ethhdr *)skb_push(skb, ETH_HLEN);
+ ethh = skb_push(skb, ETH_HLEN);
skb_reset_mac_header(skb);
skb_set_network_header(skb, skb->len);
- iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr));
+ iph = skb_put(skb, sizeof(struct iphdr));
skb_set_transport_header(skb, skb->len);
- udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr));
+ udph = skb_put(skb, sizeof(struct udphdr));
/* Fill ETH header */
ether_addr_copy(ethh->h_dest, priv->netdev->dev_addr);
@@ -167,12 +167,12 @@
ip_send_check(iph);
/* Fill test header and data */
- mlxh = (struct mlx5ehdr *)skb_put(skb, sizeof(*mlxh));
+ mlxh = skb_put(skb, sizeof(*mlxh));
mlxh->version = 0;
mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC);
strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text));
datalen -= sizeof(*mlxh);
- memset(skb_put(skb, datalen), 0, datalen);
+ skb_put_zero(skb, datalen);
skb->csum = 0;
skb->ip_summed = CHECKSUM_PARTIAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index f81c3aa..fda2475 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -268,7 +268,7 @@
};
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
- { "rx_symbol_errors_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
+ { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9df9fc0..b9170c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -69,7 +69,8 @@
u64 cookie;
u8 flags;
struct mlx5_flow_handle *rule;
- struct list_head encap; /* flows sharing the same encap */
+ struct list_head encap; /* flows sharing the same encap ID */
+ struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
@@ -90,6 +91,135 @@
#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4
+struct mod_hdr_key {
+ int num_actions;
+ void *actions;
+};
+
+struct mlx5e_mod_hdr_entry {
+ /* node in a hash table that keeps all the mod_hdr entries */
+ struct hlist_node mod_hdr_hlist;
+
+ /* flows sharing the same mod_hdr entry */
+ struct list_head flows;
+
+ struct mod_hdr_key key;
+
+ u32 mod_hdr_id;
+};
+
+#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
+
+static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
+{
+ return jhash(key->actions,
+ key->num_actions * MLX5_MH_ACT_SZ, 0);
+}
+
+static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
+ struct mod_hdr_key *b)
+{
+ if (a->num_actions != b->num_actions)
+ return 1;
+
+ return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
+}
+
+static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ int num_actions, actions_size, namespace, err;
+ struct mlx5e_mod_hdr_entry *mh;
+ struct mod_hdr_key key;
+ bool found = false;
+ u32 hash_key;
+
+ num_actions = parse_attr->num_mod_hdr_actions;
+ actions_size = MLX5_MH_ACT_SZ * num_actions;
+
+ key.actions = parse_attr->mod_hdr_actions;
+ key.num_actions = num_actions;
+
+ hash_key = hash_mod_hdr_info(&key);
+
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
+ namespace = MLX5_FLOW_NAMESPACE_FDB;
+ hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
+ mod_hdr_hlist, hash_key) {
+ if (!cmp_mod_hdr_info(&mh->key, &key)) {
+ found = true;
+ break;
+ }
+ }
+ } else {
+ namespace = MLX5_FLOW_NAMESPACE_KERNEL;
+ hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
+ mod_hdr_hlist, hash_key) {
+ if (!cmp_mod_hdr_info(&mh->key, &key)) {
+ found = true;
+ break;
+ }
+ }
+ }
+
+ if (found)
+ goto attach_flow;
+
+ mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
+ if (!mh)
+ return -ENOMEM;
+
+ mh->key.actions = (void *)mh + sizeof(*mh);
+ memcpy(mh->key.actions, key.actions, actions_size);
+ mh->key.num_actions = num_actions;
+ INIT_LIST_HEAD(&mh->flows);
+
+ err = mlx5_modify_header_alloc(priv->mdev, namespace,
+ mh->key.num_actions,
+ mh->key.actions,
+ &mh->mod_hdr_id);
+ if (err)
+ goto out_err;
+
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
+ else
+ hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
+
+attach_flow:
+ list_add(&flow->mod_hdr, &mh->flows);
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
+ else
+ flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;
+
+ return 0;
+
+out_err:
+ kfree(mh);
+ return err;
+}
+
+static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ struct list_head *next = flow->mod_hdr.next;
+
+ list_del(&flow->mod_hdr);
+
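+ /* With this flow unlinked, the neighbour saved above is an empty
+  * list head only when it is mh->flows itself, i.e. the flow was the
+  * entry's last user; only then release the modify-header ID and the
+  * entry.
+  */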
+ if (list_empty(next)) {
+ struct mlx5e_mod_hdr_entry *mh;
+
+ mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);
+
+ mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
+ hash_del(&mh->mod_hdr_hlist);
+ kfree(mh);
+ }
+}
+
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -121,10 +251,7 @@
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- err = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
- parse_attr->num_mod_hdr_actions,
- parse_attr->mod_hdr_actions,
- &attr->mod_hdr_id);
+ err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
flow_act.modify_id = attr->mod_hdr_id;
kfree(parse_attr->mod_hdr_actions);
if (err) {
@@ -166,8 +293,7 @@
}
err_create_ft:
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- mlx5_modify_header_dealloc(priv->mdev,
- attr->mod_hdr_id);
+ mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
mlx5_fc_destroy(dev, counter);
@@ -177,6 +303,7 @@
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
+ struct mlx5_nic_flow_attr *attr = flow->nic_attr;
struct mlx5_fc *counter = NULL;
counter = mlx5_flow_rule_counter(flow->rule);
@@ -188,9 +315,8 @@
priv->fs.tc.t = NULL;
}
- if (flow->nic_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- mlx5_modify_header_dealloc(priv->mdev,
- flow->nic_attr->mod_hdr_id);
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ mlx5e_detach_mod_hdr(priv, flow);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
@@ -213,10 +339,7 @@
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- err = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_FDB,
- parse_attr->num_mod_hdr_actions,
- parse_attr->mod_hdr_actions,
- &attr->mod_hdr_id);
+ err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
kfree(parse_attr->mod_hdr_actions);
if (err) {
rule = ERR_PTR(err);
@@ -231,9 +354,8 @@
return rule;
err_add_rule:
- if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- mlx5_modify_header_dealloc(priv->mdev,
- attr->mod_hdr_id);
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ mlx5e_detach_mod_hdr(priv, flow);
err_mod_hdr:
mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
@@ -250,19 +372,18 @@
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
+ mlx5_eswitch_del_offloaded_rule(esw, flow->rule, attr);
}
- mlx5_eswitch_del_vlan_action(esw, flow->esw_attr);
+ mlx5_eswitch_del_vlan_action(esw, attr);
- if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
mlx5e_detach_encap(priv, flow);
- kvfree(flow->esw_attr->parse_attr);
+ kvfree(attr->parse_attr);
}
- if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- mlx5_modify_header_dealloc(priv->mdev,
- attr->mod_hdr_id);
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ mlx5e_detach_mod_hdr(priv, flow);
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
@@ -581,7 +702,9 @@
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
+ BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_TCP) |
+ BIT(FLOW_DISSECTOR_KEY_IP))) {
netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
f->dissector->used_keys);
return -EOPNOTSUPP;
@@ -808,6 +931,48 @@
*min_inline = MLX5_INLINE_MODE_TCP_UDP;
}
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_dissector_key_ip *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ f->key);
+ struct flow_dissector_key_ip *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ f->mask);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);
+
+ if (mask->tos)
+ *min_inline = MLX5_INLINE_MODE_IP;
+
+ if (mask->ttl) /* currently not supported */
+ return -EOPNOTSUPP;
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_dissector_key_tcp *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_TCP,
+ f->key);
+ struct flow_dissector_key_tcp *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_TCP,
+ f->mask);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
+ ntohs(mask->flags));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
+ ntohs(key->flags));
+
+ if (mask->flags)
+ *min_inline = MLX5_INLINE_MODE_TCP_UDP;
+ }
+
return 0;
}
@@ -925,12 +1090,14 @@
struct mlx5e_tc_flow_parse_attr *parse_attr)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
- int i, action_size, nactions, max_actions, first, last, first_z;
+ int i, action_size, nactions, max_actions, first, last, next_z;
void *s_masks_p, *a_masks_p, *vals_p;
struct mlx5_fields *f;
u8 cmd, field_bsize;
u32 s_mask, a_mask;
unsigned long mask;
+ __be32 mask_be32;
+ __be16 mask_be16;
void *action;
set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
@@ -984,11 +1151,19 @@
field_bsize = f->size * BITS_PER_BYTE;
- first_z = find_first_zero_bit(&mask, field_bsize);
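+ /* The pedit mask is stored in network byte order; convert it so the
+  * find_*_bit() scans below see the field's least significant bit at
+  * bit 0 of the mask word.
+  */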
+ if (field_bsize == 32) {
+ mask_be32 = *(__be32 *)&mask;
+ mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
+ } else if (field_bsize == 16) {
+ mask_be16 = *(__be16 *)&mask;
+ mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
+ }
+
first = find_first_bit(&mask, field_bsize);
+ next_z = find_next_zero_bit(&mask, field_bsize, first);
last = find_last_bit(&mask, field_bsize);
- if (first > 0 || last != (field_bsize - 1) || first_z < last) {
- printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
+ if (first < next_z && next_z < last) {
+ printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
mask);
return -EOPNOTSUPP;
}
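
The first/next_z/last scan above accepts a mask only when its set bits form a single contiguous run, which is what the hardware can rewrite with one set action. A stand-alone user-space sketch of the check, substituting GCC builtins for the kernel's find_*_bit() helpers (the function name and the fixed 32-bit width are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    static int is_contiguous(uint32_t mask)
    {
            int first, next_z, last;

            if (!mask)
                    return 0;
            first = __builtin_ctz(mask);          /* find_first_bit()     */
            last = 31 - __builtin_clz(mask);      /* find_last_bit()      */
            next_z = first;
            while (next_z <= last && (mask & (1u << next_z)))
                    next_z++;                     /* find_next_zero_bit() */
            return !(first < next_z && next_z < last);
    }

    int main(void)
    {
            printf("%d\n", is_contiguous(0x0ff0)); /* 1: one run, bits 4..11 */
            printf("%d\n", is_contiguous(0x0f0f)); /* 0: hole at bits 4..7   */
            return 0;
    }

For an accepted mask, the following hunk derives the sub-field write from the same scan: with mask 0x0ff0 and value 0x0ab0 it would emit offset 4, length 8 and data 0xab.
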
@@ -997,17 +1172,17 @@
MLX5_SET(set_action_in, action, field, f->field);
if (cmd == MLX5_ACTION_TYPE_SET) {
- MLX5_SET(set_action_in, action, offset, 0);
+ MLX5_SET(set_action_in, action, offset, first);
/* length is num of bits to be written, zero means length of 32 */
- MLX5_SET(set_action_in, action, length, field_bsize);
+ MLX5_SET(set_action_in, action, length, (last - first + 1));
}
if (field_bsize == 32)
- MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p));
+ MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
else if (field_bsize == 16)
- MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p));
+ MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
else if (field_bsize == 8)
- MLX5_SET(set_action_in, action, data, *(u8 *)vals_p);
+ MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
action += action_size;
nactions++;
@@ -1149,10 +1324,6 @@
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
- /* Only support a single action per rule */
- if (attr->action)
- return -EINVAL;
-
if (is_tcf_gact_shot(a)) {
attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
if (MLX5_CAP_FLOWTABLE(priv->mdev,
@@ -1435,8 +1606,8 @@
if (!(nud_state & NUD_VALID)) {
neigh_event_send(n, NULL);
- neigh_release(n);
- return -EAGAIN;
+ err = -EAGAIN;
+ goto out;
}
err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
@@ -1541,8 +1712,8 @@
if (!(nud_state & NUD_VALID)) {
neigh_event_send(n, NULL);
- neigh_release(n);
- return -EAGAIN;
+ err = -EAGAIN;
+ goto out;
}
err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
@@ -1777,7 +1948,7 @@
}
flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
- parse_attr = mlx5_vzalloc(sizeof(*parse_attr));
+ parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
if (!parse_attr || !flow) {
err = -ENOMEM;
goto err_free;
@@ -1862,9 +2033,7 @@
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
struct mlx5e_tc_flow *flow;
- struct tc_action *a;
struct mlx5_fc *counter;
- LIST_HEAD(actions);
u64 bytes;
u64 packets;
u64 lastuse;
@@ -1883,13 +2052,7 @@
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
- preempt_disable();
-
- tcf_exts_to_list(f->exts, &actions);
- list_for_each_entry(a, &actions, list)
- tcf_action_stats_update(a, bytes, packets, lastuse);
-
- preempt_enable();
+ tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
return 0;
}
@@ -1905,6 +2068,8 @@
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
+ hash_init(tc->mod_hdr_tbl);
+
tc->ht_params = mlx5e_tc_flow_ht_params;
return rhashtable_init(&tc->ht, &tc->ht_params);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index ab3bb02..0433d69 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -33,7 +33,7 @@
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "en.h"
-#include "ipoib.h"
+#include "ipoib/ipoib.h"
#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
@@ -245,7 +245,7 @@
int fsz = skb_frag_size(frag);
dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
- DMA_TO_DEVICE);
+ DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
return -ENOMEM;
@@ -557,11 +557,16 @@
if (skb_is_gso(skb)) {
opcode = MLX5_OPCODE_LSO;
ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
+ sq->stats.packets += skb_shinfo(skb)->gso_segs;
} else {
ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+ sq->stats.packets++;
}
+ sq->stats.bytes += num_bytes;
+ sq->stats.xmit_more += skb->xmit_more;
+
ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
if (ihs) {
memcpy(eseg->inline_hdr.start, skb_data, ihs);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 33eae5a..af51a5d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -35,6 +35,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
+#include "fpga/core.h"
#ifdef CONFIG_MLX5_CORE_EN
#include "eswitch.h"
#endif
@@ -156,6 +157,10 @@
return "MLX5_EVENT_TYPE_PAGE_FAULT";
case MLX5_EVENT_TYPE_PPS_EVENT:
return "MLX5_EVENT_TYPE_PPS_EVENT";
+ case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
+ return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
+ case MLX5_EVENT_TYPE_FPGA_ERROR:
+ return "MLX5_EVENT_TYPE_FPGA_ERROR";
default:
return "Unrecognized event";
}
@@ -186,7 +191,7 @@
{
__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
- __raw_writel((__force u32) cpu_to_be32(val), addr);
+ __raw_writel((__force u32)cpu_to_be32(val), addr);
/* We still want ordering, just not swabbing, so add a barrier */
mb();
}
@@ -476,6 +481,11 @@
if (dev->event)
dev->event(dev, MLX5_DEV_EVENT_PPS, (unsigned long)eqe);
break;
+
+ case MLX5_EVENT_TYPE_FPGA_ERROR:
+ mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
+ break;
+
default:
mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
eqe->type, eq->eqn);
@@ -548,7 +558,7 @@
inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_buf;
@@ -667,7 +677,6 @@
return err;
}
-
void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
mlx5_eq_debugfs_cleanup(dev);
@@ -679,7 +688,6 @@
u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
int err;
-
if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
MLX5_CAP_GEN(dev, vport_group_manager) &&
mlx5_core_is_pf(dev))
@@ -693,6 +701,9 @@
if (MLX5_CAP_GEN(dev, pps))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);
+ if (MLX5_CAP_GEN(dev, fpga))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR);
+
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
"mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 2e34d95..89bfda4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -248,11 +248,10 @@
if (rx_rule)
match_header |= MLX5_MATCH_MISC_PARAMETERS;
- spec = mlx5_vzalloc(sizeof(*spec));
- if (!spec) {
- esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
return NULL;
- }
+
dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dmac_47_16);
dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -350,10 +349,9 @@
return -EOPNOTSUPP;
}
- flow_group_in = mlx5_vzalloc(inlen);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
- memset(flow_group_in, 0, inlen);
table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
@@ -961,7 +959,7 @@
return -EOPNOTSUPP;
}
- flow_group_in = mlx5_vzalloc(inlen);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -1078,7 +1076,7 @@
return -EOPNOTSUPP;
}
- flow_group_in = mlx5_vzalloc(inlen);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -1219,7 +1217,6 @@
"vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
vport->vport);
return -EPERM;
-
}
esw_vport_cleanup_ingress_rules(esw, vport);
@@ -1241,11 +1238,9 @@
"vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->info.vlan, vport->info.qos);
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
err = -ENOMEM;
- esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
- vport->vport, err);
goto out;
}
@@ -1322,11 +1317,9 @@
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->info.vlan, vport->info.qos);
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
err = -ENOMEM;
- esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
- vport->vport, err);
goto out;
}
@@ -1775,6 +1768,7 @@
}
hash_init(esw->offloads.encap_tbl);
+ hash_init(esw->offloads.mod_hdr_tbl);
mutex_init(&esw->state_lock);
for (vport_num = 0; vport_num < total_vports; vport_num++) {
@@ -2158,7 +2152,7 @@
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index b746f62..834a330 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -207,6 +207,7 @@
struct mlx5_flow_group *vport_rx_group;
struct mlx5_eswitch_rep *vport_reps;
DECLARE_HASHTABLE(encap_tbl, 8);
+ DECLARE_HASHTABLE(mod_hdr_tbl, 8);
u8 inline_mode;
u64 num_flows;
u8 encap;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index a53e982..95b6402 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -311,9 +311,8 @@
struct mlx5_flow_spec *spec;
void *misc;
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
- esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
flow_rule = ERR_PTR(-ENOMEM);
goto out;
}
@@ -401,9 +400,8 @@
struct mlx5_flow_spec *spec;
int err = 0;
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
- esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
err = -ENOMEM;
goto out;
}
@@ -488,7 +486,7 @@
u32 *flow_group_in;
esw_debug(esw->dev, "Create offloads FDB Tables\n");
- flow_group_in = mlx5_vzalloc(inlen);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -631,7 +629,7 @@
int err = 0;
int nvports = priv->sriov.num_vfs + 2;
- flow_group_in = mlx5_vzalloc(inlen);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -675,9 +673,8 @@
struct mlx5_flow_spec *spec;
void *misc;
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
- esw_warn(esw->dev, "Failed to alloc match parameters\n");
flow_rule = ERR_PTR(-ENOMEM);
goto out;
}
@@ -694,7 +691,7 @@
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
- &flow_act, &dest, 1);
+ &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
goto out;
@@ -1100,7 +1097,7 @@
if (err) {
esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
esw->offloads.encap = !encap;
- (void) esw_create_offloads_fast_fdb_table(esw);
+ (void)esw_create_offloads_fast_fdb_table(esw);
}
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
similarity index 62%
copy from drivers/net/ethernet/mellanox/mlx5/core/ipoib.h
copy to drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
index 213191a..99cba64 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
@@ -30,27 +30,35 @@
* SOFTWARE.
*/
-#ifndef __MLX5E_IPOB_H__
-#define __MLX5E_IPOB_H__
+#include <linux/etherdevice.h>
+#include <linux/mlx5/cmd.h>
+#include <linux/mlx5/driver.h>
-#include <linux/mlx5/fs.h>
-#include "en.h"
+#include "mlx5_core.h"
+#include "fpga/cmd.h"
-#define MLX5I_MAX_NUM_TC 1
+int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps)
+{
+ u32 in[MLX5_ST_SZ_DW(fpga_cap)] = {0};
-/* ipoib rdma netdev's private data structure */
-struct mlx5i_priv {
- struct rdma_netdev rn; /* keep this first */
- struct mlx5_core_qp qp;
- u32 qkey;
- char *mlx5e_priv[0];
-};
+ return mlx5_core_access_reg(dev, in, sizeof(in), caps,
+ MLX5_ST_SZ_BYTES(fpga_cap),
+ MLX5_REG_FPGA_CAP, 0, 0);
+}
-/* Extract mlx5e_priv from IPoIB netdev */
-#define mlx5i_epriv(netdev) ((void *)(((struct mlx5i_priv *)netdev_priv(netdev))->mlx5e_priv))
+int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query)
+{
+ u32 in[MLX5_ST_SZ_DW(fpga_ctrl)] = {0};
+ u32 out[MLX5_ST_SZ_DW(fpga_ctrl)];
+ int err;
-netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5_av *av, u32 dqpn, u32 dqkey);
-void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_FPGA_CTRL, 0, false);
+ if (err)
+ return err;
-#endif /* __MLX5E_IPOB_H__ */
+ query->status = MLX5_GET(fpga_ctrl, out, status);
+ query->admin_image = MLX5_GET(fpga_ctrl, out, flash_select_admin);
+ query->oper_image = MLX5_GET(fpga_ctrl, out, flash_select_oper);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
similarity index 65%
copy from drivers/net/ethernet/mellanox/mlx5/core/ipoib.h
copy to drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
index 213191a..a74396a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -30,27 +30,30 @@
* SOFTWARE.
*/
-#ifndef __MLX5E_IPOB_H__
-#define __MLX5E_IPOB_H__
+#ifndef __MLX5_FPGA_H__
+#define __MLX5_FPGA_H__
-#include <linux/mlx5/fs.h>
-#include "en.h"
+#include <linux/mlx5/driver.h>
-#define MLX5I_MAX_NUM_TC 1
-
-/* ipoib rdma netdev's private data structure */
-struct mlx5i_priv {
- struct rdma_netdev rn; /* keep this first */
- struct mlx5_core_qp qp;
- u32 qkey;
- char *mlx5e_priv[0];
+enum mlx5_fpga_image {
+ MLX5_FPGA_IMAGE_USER = 0,
+ MLX5_FPGA_IMAGE_FACTORY,
};
-/* Extract mlx5e_priv from IPoIB netdev */
-#define mlx5i_epriv(netdev) ((void *)(((struct mlx5i_priv *)netdev_priv(netdev))->mlx5e_priv))
+enum mlx5_fpga_status {
+ MLX5_FPGA_STATUS_SUCCESS = 0,
+ MLX5_FPGA_STATUS_FAILURE = 1,
+ MLX5_FPGA_STATUS_IN_PROGRESS = 2,
+ MLX5_FPGA_STATUS_NONE = 0xFFFF,
+};
-netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5_av *av, u32 dqpn, u32 dqkey);
-void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+struct mlx5_fpga_query {
+ enum mlx5_fpga_image admin_image;
+ enum mlx5_fpga_image oper_image;
+ enum mlx5_fpga_status status;
+};
-#endif /* __MLX5E_IPOB_H__ */
+int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps);
+int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query);
+
+#endif /* __MLX5_FPGA_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
new file mode 100644
index 0000000..d88b332
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+
+#include "mlx5_core.h"
+#include "fpga/core.h"
+
+static const char *const mlx5_fpga_error_strings[] = {
+ "Null Syndrome",
+ "Corrupted DDR",
+ "Flash Timeout",
+ "Internal Link Error",
+ "Watchdog HW Failure",
+ "I2C Failure",
+ "Image Changed",
+ "Temperature Critical",
+};
+
+static struct mlx5_fpga_device *mlx5_fpga_device_alloc(void)
+{
+ struct mlx5_fpga_device *fdev = NULL;
+
+ fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
+ if (!fdev)
+ return NULL;
+
+ spin_lock_init(&fdev->state_lock);
+ fdev->state = MLX5_FPGA_STATUS_NONE;
+ return fdev;
+}
+
+static const char *mlx5_fpga_image_name(enum mlx5_fpga_image image)
+{
+ switch (image) {
+ case MLX5_FPGA_IMAGE_USER:
+ return "user";
+ case MLX5_FPGA_IMAGE_FACTORY:
+ return "factory";
+ default:
+ return "unknown";
+ }
+}
+
+static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev)
+{
+ struct mlx5_fpga_query query;
+ int err;
+
+ err = mlx5_fpga_query(fdev->mdev, &query);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to query status: %d\n", err);
+ return err;
+ }
+
+ fdev->last_admin_image = query.admin_image;
+ fdev->last_oper_image = query.oper_image;
+
+ mlx5_fpga_dbg(fdev, "Status %u; Admin image %u; Oper image %u\n",
+ query.status, query.admin_image, query.oper_image);
+
+ if (query.status != MLX5_FPGA_STATUS_SUCCESS) {
+ mlx5_fpga_err(fdev, "%s image failed to load; status %u\n",
+ mlx5_fpga_image_name(fdev->last_oper_image),
+ query.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+ unsigned long flags;
+ int err;
+
+ if (!fdev)
+ return 0;
+
+ err = mlx5_fpga_device_load_check(fdev);
+ if (err)
+ goto out;
+
+ err = mlx5_fpga_caps(fdev->mdev,
+ fdev->mdev->caps.hca_cur[MLX5_CAP_FPGA]);
+ if (err)
+ goto out;
+
+ mlx5_fpga_info(fdev, "device %u; %s image, version %u\n",
+ MLX5_CAP_FPGA(fdev->mdev, fpga_device),
+ mlx5_fpga_image_name(fdev->last_oper_image),
+ MLX5_CAP_FPGA(fdev->mdev, image_version));
+
+out:
+ spin_lock_irqsave(&fdev->state_lock, flags);
+ fdev->state = err ? MLX5_FPGA_STATUS_FAILURE : MLX5_FPGA_STATUS_SUCCESS;
+ spin_unlock_irqrestore(&fdev->state_lock, flags);
+ return err;
+}
+
+int mlx5_fpga_device_init(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_fpga_device *fdev = NULL;
+
+ if (!MLX5_CAP_GEN(mdev, fpga)) {
+ mlx5_core_dbg(mdev, "FPGA capability not present\n");
+ return 0;
+ }
+
+ mlx5_core_dbg(mdev, "Initializing FPGA\n");
+
+ fdev = mlx5_fpga_device_alloc();
+ if (!fdev)
+ return -ENOMEM;
+
+ fdev->mdev = mdev;
+ mdev->fpga = fdev;
+
+ return 0;
+}
+
+void mlx5_fpga_device_cleanup(struct mlx5_core_dev *mdev)
+{
+ kfree(mdev->fpga);
+ mdev->fpga = NULL;
+}
+
+static const char *mlx5_fpga_syndrome_to_string(u8 syndrome)
+{
+ if (syndrome < ARRAY_SIZE(mlx5_fpga_error_strings))
+ return mlx5_fpga_error_strings[syndrome];
+ return "Unknown";
+}
+
+void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data)
+{
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+ const char *event_name;
+ bool teardown = false;
+ unsigned long flags;
+ u8 syndrome;
+
+ if (event != MLX5_EVENT_TYPE_FPGA_ERROR) {
+ mlx5_fpga_warn_ratelimited(fdev, "Unexpected event %u\n",
+ event);
+ return;
+ }
+
+ syndrome = MLX5_GET(fpga_error_event, data, syndrome);
+ event_name = mlx5_fpga_syndrome_to_string(syndrome);
+
+ spin_lock_irqsave(&fdev->state_lock, flags);
+ switch (fdev->state) {
+ case MLX5_FPGA_STATUS_SUCCESS:
+ mlx5_fpga_warn(fdev, "Error %u: %s\n", syndrome, event_name);
+ teardown = true;
+ break;
+ default:
+ mlx5_fpga_warn_ratelimited(fdev, "Unexpected error event %u: %s\n",
+ syndrome, event_name);
+ }
+ spin_unlock_irqrestore(&fdev->state_lock, flags);
+ /* We tear down the card's interfaces and functionality because
+ * the FPGA bump-on-the-wire is misbehaving and we lose the ability
+ * to communicate with the network. The user may still be able to
+ * recover by re-programming or debugging the FPGA.
+ */
+ if (teardown)
+ mlx5_trigger_health_work(fdev->mdev);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
new file mode 100644
index 0000000..c55044d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_FPGA_CORE_H__
+#define __MLX5_FPGA_CORE_H__
+
+#ifdef CONFIG_MLX5_FPGA
+
+#include "fpga/cmd.h"
+
+/* Represents an Innova device */
+struct mlx5_fpga_device {
+ struct mlx5_core_dev *mdev;
+ spinlock_t state_lock; /* Protects state transitions */
+ enum mlx5_fpga_status state;
+ enum mlx5_fpga_image last_admin_image;
+ enum mlx5_fpga_image last_oper_image;
+};
+
+#define mlx5_fpga_dbg(__adev, format, ...) \
+ dev_dbg(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, ##__VA_ARGS__)
+
+#define mlx5_fpga_err(__adev, format, ...) \
+ dev_err(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, ##__VA_ARGS__)
+
+#define mlx5_fpga_warn(__adev, format, ...) \
+ dev_warn(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, ##__VA_ARGS__)
+
+#define mlx5_fpga_warn_ratelimited(__adev, format, ...) \
+ dev_warn_ratelimited(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d: " \
+ format, __func__, __LINE__, ##__VA_ARGS__)
+
+#define mlx5_fpga_notice(__adev, format, ...) \
+ dev_notice(&(__adev)->mdev->pdev->dev, "FPGA: " format, ##__VA_ARGS__)
+
+#define mlx5_fpga_info(__adev, format, ...) \
+ dev_info(&(__adev)->mdev->pdev->dev, "FPGA: " format, ##__VA_ARGS__)
+
+int mlx5_fpga_device_init(struct mlx5_core_dev *mdev);
+void mlx5_fpga_device_cleanup(struct mlx5_core_dev *mdev);
+int mlx5_fpga_device_start(struct mlx5_core_dev *mdev);
+void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data);
+
+#else
+
+static inline int mlx5_fpga_device_init(struct mlx5_core_dev *mdev)
+{
+ return 0;
+}
+
+static inline void mlx5_fpga_device_cleanup(struct mlx5_core_dev *mdev)
+{
+}
+
+static inline int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
+{
+ return 0;
+}
+
+static inline void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event,
+ void *data)
+{
+}
+
+#endif
+
+#endif /* __MLX5_FPGA_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index fcec7be..e750f07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -78,28 +78,33 @@
MLX5_CMD_OP_CREATE_FLOW_TABLE);
MLX5_SET(create_flow_table_in, in, table_type, type);
- MLX5_SET(create_flow_table_in, in, level, level);
- MLX5_SET(create_flow_table_in, in, log_size, log_size);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.level, level);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
if (vport) {
MLX5_SET(create_flow_table_in, in, vport_number, vport);
MLX5_SET(create_flow_table_in, in, other_vport, 1);
}
- MLX5_SET(create_flow_table_in, in, decap_en, en_encap_decap);
- MLX5_SET(create_flow_table_in, in, encap_en, en_encap_decap);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
+ en_encap_decap);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.encap_en,
+ en_encap_decap);
switch (op_mod) {
case FS_FT_OP_MOD_NORMAL:
if (next_ft) {
- MLX5_SET(create_flow_table_in, in, table_miss_mode, 1);
- MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id);
+ MLX5_SET(create_flow_table_in, in,
+ flow_table_context.table_miss_action, 1);
+ MLX5_SET(create_flow_table_in, in,
+ flow_table_context.table_miss_id, next_ft->id);
}
break;
case FS_FT_OP_MOD_LAG_DEMUX:
MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
if (next_ft)
- MLX5_SET(create_flow_table_in, in, lag_master_next_table_id,
+ MLX5_SET(create_flow_table_in, in,
+ flow_table_context.lag_master_next_table_id,
next_ft->id);
break;
}
@@ -146,10 +151,10 @@
MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
if (next_ft) {
MLX5_SET(modify_flow_table_in, in,
- lag_master_next_table_id, next_ft->id);
+ flow_table_context.lag_master_next_table_id, next_ft->id);
} else {
MLX5_SET(modify_flow_table_in, in,
- lag_master_next_table_id, 0);
+ flow_table_context.lag_master_next_table_id, 0);
}
} else {
if (ft->vport) {
@@ -160,11 +165,14 @@
MLX5_SET(modify_flow_table_in, in, modify_field_select,
MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
if (next_ft) {
- MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1);
- MLX5_SET(modify_flow_table_in, in, table_miss_id,
+ MLX5_SET(modify_flow_table_in, in,
+ flow_table_context.table_miss_action, 1);
+ MLX5_SET(modify_flow_table_in, in,
+ flow_table_context.table_miss_id,
next_ft->id);
} else {
- MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0);
+ MLX5_SET(modify_flow_table_in, in,
+ flow_table_context.table_miss_action, 0);
}
}
@@ -232,11 +240,9 @@
u32 *in;
int err;
- in = mlx5_vzalloc(inlen);
- if (!in) {
- mlx5_core_warn(dev, "failed to allocate inbox\n");
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
return -ENOMEM;
- }
MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
MLX5_SET(set_fte_in, in, op_mod, opmod);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 8f5125c..e8690fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -104,6 +104,7 @@
size_t arr_sz;
long *caps;
};
+
static struct init_tree_node {
enum fs_node_type type;
struct init_tree_node *children;
@@ -376,11 +377,9 @@
int err;
bool update_fte = false;
- match_value = mlx5_vzalloc(match_len);
- if (!match_value) {
- mlx5_core_warn(dev, "failed to allocate inbox\n");
+ match_value = kvzalloc(match_len, GFP_KERNEL);
+ if (!match_value)
return;
- }
fs_get_obj(rule, node);
fs_get_obj(fte, rule->node.parent);
@@ -1157,7 +1156,7 @@
if (!ft->autogroup.active)
return ERR_PTR(-ENOENT);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return ERR_PTR(-ENOMEM);
@@ -1777,7 +1776,7 @@
struct mlx5_flow_namespace *ns;
/* Create the root namespace */
- root_ns = mlx5_vzalloc(sizeof(*root_ns));
+ root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL);
if (!root_ns)
return NULL;
@@ -1860,7 +1859,6 @@
static int init_root_ns(struct mlx5_flow_steering *steering)
{
-
steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
if (!steering->root_ns)
goto cleanup;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 1bc14d0..e9489e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -195,3 +195,31 @@
MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
+
+int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
+ int force_state;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev, force_teardown)) {
+ mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
+ return -EOPNOTSUPP;
+ }
+
+ MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
+ MLX5_SET(teardown_hca_in, in, profile, MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE);
+
+ ret = mlx5_cmd_exec_polling(dev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ return ret;
+
+ force_state = MLX5_GET(teardown_hca_out, out, force_state);
+ if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
+ mlx5_core_err(dev, "teardown with force mode failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
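
A hypothetical caller sketch for the new helper: try the forced teardown first and fall back to the regular MLX5_CMD_OP_TEARDOWN_HCA flow when the firmware lacks the capability. mlx5_cmd_teardown_hca() is the existing function directly above; the fallback policy itself is illustrative, not the driver's actual shutdown logic:

    static int sketch_teardown(struct mlx5_core_dev *dev)
    {
            int err = mlx5_cmd_force_teardown_hca(dev);

            if (err == -EOPNOTSUPP)
                    err = mlx5_cmd_teardown_hca(dev);
            return err;
    }
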
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index f27f84f..0648a65 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -111,14 +111,14 @@
return 0;
}
-void mlx5_enter_error_state(struct mlx5_core_dev *dev)
+void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
{
mutex_lock(&dev->intf_state_mutex);
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
goto unlock;
mlx5_core_err(dev, "start\n");
- if (pci_channel_offline(dev->pdev) || in_fatal(dev)) {
+ if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) {
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
trigger_cmd_completions(dev);
}
@@ -185,6 +185,7 @@
struct mlx5_core_health *health;
struct mlx5_core_dev *dev;
struct mlx5_priv *priv;
+ unsigned long flags;
health = container_of(work, struct mlx5_core_health, work);
priv = container_of(health, struct mlx5_priv, health);
@@ -192,13 +193,13 @@
mlx5_core_warn(dev, "handling bad device here\n");
mlx5_handle_bad_state(dev);
- spin_lock(&health->wq_lock);
+ spin_lock_irqsave(&health->wq_lock, flags);
if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
schedule_delayed_work(&health->recover_work, recover_delay);
else
dev_err(&dev->pdev->dev,
"new health works are not permitted at this stage\n");
- spin_unlock(&health->wq_lock);
+ spin_unlock_irqrestore(&health->wq_lock, flags);
}
static const char *hsynd_str(u8 synd)
@@ -269,6 +270,20 @@
return next;
}
+void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+ unsigned long flags;
+
+ spin_lock_irqsave(&health->wq_lock, flags);
+ if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+ queue_work(health->wq, &health->work);
+ else
+ dev_err(&dev->pdev->dev,
+ "new health works are not permitted at this stage\n");
+ spin_unlock_irqrestore(&health->wq_lock, flags);
+}
+
static void poll_health(unsigned long data)
{
struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
@@ -293,13 +308,7 @@
if (in_fatal(dev) && !health->sick) {
health->sick = true;
print_health_info(dev);
- spin_lock(&health->wq_lock);
- if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
- queue_work(health->wq, &health->work);
- else
- dev_err(&dev->pdev->dev,
- "new health works are not permitted at this stage\n");
- spin_unlock(&health->wq_lock);
+ mlx5_trigger_health_work(dev);
}
out:
@@ -332,10 +341,11 @@
void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
+ unsigned long flags;
- spin_lock(&health->wq_lock);
+ spin_lock_irqsave(&health->wq_lock, flags);
set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
- spin_unlock(&health->wq_lock);
+ spin_unlock_irqrestore(&health->wq_lock, flags);
cancel_delayed_work_sync(&health->recover_work);
cancel_work_sync(&health->work);
}
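
The spin_lock to spin_lock_irqsave conversions above follow from the new caller: mlx5_trigger_health_work() is now reachable from the EQ interrupt handler via mlx5_fpga_event(), so wq_lock must be taken with interrupts disabled on every path. A compact sketch of the resulting gate-then-cancel idiom, using the driver's own primitives (illustrative fragment, not the file's exact code):

    static void sketch_trigger(struct mlx5_core_health *health)
    {
            unsigned long flags;

            spin_lock_irqsave(&health->wq_lock, flags);
            if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
                    queue_work(health->wq, &health->work);
            spin_unlock_irqrestore(&health->wq_lock, flags);
    }

    static void sketch_drain(struct mlx5_core_health *health)
    {
            unsigned long flags;

            spin_lock_irqsave(&health->wq_lock, flags);
            set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
            spin_unlock_irqrestore(&health->wq_lock, flags);
            cancel_work_sync(&health->work);
    }

Setting the drop bit under the same lock before cancel_work_sync() ensures no new work can be queued once the drain has begun.
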
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
new file mode 100644
index 0000000..bf686f0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+#include "ipoib.h"
+
+static void mlx5i_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ mlx5e_ethtool_get_drvinfo(priv, drvinfo);
+}
+
+static void mlx5i_get_strings(struct net_device *dev,
+ uint32_t stringset, uint8_t *data)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ mlx5e_ethtool_get_strings(priv, stringset, data);
+}
+
+static int mlx5i_get_sset_count(struct net_device *dev, int sset)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return mlx5e_ethtool_get_sset_count(priv, sset);
+}
+
+static void mlx5i_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
+}
+
+static int mlx5i_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return mlx5e_ethtool_set_ringparam(priv, param);
+}
+
+static void mlx5i_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ mlx5e_ethtool_get_ringparam(priv, param);
+}
+
+static int mlx5i_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return mlx5e_ethtool_set_channels(priv, ch);
+}
+
+static void mlx5i_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ mlx5e_ethtool_get_channels(priv, ch);
+}
+
+static int mlx5i_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ return mlx5e_ethtool_set_coalesce(priv, coal);
+}
+
+static int mlx5i_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ return mlx5e_ethtool_get_coalesce(priv, coal);
+}
+
+static int mlx5i_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ return mlx5e_ethtool_get_ts_info(priv, info);
+}
+
+const struct ethtool_ops mlx5i_ethtool_ops = {
+ .get_drvinfo = mlx5i_get_drvinfo,
+ .get_strings = mlx5i_get_strings,
+ .get_sset_count = mlx5i_get_sset_count,
+ .get_ethtool_stats = mlx5i_get_ethtool_stats,
+ .get_ringparam = mlx5i_get_ringparam,
+ .set_ringparam = mlx5i_set_ringparam,
+ .get_channels = mlx5i_get_channels,
+ .set_channels = mlx5i_set_channels,
+ .get_coalesce = mlx5i_get_coalesce,
+ .set_coalesce = mlx5i_set_coalesce,
+ .get_ts_info = mlx5i_get_ts_info,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
similarity index 87%
rename from drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
rename to drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index cc18587..1ee5bce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -36,20 +36,38 @@
#include "ipoib.h"
#define IB_DEFAULT_Q_KEY 0xb1b
+#define MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE 9
static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_dev_init(struct net_device *dev);
static void mlx5i_dev_cleanup(struct net_device *dev);
+static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);
+static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct net_device_ops mlx5i_netdev_ops = {
.ndo_open = mlx5i_open,
.ndo_stop = mlx5i_close,
.ndo_init = mlx5i_dev_init,
.ndo_uninit = mlx5i_dev_cleanup,
+ .ndo_change_mtu = mlx5i_change_mtu,
+ .ndo_do_ioctl = mlx5i_ioctl,
};
/* IPoIB mlx5 netdev profile */
+static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
+ mlx5e_set_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
+
+ /* The IPoIB RQ size defaults to 512 entries */
+ params->log_rq_size = is_kdump_kernel() ?
+ MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
+ MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;
+
+ params->lro_en = false;
+}
/* Called directly after IPoIB netdevice was created to initialize SW structs */
static void mlx5i_init(struct mlx5_core_dev *mdev,
@@ -59,19 +77,18 @@
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ /* priv init */
priv->mdev = mdev;
priv->netdev = netdev;
priv->profile = profile;
priv->ppriv = ppriv;
-
- mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
-
- /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
- mlx5e_set_rq_type_params(mdev, &priv->channels.params, MLX5_WQ_TYPE_LINKED_LIST);
- priv->channels.params.lro_en = false;
-
+ priv->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
mutex_init(&priv->state_lock);
+ mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
+ mlx5i_build_nic_params(mdev, &priv->channels.params);
+
+ /* netdev init */
netdev->hw_features |= NETIF_F_SG;
netdev->hw_features |= NETIF_F_IP_CSUM;
netdev->hw_features |= NETIF_F_IPV6_CSUM;
@@ -82,6 +99,7 @@
netdev->hw_features |= NETIF_F_RXHASH;
netdev->netdev_ops = &mlx5i_netdev_ops;
+ netdev->ethtool_ops = &mlx5i_ethtool_ops;
}
/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
@@ -102,7 +120,7 @@
void *qpc;
inlen = MLX5_ST_SZ_BYTES(create_qp_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -290,6 +308,7 @@
.disable = NULL, /* mlx5i_disable */
.update_stats = NULL, /* mlx5i_update_stats */
.max_nch = mlx5e_get_max_num_channels,
+ .update_carrier = NULL, /* no HW update in IB link */
.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
.max_tc = MLX5I_MAX_NUM_TC,
@@ -297,6 +316,35 @@
/* mlx5i netdev NDOs */
+static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5e_channels new_channels = {};
+ int curr_mtu;
+ int err = 0;
+
+ mutex_lock(&priv->state_lock);
+
+ curr_mtu = netdev->mtu;
+ netdev->mtu = new_mtu;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto out;
+
+ new_channels.params = priv->channels.params;
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err) {
+ netdev->mtu = curr_mtu;
+ goto out;
+ }
+
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+
+out:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
static int mlx5i_dev_init(struct net_device *dev)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -310,6 +358,20 @@
return 0;
}
+static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return mlx5e_hwstamp_set(priv, ifr);
+ case SIOCGHWTSTAMP:
+ return mlx5e_hwstamp_get(priv, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static void mlx5i_dev_cleanup(struct net_device *dev)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -336,6 +398,8 @@
mlx5e_refresh_tirs(priv, false);
mlx5e_activate_priv_channels(priv);
+ mlx5e_timestamp_init(priv);
+
mutex_unlock(&priv->state_lock);
return 0;
@@ -359,6 +423,7 @@
clear_bit(MLX5E_STATE_OPENED, &priv->state);
+ mlx5e_timestamp_cleanup(priv);
netif_carrier_off(priv->netdev);
mlx5e_deactivate_priv_channels(priv);
mlx5e_close_channels(&priv->channels);
@@ -510,4 +575,3 @@
mlx5e_destroy_mdev_resources(priv->mdev);
}
EXPORT_SYMBOL(mlx5_rdma_netdev_free);
-
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
similarity index 89%
rename from drivers/net/ethernet/mellanox/mlx5/core/ipoib.h
rename to drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 213191a..a0f405f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -38,6 +38,13 @@
#define MLX5I_MAX_NUM_TC 1
+extern const struct ethtool_ops mlx5i_ethtool_ops;
+
+#define MLX5_IB_GRH_BYTES 40
+#define MLX5_IPOIB_ENCAP_LEN 4
+#define MLX5_IPOIB_PSEUDO_LEN 20
+#define MLX5_IPOIB_HARD_LEN (MLX5_IPOIB_PSEUDO_LEN + MLX5_IPOIB_ENCAP_LEN)
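+
+/* ipoib.c sets priv->hard_mtu to MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN,
+ * i.e. 40 + (20 + 4) = 64 bytes of link-level overhead.
+ */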
+
/* ipoib rdma netdev's private data structure */
struct mlx5i_priv {
struct rdma_netdev rn; /* keep this first */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index b5d5519..a3a836b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -61,6 +61,11 @@
struct lag_tracker tracker;
struct delayed_work bond_work;
struct notifier_block nb;
+
+ /* Admin state. Allow LAG only if "allowed" is true,
+ * even if the network conditions for LAG were met.
+ */
+ bool allowed;
};
/* General purpose, use for short periods of time.
@@ -214,6 +219,7 @@
struct lag_tracker tracker;
u8 v2p_port1, v2p_port2;
int i, err;
+ bool do_bond;
if (!dev0 || !dev1)
return;
@@ -222,13 +228,9 @@
tracker = ldev->tracker;
mutex_unlock(&lag_mutex);
- if (tracker.is_bonded && !mlx5_lag_is_bonded(ldev)) {
- if (mlx5_sriov_is_enabled(dev0) ||
- mlx5_sriov_is_enabled(dev1)) {
- mlx5_core_warn(dev0, "LAG is not supported with SRIOV");
- return;
- }
+ do_bond = tracker.is_bonded && ldev->allowed;
+ if (do_bond && !mlx5_lag_is_bonded(ldev)) {
for (i = 0; i < MLX5_MAX_PORTS; i++)
mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
MLX5_INTERFACE_PROTOCOL_IB);
@@ -237,7 +239,7 @@
mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_nic_vport_enable_roce(dev1);
- } else if (tracker.is_bonded && mlx5_lag_is_bonded(ldev)) {
+ } else if (do_bond && mlx5_lag_is_bonded(ldev)) {
mlx5_infer_tx_affinity_mapping(&tracker, &v2p_port1,
&v2p_port2);
@@ -252,7 +254,7 @@
"Failed to modify LAG (%d)\n",
err);
}
- } else if (!tracker.is_bonded && mlx5_lag_is_bonded(ldev)) {
+ } else if (!do_bond && mlx5_lag_is_bonded(ldev)) {
mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_nic_vport_disable_roce(dev1);
@@ -411,6 +413,15 @@
return NOTIFY_DONE;
}
+static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
+{
+ if ((ldev->pf[0].dev && mlx5_sriov_is_enabled(ldev->pf[0].dev)) ||
+ (ldev->pf[1].dev && mlx5_sriov_is_enabled(ldev->pf[1].dev)))
+ return false;
+ else
+ return true;
+}
+
static struct mlx5_lag *mlx5_lag_dev_alloc(void)
{
struct mlx5_lag *ldev;
@@ -420,6 +431,7 @@
return NULL;
INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
+ ldev->allowed = mlx5_lag_check_prereq(ldev);
return ldev;
}
@@ -444,7 +456,9 @@
ldev->tracker.netdev_state[fn].link_up = 0;
ldev->tracker.netdev_state[fn].tx_enabled = 0;
+ ldev->allowed = mlx5_lag_check_prereq(ldev);
dev->priv.lag = ldev;
+
mutex_unlock(&lag_mutex);
}
@@ -464,10 +478,10 @@
memset(&ldev->pf[i], 0, sizeof(*ldev->pf));
dev->priv.lag = NULL;
+ ldev->allowed = mlx5_lag_check_prereq(ldev);
mutex_unlock(&lag_mutex);
}
-
/* Must be called with intf_mutex held */
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
{
@@ -543,6 +557,44 @@
}
EXPORT_SYMBOL(mlx5_lag_is_active);
+static int mlx5_lag_set_state(struct mlx5_core_dev *dev, bool allow)
+{
+ struct mlx5_lag *ldev;
+ int ret = 0;
+ bool lag_active;
+
+ mlx5_dev_list_lock();
+
+ ldev = mlx5_lag_dev_get(dev);
+ if (!ldev) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+ lag_active = mlx5_lag_is_bonded(ldev);
+ if (!mlx5_lag_check_prereq(ldev) && allow) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ if (ldev->allowed == allow)
+ goto unlock;
+ ldev->allowed = allow;
+ if ((lag_active && !allow) || allow)
+ mlx5_do_bond(ldev);
+unlock:
+ mlx5_dev_list_unlock();
+ return ret;
+}
+
+int mlx5_lag_forbid(struct mlx5_core_dev *dev)
+{
+ return mlx5_lag_set_state(dev, false);
+}
+
+int mlx5_lag_allow(struct mlx5_core_dev *dev)
+{
+ return mlx5_lag_set_state(dev, true);
+}
+
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
struct net_device *ndev = NULL;
@@ -586,4 +638,3 @@
/* If bonded, we do not add an IB device for PF1. */
return false;
}
-
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 13be264..db55ba4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -56,6 +56,7 @@
#ifdef CONFIG_MLX5_CORE_EN
#include "eswitch.h"
#endif
+#include "fpga/core.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
@@ -356,12 +357,11 @@
kfree(priv->msix_arr);
}
-struct mlx5_reg_host_endianess {
+struct mlx5_reg_host_endianness {
u8 he;
u8 rsvd[15];
};
-
#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))
enum {
@@ -475,7 +475,7 @@
req_endianness =
MLX5_CAP_ATOMIC(dev,
- supported_atomic_req_8B_endianess_mode_1);
+ supported_atomic_req_8B_endianness_mode_1);
if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
return 0;
@@ -487,7 +487,7 @@
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
/* Set requestor to host endianness */
- MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode,
+ MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianness_mode,
MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);
err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);
@@ -562,8 +562,8 @@
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
- struct mlx5_reg_host_endianess he_in;
- struct mlx5_reg_host_endianess he_out;
+ struct mlx5_reg_host_endianness he_in;
+ struct mlx5_reg_host_endianness he_out;
int err;
if (!mlx5_core_is_pf(dev))
@@ -1117,10 +1117,16 @@
goto err_disable_msix;
}
+ err = mlx5_fpga_device_init(dev);
+ if (err) {
+ dev_err(&pdev->dev, "fpga device init failed %d\n", err);
+ goto err_put_uars;
+ }
+
err = mlx5_start_eqs(dev);
if (err) {
dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
- goto err_put_uars;
+ goto err_fpga_init;
}
err = alloc_comp_eqs(dev);
@@ -1151,6 +1157,12 @@
goto err_sriov;
}
+ err = mlx5_fpga_device_start(dev);
+ if (err) {
+ dev_err(&pdev->dev, "fpga device start failed %d\n", err);
+ goto err_reg_dev;
+ }
+
if (mlx5_device_registered(dev)) {
mlx5_attach_device(dev);
} else {
@@ -1186,6 +1198,9 @@
err_stop_eqs:
mlx5_stop_eqs(dev);
+err_fpga_init:
+ mlx5_fpga_device_cleanup(dev);
+
err_put_uars:
mlx5_put_uars_page(dev, priv->uar);
@@ -1250,6 +1265,7 @@
mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
+ mlx5_fpga_device_cleanup(dev);
mlx5_put_uars_page(dev, priv->uar);
mlx5_disable_msix(dev);
if (cleanup)
@@ -1412,7 +1428,7 @@
dev_info(&pdev->dev, "%s was called\n", __func__);
- mlx5_enter_error_state(dev);
+ mlx5_enter_error_state(dev, false);
mlx5_unload_one(dev, priv, false);
/* In case of kernel call drain the health wq */
if (state) {
@@ -1499,15 +1515,43 @@
.resume = mlx5_pci_resume
};
+static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
+{
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev, force_teardown)) {
+ mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
+ return -EAGAIN;
+ }
+
+ ret = mlx5_cmd_force_teardown_hca(dev);
+ if (ret) {
+ mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
+ return ret;
+ }
+
+ mlx5_enter_error_state(dev, true);
+
+ return 0;
+}
+
static void shutdown(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct mlx5_priv *priv = &dev->priv;
+ int err;
dev_info(&pdev->dev, "Shutdown was called\n");
/* Notify mlx5 clients that the kernel is being shut down */
set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
- mlx5_unload_one(dev, priv, false);
+ err = mlx5_try_fast_unload(dev);
+ if (err)
+ mlx5_unload_one(dev, priv, false);
mlx5_pci_disable_device(dev);
}
@@ -1524,6 +1568,8 @@
{ PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 Ex VF */
{ PCI_VDEVICE(MELLANOX, 0x101b) }, /* ConnectX-6 */
{ PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF}, /* ConnectX-6 VF */
+ { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
+ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ 0, }
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index fbc6e9e..5ccdf43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -39,8 +39,7 @@
#include <linux/if_link.h>
#define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "3.0-1"
-#define DRIVER_RELDATE "January 2015"
+#define DRIVER_VERSION "5.0-0"
#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev))
@@ -84,12 +83,13 @@
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
+int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
struct mlx5_pagefault *pfault);
void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
-void mlx5_enter_error_state(struct mlx5_core_dev *dev);
+void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
void mlx5_disable_device(struct mlx5_core_dev *dev);
void mlx5_recover_device(struct mlx5_core_dev *dev);
int mlx5_sriov_init(struct mlx5_core_dev *dev);
@@ -168,4 +168,7 @@
MLX5_CAP_GEN(dev, lag_master);
}
+int mlx5_lag_allow(struct mlx5_core_dev *dev);
+int mlx5_lag_forbid(struct mlx5_core_dev *dev);
+
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index a57d5a8..e36d3e3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -279,7 +279,7 @@
int i;
inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
@@ -376,7 +376,7 @@
*nclaimed = 0;
outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -403,7 +403,6 @@
for (i = 0; i < num_claimed; i++)
free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
-
if (nclaimed)
*nclaimed = num_claimed;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 141583d..1975d43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -47,8 +47,8 @@
u32 *in = NULL;
void *data;
- in = mlx5_vzalloc(inlen);
- out = mlx5_vzalloc(outlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!in || !out)
goto out;
@@ -454,7 +454,7 @@
u32 *in;
int err;
- in = mlx5_vzalloc(sz);
+ in = kvzalloc(sz, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index cbbcef2..340f281 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mlx5/cmd.h>
@@ -519,23 +518,3 @@
return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
-
-int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
- u32 *out_of_buffer)
-{
- int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
- void *out;
- int err;
-
- out = mlx5_vzalloc(outlen);
- if (!out)
- return -ENOMEM;
-
- err = mlx5_core_query_q_counter(dev, counter_id, 0, out, outlen);
- if (!err)
- *out_of_buffer = MLX5_GET(query_q_counter_out, out,
- out_of_buffer);
-
- kfree(out);
- return err;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index e086277..bcdf777 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -175,15 +175,20 @@
if (!mlx5_core_is_pf(dev))
return -EPERM;
- if (num_vfs && mlx5_lag_is_active(dev)) {
- mlx5_core_warn(dev, "can't turn sriov on while LAG is active");
- return -EINVAL;
+ if (num_vfs) {
+ int ret;
+
+ ret = mlx5_lag_forbid(dev);
+ if (ret && (ret != -ENODEV))
+ return ret;
}
- if (num_vfs)
+ if (num_vfs) {
err = mlx5_sriov_enable(pdev, num_vfs);
- else
+ } else {
mlx5_sriov_disable(pdev);
+ mlx5_lag_allow(dev);
+ }
return err ? err : num_vfs;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index 30996300..f774de6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -162,7 +162,7 @@
pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
- create_in = mlx5_vzalloc(inlen);
+ create_in = kvzalloc(inlen, GFP_KERNEL);
if (!create_in)
return -ENOMEM;
@@ -221,7 +221,7 @@
void *srqc;
int err;
- srq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_srq_out));
+ srq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out), GFP_KERNEL);
if (!srq_out)
return -ENOMEM;
@@ -256,7 +256,7 @@
pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
- create_in = mlx5_vzalloc(inlen);
+ create_in = kvzalloc(inlen, GFP_KERNEL);
if (!create_in)
return -ENOMEM;
@@ -320,7 +320,7 @@
void *xrc_srqc;
int err;
- xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
if (!xrcsrq_out)
return -ENOMEM;
memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
@@ -357,7 +357,7 @@
pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
- create_in = mlx5_vzalloc(inlen);
+ create_in = kvzalloc(inlen, GFP_KERNEL);
if (!create_in)
return -ENOMEM;
@@ -390,7 +390,7 @@
void *bitmask;
int err;
- in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+ in = kvzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in), GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -417,7 +417,7 @@
void *rmpc;
int err;
- rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
+ rmp_out = kvzalloc(MLX5_ST_SZ_BYTES(query_rmp_out), GFP_KERNEL);
if (!rmp_out)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index a00ff49..5e128d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -284,7 +284,7 @@
void *bitmask;
int err;
- in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+ in = kvzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in), GFP_KERNEL);
if (!in)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 15c2294..06019d0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -172,7 +172,7 @@
u8 *out_addr;
int err;
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -197,11 +197,9 @@
void *nic_vport_ctx;
u8 *perm_mac;
- in = mlx5_vzalloc(inlen);
- if (!in) {
- mlx5_core_warn(mdev, "failed to allocate inbox\n");
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
return -ENOMEM;
- }
MLX5_SET(modify_nic_vport_context_in, in,
field_select.permanent_address, 1);
@@ -231,7 +229,7 @@
u32 *out;
int err;
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -251,7 +249,7 @@
void *in;
int err;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -501,7 +499,7 @@
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -521,7 +519,7 @@
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -551,7 +549,7 @@
if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
return -EOPNOTSUPP;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -577,7 +575,7 @@
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -879,11 +877,9 @@
int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
int err;
- in = mlx5_vzalloc(inlen);
- if (!in) {
- mlx5_core_err(mdev, "failed to allocate inbox\n");
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
return -ENOMEM;
- }
MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
MLX5_SET(modify_nic_vport_context_in, in,
@@ -913,11 +909,9 @@
int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
int err;
- in = mlx5_vzalloc(inlen);
- if (!in) {
- mlx5_core_warn(mdev, "failed to allocate inbox\n");
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
return -ENOMEM;
- }
MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
@@ -952,7 +946,7 @@
int err;
is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
- in = mlx5_vzalloc(in_sz);
+ in = kvzalloc(in_sz, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxfw/Kconfig b/drivers/net/ethernet/mellanox/mlxfw/Kconfig
new file mode 100644
index 0000000..2b21af8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/Kconfig
@@ -0,0 +1,7 @@
+#
+# Mellanox firmware flash library configuration
+#
+
+config MLXFW
+ tristate "mlxfw" if COMPILE_TEST
+ select XZ_DEC
diff --git a/drivers/net/ethernet/mellanox/mlxfw/Makefile b/drivers/net/ethernet/mellanox/mlxfw/Makefile
new file mode 100644
index 0000000..7448b30
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MLXFW) += mlxfw.o
+mlxfw-objs := mlxfw_fsm.o mlxfw_mfa2_tlv_multi.o mlxfw_mfa2.o
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
new file mode 100644
index 0000000..beea4ba
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
@@ -0,0 +1,102 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXFW_H
+#define _MLXFW_H
+
+#include <linux/firmware.h>
+
+enum mlxfw_fsm_state {
+ MLXFW_FSM_STATE_IDLE,
+ MLXFW_FSM_STATE_LOCKED,
+ MLXFW_FSM_STATE_INITIALIZE,
+ MLXFW_FSM_STATE_DOWNLOAD,
+ MLXFW_FSM_STATE_VERIFY,
+ MLXFW_FSM_STATE_APPLY,
+ MLXFW_FSM_STATE_ACTIVATE,
+};
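+
+/* State flow as driven by mlxfw_firmware_flash() in mlxfw_fsm.c (an outline
+ * of how this library walks the states, not firmware documentation):
+ * IDLE -> LOCKED via fsm_lock(); then, per component,
+ * LOCKED -> DOWNLOAD via fsm_component_update() and fsm_block_download(),
+ * DOWNLOAD -> LOCKED via fsm_component_verify(); finally
+ * LOCKED -> ACTIVATE -> LOCKED via fsm_activate(), then fsm_release().
+ */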
+
+enum mlxfw_fsm_state_err {
+ MLXFW_FSM_STATE_ERR_OK,
+ MLXFW_FSM_STATE_ERR_ERROR,
+ MLXFW_FSM_STATE_ERR_REJECTED_DIGEST_ERR,
+ MLXFW_FSM_STATE_ERR_REJECTED_NOT_APPLICABLE,
+ MLXFW_FSM_STATE_ERR_REJECTED_UNKNOWN_KEY,
+ MLXFW_FSM_STATE_ERR_REJECTED_AUTH_FAILED,
+ MLXFW_FSM_STATE_ERR_REJECTED_UNSIGNED,
+ MLXFW_FSM_STATE_ERR_REJECTED_KEY_NOT_APPLICABLE,
+ MLXFW_FSM_STATE_ERR_REJECTED_BAD_FORMAT,
+ MLXFW_FSM_STATE_ERR_BLOCKED_PENDING_RESET,
+ MLXFW_FSM_STATE_ERR_MAX,
+};
+
+struct mlxfw_dev;
+
+struct mlxfw_dev_ops {
+ int (*component_query)(struct mlxfw_dev *mlxfw_dev, u16 component_index,
+ u32 *p_max_size, u8 *p_align_bits,
+ u16 *p_max_write_size);
+
+ int (*fsm_lock)(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle);
+
+ int (*fsm_component_update)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u16 component_index, u32 component_size);
+
+ int (*fsm_block_download)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u8 *data, u16 size, u32 offset);
+
+ int (*fsm_component_verify)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u16 component_index);
+
+ int (*fsm_activate)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);
+
+ int (*fsm_query_state)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ enum mlxfw_fsm_state *fsm_state,
+ enum mlxfw_fsm_state_err *fsm_state_err);
+
+ void (*fsm_cancel)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);
+
+ void (*fsm_release)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);
+};
+
+struct mlxfw_dev {
+ const struct mlxfw_dev_ops *ops;
+ const char *psid;
+ u16 psid_size;
+};
+
+int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
+ const struct firmware *firmware);
+
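+/* A minimal sketch of how a driver binds to this library (illustrative
+ * only; the my_* names are hypothetical):
+ *
+ *	static const struct mlxfw_dev_ops my_mlxfw_ops = {
+ *		.component_query	= my_component_query,
+ *		.fsm_lock		= my_fsm_lock,
+ *		.fsm_component_update	= my_fsm_component_update,
+ *		.fsm_block_download	= my_fsm_block_download,
+ *		.fsm_component_verify	= my_fsm_component_verify,
+ *		.fsm_activate		= my_fsm_activate,
+ *		.fsm_query_state	= my_fsm_query_state,
+ *		.fsm_cancel		= my_fsm_cancel,
+ *		.fsm_release		= my_fsm_release,
+ *	};
+ *
+ * The driver embeds struct mlxfw_dev, fills ops, psid and psid_size, and
+ * passes it to mlxfw_firmware_flash() together with a request_firmware()
+ * result.
+ */
+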
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
new file mode 100644
index 0000000..2cf8912
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -0,0 +1,273 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) "mlxfw: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+
+#include "mlxfw.h"
+#include "mlxfw_mfa2.h"
+
+#define MLXFW_FSM_STATE_WAIT_CYCLE_MS 200
+#define MLXFW_FSM_STATE_WAIT_TIMEOUT_MS 30000
+#define MLXFW_FSM_STATE_WAIT_ROUNDS \
+ (MLXFW_FSM_STATE_WAIT_TIMEOUT_MS / MLXFW_FSM_STATE_WAIT_CYCLE_MS)
+#define MLXFW_FSM_MAX_COMPONENT_SIZE (10 * (1 << 20)) /* 10 MiB */
+
+static const char * const mlxfw_fsm_state_err_str[] = {
+ [MLXFW_FSM_STATE_ERR_ERROR] =
+ "general error",
+ [MLXFW_FSM_STATE_ERR_REJECTED_DIGEST_ERR] =
+ "component hash mismatch",
+ [MLXFW_FSM_STATE_ERR_REJECTED_NOT_APPLICABLE] =
+ "component not applicable",
+ [MLXFW_FSM_STATE_ERR_REJECTED_UNKNOWN_KEY] =
+ "unknown key",
+ [MLXFW_FSM_STATE_ERR_REJECTED_AUTH_FAILED] =
+ "authentication failed",
+ [MLXFW_FSM_STATE_ERR_REJECTED_UNSIGNED] =
+ "component was not signed",
+ [MLXFW_FSM_STATE_ERR_REJECTED_KEY_NOT_APPLICABLE] =
+ "key not applicable",
+ [MLXFW_FSM_STATE_ERR_REJECTED_BAD_FORMAT] =
+ "bad format",
+ [MLXFW_FSM_STATE_ERR_BLOCKED_PENDING_RESET] =
+ "pending reset",
+ [MLXFW_FSM_STATE_ERR_MAX] =
+ "unknown error"
+};
+
+static int mlxfw_fsm_state_wait(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ enum mlxfw_fsm_state fsm_state)
+{
+ enum mlxfw_fsm_state_err fsm_state_err;
+ enum mlxfw_fsm_state curr_fsm_state;
+ int times;
+ int err;
+
+ times = MLXFW_FSM_STATE_WAIT_ROUNDS;
+retry:
+ err = mlxfw_dev->ops->fsm_query_state(mlxfw_dev, fwhandle,
+ &curr_fsm_state, &fsm_state_err);
+ if (err)
+ return err;
+
+ if (fsm_state_err != MLXFW_FSM_STATE_ERR_OK) {
+ pr_err("Firmware flash failed: %s\n",
+ mlxfw_fsm_state_err_str[fsm_state_err]);
+ return -EINVAL;
+ }
+ if (curr_fsm_state != fsm_state) {
+ if (--times == 0) {
+ pr_err("Timeout reached on FSM state change");
+ return -ETIMEDOUT;
+ }
+ msleep(MLXFW_FSM_STATE_WAIT_CYCLE_MS);
+ goto retry;
+ }
+ return 0;
+}
+
+#define MLXFW_ALIGN_DOWN(x, align_bits) ((x) & ~((1 << (align_bits)) - 1))
+#define MLXFW_ALIGN_UP(x, align_bits) \
+ MLXFW_ALIGN_DOWN((x) + ((1 << (align_bits)) - 1), (align_bits))
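+
+/* For example, with align_bits = 2 (4-byte alignment):
+ * MLXFW_ALIGN_DOWN(5, 2) = 5 & ~3 = 4
+ * MLXFW_ALIGN_UP(5, 2) = MLXFW_ALIGN_DOWN(5 + 3, 2) = 8
+ */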
+
+static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle,
+ struct mlxfw_mfa2_component *comp)
+{
+ u16 comp_max_write_size;
+ u8 comp_align_bits;
+ u32 comp_max_size;
+ u16 block_size;
+ u8 *block_ptr;
+ u32 offset;
+ int err;
+
+ err = mlxfw_dev->ops->component_query(mlxfw_dev, comp->index,
+ &comp_max_size, &comp_align_bits,
+ &comp_max_write_size);
+ if (err)
+ return err;
+
+ comp_max_size = min_t(u32, comp_max_size, MLXFW_FSM_MAX_COMPONENT_SIZE);
+ if (comp->data_size > comp_max_size) {
+ pr_err("Component %d is of size %d which is bigger than limit %d\n",
+ comp->index, comp->data_size, comp_max_size);
+ return -EINVAL;
+ }
+
+ comp_max_write_size = MLXFW_ALIGN_DOWN(comp_max_write_size,
+ comp_align_bits);
+
+ pr_debug("Component update\n");
+ err = mlxfw_dev->ops->fsm_component_update(mlxfw_dev, fwhandle,
+ comp->index,
+ comp->data_size);
+ if (err)
+ return err;
+
+ err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle,
+ MLXFW_FSM_STATE_DOWNLOAD);
+ if (err)
+ goto err_out;
+
+ pr_debug("Component download\n");
+ for (offset = 0;
+ offset < MLXFW_ALIGN_UP(comp->data_size, comp_align_bits);
+ offset += comp_max_write_size) {
+ block_ptr = comp->data + offset;
+ block_size = (u16) min_t(u32, comp->data_size - offset,
+ comp_max_write_size);
+ err = mlxfw_dev->ops->fsm_block_download(mlxfw_dev, fwhandle,
+ block_ptr, block_size,
+ offset);
+ if (err)
+ goto err_out;
+ }
+
+ pr_debug("Component verify\n");
+ err = mlxfw_dev->ops->fsm_component_verify(mlxfw_dev, fwhandle,
+ comp->index);
+ if (err)
+ goto err_out;
+
+ err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle, MLXFW_FSM_STATE_LOCKED);
+ if (err)
+ goto err_out;
+ return 0;
+
+err_out:
+ mlxfw_dev->ops->fsm_cancel(mlxfw_dev, fwhandle);
+ return err;
+}
+
+static int mlxfw_flash_components(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ struct mlxfw_mfa2_file *mfa2_file)
+{
+ u32 component_count;
+ int err;
+ int i;
+
+ err = mlxfw_mfa2_file_component_count(mfa2_file, mlxfw_dev->psid,
+ mlxfw_dev->psid_size,
+ &component_count);
+ if (err) {
+ pr_err("Could not find device PSID in MFA2 file\n");
+ return err;
+ }
+
+ for (i = 0; i < component_count; i++) {
+ struct mlxfw_mfa2_component *comp;
+
+ comp = mlxfw_mfa2_file_component_get(mfa2_file, mlxfw_dev->psid,
+ mlxfw_dev->psid_size, i);
+ if (IS_ERR(comp))
+ return PTR_ERR(comp);
+
+ pr_info("Flashing component type %d\n", comp->index);
+ err = mlxfw_flash_component(mlxfw_dev, fwhandle, comp);
+ mlxfw_mfa2_file_component_put(comp);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
+ const struct firmware *firmware)
+{
+ struct mlxfw_mfa2_file *mfa2_file;
+ u32 fwhandle;
+ int err;
+
+ if (!mlxfw_mfa2_check(firmware)) {
+ pr_err("Firmware file is not MFA2\n");
+ return -EINVAL;
+ }
+
+ mfa2_file = mlxfw_mfa2_file_init(firmware);
+ if (IS_ERR(mfa2_file))
+ return PTR_ERR(mfa2_file);
+
+ pr_info("Initialize firmware flash process\n");
+ err = mlxfw_dev->ops->fsm_lock(mlxfw_dev, &fwhandle);
+ if (err) {
+ pr_err("Could not lock the firmware FSM\n");
+ goto err_fsm_lock;
+ }
+
+ err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle,
+ MLXFW_FSM_STATE_LOCKED);
+ if (err)
+ goto err_state_wait_idle_to_locked;
+
+ err = mlxfw_flash_components(mlxfw_dev, fwhandle, mfa2_file);
+ if (err)
+ goto err_flash_components;
+
+ pr_debug("Activate image\n");
+ err = mlxfw_dev->ops->fsm_activate(mlxfw_dev, fwhandle);
+ if (err) {
+ pr_err("Could not activate the downloaded image\n");
+ goto err_fsm_activate;
+ }
+
+ err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle, MLXFW_FSM_STATE_LOCKED);
+ if (err)
+ goto err_state_wait_activate_to_locked;
+
+ pr_debug("Handle release\n");
+ mlxfw_dev->ops->fsm_release(mlxfw_dev, fwhandle);
+
+ pr_info("Firmware flash done.\n");
+ mlxfw_mfa2_file_fini(mfa2_file);
+ return 0;
+
+err_state_wait_activate_to_locked:
+err_fsm_activate:
+err_flash_components:
+err_state_wait_idle_to_locked:
+ mlxfw_dev->ops->fsm_release(mlxfw_dev, fwhandle);
+err_fsm_lock:
+ mlxfw_mfa2_file_fini(mfa2_file);
+ return err;
+}
+EXPORT_SYMBOL(mlxfw_firmware_flash);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Yotam Gigi <yotamg@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox firmware flash lib");
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
new file mode 100644
index 0000000..993cb5b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
@@ -0,0 +1,619 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) "mlxfw_mfa2: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/xz.h>
+#include "mlxfw_mfa2.h"
+#include "mlxfw_mfa2_file.h"
+#include "mlxfw_mfa2_tlv.h"
+#include "mlxfw_mfa2_format.h"
+#include "mlxfw_mfa2_tlv_multi.h"
+
+/* MFA2 FILE
+ * +----------------------------------+
+ * | MFA2 finger print |
+ * +----------------------------------+
+ * | package descriptor multi_tlv |
+ * | +------------------------------+ | +-----------------+
+ * | | package descriptor tlv +-----> |num_devices=n |
+ * | +------------------------------+ | |num_components=m |
+ * +----------------------------------+ |CB offset |
+ * | device descriptor multi_tlv | |... |
+ * | +------------------------------+ | | |
+ * | | PSID tlv | | +-----------------+
+ * | +------------------------------+ |
+ * | | component index tlv | |
+ * | +------------------------------+ |
+ * +----------------------------------+
+ * | component descriptor multi_tlv |
+ * | +------------------------------+ | +-----------------+
+ * | | component descriptor tlv +-----> |Among others: |
+ * | +------------------------------+ | |CB offset=o |
+ * +----------------------------------+ |comp index=i |
+ * | | |... |
+ * | | | |
+ * | | +-----------------+
+ * | COMPONENT BLOCK (CB) |
+ * | |
+ * | |
+ * | |
+ * +----------------------------------+
+ *
+ * On the top level, an MFA2 file contains:
+ * - Fingerprint
+ * - Several multi_tlvs (TLVs of type MLXFW_MFA2_TLV_MULTI, as defined in
+ * mlxfw_mfa2_format.h)
+ * - Compressed content block
+ *
+ * The first multi_tlv
+ * -------------------
+ * The first multi TLV is treated as the package descriptor, and is expected
+ * to have a first TLV child of type MLXFW_MFA2_TLV_PACKAGE_DESCRIPTOR, which
+ * contains all the global information needed to parse the file. Among other
+ * things, it contains the number of device descriptors and component
+ * descriptors following this multi TLV.
+ *
+ * The device descriptor multi_tlv
+ * -------------------------------
+ * The multi TLVs following the package descriptor are treated as device
+ * descriptors, and are expected to have the following children:
+ * - A PSID TLV child of type MLXFW_MFA2_TLV_PSID containing that device's
+ *   PSID.
+ * - A component index TLV of type MLXFW_MFA2_TLV_COMPONENT_PTR containing
+ *   that device's component index.
+ *
+ * The component descriptor multi_tlv
+ * ----------------------------------
+ * The multi TLVs following the device descriptor multi TLVs are treated as
+ * component descriptors, and are expected to have a first child of type
+ * MLXFW_MFA2_TLV_COMPONENT_DESCRIPTOR that contains, among other things, the
+ * component index needed for the flash process and the offset of the binary
+ * within the component block.
+ */
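+
+/* Putting the layout together, the parse flow implemented below is roughly:
+ * 1. mlxfw_mfa2_check() verifies the fingerprint.
+ * 2. mlxfw_mfa2_file_init() reads the package descriptor to learn the device
+ *    and component counts and to locate the component block (CB).
+ * 3. mlxfw_mfa2_tlv_dev_get() walks the device descriptors, matching a PSID.
+ * 4. mlxfw_mfa2_file_component_get() follows the matched component pointer
+ *    and XZ-decompresses the referenced range of the CB.
+ */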
+
+static const u8 mlxfw_mfa2_fingerprint[] = "MLNX.MFA2.XZ.00!";
+static const int mlxfw_mfa2_fingerprint_len =
+ sizeof(mlxfw_mfa2_fingerprint) - 1;
+
+static const u8 mlxfw_mfa2_comp_magic[] = "#BIN.COMPONENT!#";
+static const int mlxfw_mfa2_comp_magic_len = sizeof(mlxfw_mfa2_comp_magic) - 1;
+
+bool mlxfw_mfa2_check(const struct firmware *fw)
+{
+ if (fw->size < sizeof(mlxfw_mfa2_fingerprint))
+ return false;
+
+ return memcmp(fw->data, mlxfw_mfa2_fingerprint,
+ mlxfw_mfa2_fingerprint_len) == 0;
+}
+
+static bool
+mlxfw_mfa2_tlv_multi_validate(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv_multi *multi)
+{
+ const struct mlxfw_mfa2_tlv *tlv;
+ u16 idx;
+
+ /* Check that all children are valid */
+ mlxfw_mfa2_tlv_multi_foreach(mfa2_file, tlv, idx, multi) {
+ if (!tlv) {
+ pr_err("Multi has invalid child");
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool
+mlxfw_mfa2_file_dev_validate(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv *dev_tlv,
+ u16 dev_idx)
+{
+ const struct mlxfw_mfa2_tlv_component_ptr *cptr;
+ const struct mlxfw_mfa2_tlv_multi *multi;
+ const struct mlxfw_mfa2_tlv_psid *psid;
+ const struct mlxfw_mfa2_tlv *tlv;
+ u16 cptr_count;
+ u16 cptr_idx;
+ int err;
+
+ pr_debug("Device %d\n", dev_idx);
+
+ multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, dev_tlv);
+ if (!multi) {
+ pr_err("Device %d is not a valid TLV error\n", dev_idx);
+ return false;
+ }
+
+ if (!mlxfw_mfa2_tlv_multi_validate(mfa2_file, multi))
+ return false;
+
+ /* Validate the device has PSID tlv */
+ tlv = mlxfw_mfa2_tlv_multi_child_find(mfa2_file, multi,
+ MLXFW_MFA2_TLV_PSID, 0);
+ if (!tlv) {
+ pr_err("Device %d does not have PSID\n", dev_idx);
+ return false;
+ }
+
+ psid = mlxfw_mfa2_tlv_psid_get(mfa2_file, tlv);
+ if (!psid) {
+ pr_err("Device %d PSID TLV is not valid\n", dev_idx);
+ return false;
+ }
+
+ print_hex_dump_debug(" -- Device PSID ", DUMP_PREFIX_NONE, 16, 16,
+ psid->psid, be16_to_cpu(tlv->len), true);
+
+ /* Validate the device has COMPONENT_PTR */
+ err = mlxfw_mfa2_tlv_multi_child_count(mfa2_file, multi,
+ MLXFW_MFA2_TLV_COMPONENT_PTR,
+ &cptr_count);
+ if (err)
+ return false;
+
+ if (cptr_count == 0) {
+ pr_err("Device %d has no components\n", dev_idx);
+ return false;
+ }
+
+ for (cptr_idx = 0; cptr_idx < cptr_count; cptr_idx++) {
+ tlv = mlxfw_mfa2_tlv_multi_child_find(mfa2_file, multi,
+ MLXFW_MFA2_TLV_COMPONENT_PTR,
+ cptr_idx);
+ if (!tlv)
+ return false;
+
+ cptr = mlxfw_mfa2_tlv_component_ptr_get(mfa2_file, tlv);
+ if (!cptr) {
+ pr_err("Device %d COMPONENT_PTR TLV is not valid\n",
+ dev_idx);
+ return false;
+ }
+
+ pr_debug(" -- Component index %d\n",
+ be16_to_cpu(cptr->component_index));
+ }
+ return true;
+}
+
+static bool
+mlxfw_mfa2_file_comp_validate(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv *comp_tlv,
+ u16 comp_idx)
+{
+ const struct mlxfw_mfa2_tlv_component_descriptor *cdesc;
+ const struct mlxfw_mfa2_tlv_multi *multi;
+ const struct mlxfw_mfa2_tlv *tlv;
+
+ pr_debug("Component %d\n", comp_idx);
+
+ multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, comp_tlv);
+ if (!multi) {
+ pr_err("Component %d is not a valid TLV error\n", comp_idx);
+ return false;
+ }
+
+ if (!mlxfw_mfa2_tlv_multi_validate(mfa2_file, multi))
+ return false;
+
+ /* Check that the component has a COMPONENT_DESCRIPTOR as its first child */
+ tlv = mlxfw_mfa2_tlv_multi_child(mfa2_file, multi);
+ if (!tlv) {
+ pr_err("Component descriptor %d multi TLV error\n", comp_idx);
+ return false;
+ }
+
+ cdesc = mlxfw_mfa2_tlv_component_descriptor_get(mfa2_file, tlv);
+ if (!cdesc) {
+ pr_err("Component %d does not have a valid descriptor\n",
+ comp_idx);
+ return false;
+ }
+ pr_debug(" -- Component type %d\n", be16_to_cpu(cdesc->identifier));
+ pr_debug(" -- Offset 0x%llx and size %d\n",
+ ((u64) be32_to_cpu(cdesc->cb_offset_h) << 32)
+ | be32_to_cpu(cdesc->cb_offset_l), be32_to_cpu(cdesc->size));
+
+ return true;
+}
+
+static bool mlxfw_mfa2_file_validate(const struct mlxfw_mfa2_file *mfa2_file)
+{
+ const struct mlxfw_mfa2_tlv *tlv;
+ u16 idx;
+
+ pr_debug("Validating file\n");
+
+ /* check that all the devices exist */
+ mlxfw_mfa2_tlv_foreach(mfa2_file, tlv, idx, mfa2_file->first_dev,
+ mfa2_file->dev_count) {
+ if (!tlv) {
+ pr_err("Device TLV error\n");
+ return false;
+ }
+
+ /* Check each device */
+ if (!mlxfw_mfa2_file_dev_validate(mfa2_file, tlv, idx))
+ return false;
+ }
+
+ /* check that all the components exist */
+ mlxfw_mfa2_tlv_foreach(mfa2_file, tlv, idx, mfa2_file->first_component,
+ mfa2_file->component_count) {
+ if (!tlv) {
+ pr_err("Device TLV error\n");
+ return false;
+ }
+
+ /* Check each component */
+ if (!mlxfw_mfa2_file_comp_validate(mfa2_file, tlv, idx))
+ return false;
+ }
+ return true;
+}
+
+struct mlxfw_mfa2_file *mlxfw_mfa2_file_init(const struct firmware *fw)
+{
+ const struct mlxfw_mfa2_tlv_package_descriptor *pd;
+ const struct mlxfw_mfa2_tlv_multi *multi;
+ const struct mlxfw_mfa2_tlv *multi_child;
+ const struct mlxfw_mfa2_tlv *first_tlv;
+ struct mlxfw_mfa2_file *mfa2_file;
+ const void *first_tlv_ptr;
+ const void *cb_top_ptr;
+
+ mfa2_file = kcalloc(1, sizeof(*mfa2_file), GFP_KERNEL);
+ if (!mfa2_file)
+ return ERR_PTR(-ENOMEM);
+
+ mfa2_file->fw = fw;
+ first_tlv_ptr = fw->data + NLA_ALIGN(mlxfw_mfa2_fingerprint_len);
+ first_tlv = mlxfw_mfa2_tlv_get(mfa2_file, first_tlv_ptr);
+ if (!first_tlv) {
+ pr_err("Could not parse package descriptor TLV\n");
+ goto err_out;
+ }
+
+ multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, first_tlv);
+ if (!multi) {
+ pr_err("First TLV is not of valid multi type\n");
+ goto err_out;
+ }
+
+ multi_child = mlxfw_mfa2_tlv_multi_child(mfa2_file, multi);
+ if (!multi_child)
+ goto err_out;
+
+ pd = mlxfw_mfa2_tlv_package_descriptor_get(mfa2_file, multi_child);
+ if (!pd) {
+ pr_err("Could not parse package descriptor TLV\n");
+ goto err_out;
+ }
+
+ mfa2_file->first_dev = mlxfw_mfa2_tlv_next(mfa2_file, first_tlv);
+ if (!mfa2_file->first_dev) {
+ pr_err("First device TLV is not valid\n");
+ goto err_out;
+ }
+
+ mfa2_file->dev_count = be16_to_cpu(pd->num_devices);
+ mfa2_file->first_component = mlxfw_mfa2_tlv_advance(mfa2_file,
+ mfa2_file->first_dev,
+ mfa2_file->dev_count);
+ mfa2_file->component_count = be16_to_cpu(pd->num_components);
+ mfa2_file->cb = fw->data + NLA_ALIGN(be32_to_cpu(pd->cb_offset));
+ if (!mlxfw_mfa2_valid_ptr(mfa2_file, mfa2_file->cb)) {
+ pr_err("Component block is out side the file\n");
+ goto err_out;
+ }
+ mfa2_file->cb_archive_size = be32_to_cpu(pd->cb_archive_size);
+ cb_top_ptr = mfa2_file->cb + mfa2_file->cb_archive_size - 1;
+ if (!mlxfw_mfa2_valid_ptr(mfa2_file, cb_top_ptr)) {
+ pr_err("Component block size is too big\n");
+ goto err_out;
+ }
+
+ if (!mlxfw_mfa2_file_validate(mfa2_file))
+ goto err_out;
+ return mfa2_file;
+err_out:
+ kfree(mfa2_file);
+ return ERR_PTR(-EINVAL);
+}
+
+static const struct mlxfw_mfa2_tlv_multi *
+mlxfw_mfa2_tlv_dev_get(const struct mlxfw_mfa2_file *mfa2_file,
+ const char *psid, u16 psid_size)
+{
+ const struct mlxfw_mfa2_tlv_psid *tlv_psid;
+ const struct mlxfw_mfa2_tlv_multi *dev_multi;
+ const struct mlxfw_mfa2_tlv *dev_tlv;
+ const struct mlxfw_mfa2_tlv *tlv;
+ u32 idx;
+
+ /* for each device tlv */
+ mlxfw_mfa2_tlv_foreach(mfa2_file, dev_tlv, idx, mfa2_file->first_dev,
+ mfa2_file->dev_count) {
+ if (!dev_tlv)
+ return NULL;
+
+ dev_multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, dev_tlv);
+ if (!dev_multi)
+ return NULL;
+
+ /* find psid child and compare */
+ tlv = mlxfw_mfa2_tlv_multi_child_find(mfa2_file, dev_multi,
+ MLXFW_MFA2_TLV_PSID, 0);
+ if (!tlv)
+ return NULL;
+ if (be16_to_cpu(tlv->len) != psid_size)
+ continue;
+
+ tlv_psid = mlxfw_mfa2_tlv_psid_get(mfa2_file, tlv);
+ if (!tlv_psid)
+ return NULL;
+
+ if (memcmp(psid, tlv_psid->psid, psid_size) == 0)
+ return dev_multi;
+ }
+
+ return NULL;
+}
+
+int mlxfw_mfa2_file_component_count(const struct mlxfw_mfa2_file *mfa2_file,
+ const char *psid, u32 psid_size,
+ u32 *p_count)
+{
+ const struct mlxfw_mfa2_tlv_multi *dev_multi;
+ u16 count;
+ int err;
+
+ dev_multi = mlxfw_mfa2_tlv_dev_get(mfa2_file, psid, psid_size);
+ if (!dev_multi)
+ return -EINVAL;
+
+ err = mlxfw_mfa2_tlv_multi_child_count(mfa2_file, dev_multi,
+ MLXFW_MFA2_TLV_COMPONENT_PTR,
+ &count);
+ if (err)
+ return err;
+
+ *p_count = count;
+ return 0;
+}
+
+static int mlxfw_mfa2_xz_dec_run(struct xz_dec *xz_dec, struct xz_buf *xz_buf,
+ bool *finished)
+{
+ enum xz_ret xz_ret;
+
+ xz_ret = xz_dec_run(xz_dec, xz_buf);
+
+ switch (xz_ret) {
+ case XZ_STREAM_END:
+ *finished = true;
+ return 0;
+ case XZ_OK:
+ *finished = false;
+ return 0;
+ case XZ_MEM_ERROR:
+ pr_err("xz no memory\n");
+ return -ENOMEM;
+ case XZ_DATA_ERROR:
+ pr_err("xz file corrupted\n");
+ return -EINVAL;
+ case XZ_FORMAT_ERROR:
+ pr_err("xz format not found\n");
+ return -EINVAL;
+ case XZ_OPTIONS_ERROR:
+ pr_err("unsupported xz option\n");
+ return -EINVAL;
+ case XZ_MEMLIMIT_ERROR:
+ pr_err("xz dictionary too small\n");
+ return -EINVAL;
+ default:
+ pr_err("xz error %d\n", xz_ret);
+ return -EINVAL;
+ }
+}
+
+static int mlxfw_mfa2_file_cb_offset_xz(const struct mlxfw_mfa2_file *mfa2_file,
+ off_t off, size_t size, u8 *buf)
+{
+ struct xz_dec *xz_dec;
+ struct xz_buf dec_buf;
+ off_t curr_off = 0;
+ bool finished;
+ int err;
+
+ xz_dec = xz_dec_init(XZ_DYNALLOC, (u32) -1);
+ if (!xz_dec)
+ return -EINVAL;
+
+ dec_buf.in_size = mfa2_file->cb_archive_size;
+ dec_buf.in = mfa2_file->cb;
+ dec_buf.in_pos = 0;
+ dec_buf.out = buf;
+
+ /* decode up to the offset */
+ do {
+ dec_buf.out_pos = 0;
+ dec_buf.out_size = min_t(size_t, size, off - curr_off);
+ if (dec_buf.out_size == 0)
+ break;
+
+ err = mlxfw_mfa2_xz_dec_run(xz_dec, &dec_buf, &finished);
+ if (err)
+ goto out;
+ if (finished) {
+ pr_err("xz section too short\n");
+ err = -EINVAL;
+ goto out;
+ }
+ curr_off += dec_buf.out_pos;
+ } while (curr_off != off);
+
+ /* decode the needed section */
+ dec_buf.out_pos = 0;
+ dec_buf.out_size = size;
+ err = mlxfw_mfa2_xz_dec_run(xz_dec, &dec_buf, &finished);
+out:
+ xz_dec_end(xz_dec);
+ return err;
+}
+
+static const struct mlxfw_mfa2_tlv_component_descriptor *
+mlxfw_mfa2_file_component_tlv_get(const struct mlxfw_mfa2_file *mfa2_file,
+ u16 comp_index)
+{
+ const struct mlxfw_mfa2_tlv_multi *multi;
+ const struct mlxfw_mfa2_tlv *multi_child;
+ const struct mlxfw_mfa2_tlv *comp_tlv;
+
+ if (comp_index > mfa2_file->component_count)
+ return NULL;
+
+ comp_tlv = mlxfw_mfa2_tlv_advance(mfa2_file, mfa2_file->first_component,
+ comp_index);
+ if (!comp_tlv)
+ return NULL;
+
+ multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, comp_tlv);
+ if (!multi)
+ return NULL;
+
+ multi_child = mlxfw_mfa2_tlv_multi_child(mfa2_file, multi);
+ if (!multi_child)
+ return NULL;
+
+ return mlxfw_mfa2_tlv_component_descriptor_get(mfa2_file, multi_child);
+}
+
+struct mlxfw_mfa2_comp_data {
+ struct mlxfw_mfa2_component comp;
+ u8 buff[0];
+};
+
+static const struct mlxfw_mfa2_tlv_component_descriptor *
+mlxfw_mfa2_file_component_find(const struct mlxfw_mfa2_file *mfa2_file,
+ const char *psid, int psid_size,
+ int component_index)
+{
+ const struct mlxfw_mfa2_tlv_component_ptr *cptr;
+ const struct mlxfw_mfa2_tlv_multi *dev_multi;
+ const struct mlxfw_mfa2_tlv *cptr_tlv;
+ u16 comp_idx;
+
+ dev_multi = mlxfw_mfa2_tlv_dev_get(mfa2_file, psid, psid_size);
+ if (!dev_multi)
+ return NULL;
+
+ cptr_tlv = mlxfw_mfa2_tlv_multi_child_find(mfa2_file, dev_multi,
+ MLXFW_MFA2_TLV_COMPONENT_PTR,
+ component_index);
+ if (!cptr_tlv)
+ return NULL;
+
+ cptr = mlxfw_mfa2_tlv_component_ptr_get(mfa2_file, cptr_tlv);
+ if (!cptr)
+ return NULL;
+
+ comp_idx = be16_to_cpu(cptr->component_index);
+ return mlxfw_mfa2_file_component_tlv_get(mfa2_file, comp_idx);
+}
+
+struct mlxfw_mfa2_component *
+mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
+ const char *psid, int psid_size,
+ int component_index)
+{
+ const struct mlxfw_mfa2_tlv_component_descriptor *comp;
+ struct mlxfw_mfa2_comp_data *comp_data;
+ u32 comp_buf_size;
+ off_t cb_offset;
+ u32 comp_size;
+ int err;
+
+ comp = mlxfw_mfa2_file_component_find(mfa2_file, psid, psid_size,
+ component_index);
+ if (!comp)
+ return ERR_PTR(-EINVAL);
+
+ cb_offset = (u64) be32_to_cpu(comp->cb_offset_h) << 32 |
+ be32_to_cpu(comp->cb_offset_l);
+ comp_size = be32_to_cpu(comp->size);
+ comp_buf_size = comp_size + mlxfw_mfa2_comp_magic_len;
+
+ comp_data = kmalloc(sizeof(*comp_data) + comp_buf_size, GFP_KERNEL);
+ if (!comp_data)
+ return ERR_PTR(-ENOMEM);
+ comp_data->comp.data_size = comp_size;
+ comp_data->comp.index = be16_to_cpu(comp->identifier);
+ err = mlxfw_mfa2_file_cb_offset_xz(mfa2_file, cb_offset, comp_buf_size,
+ comp_data->buff);
+ if (err) {
+ pr_err("Component could not be reached in CB\n");
+ goto err_out;
+ }
+
+ if (memcmp(comp_data->buff, mlxfw_mfa2_comp_magic,
+ mlxfw_mfa2_comp_magic_len) != 0) {
+ pr_err("Component has wrong magic\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ comp_data->comp.data = comp_data->buff + mlxfw_mfa2_comp_magic_len;
+ return &comp_data->comp;
+err_out:
+ kfree(comp_data);
+ return ERR_PTR(err);
+}
+
+void mlxfw_mfa2_file_component_put(struct mlxfw_mfa2_component *comp)
+{
+ const struct mlxfw_mfa2_comp_data *comp_data;
+
+ comp_data = container_of(comp, struct mlxfw_mfa2_comp_data, comp);
+ kfree(comp_data);
+}
+
+void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file)
+{
+ kfree(mfa2_file);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h
new file mode 100644
index 0000000..20472aa
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h
@@ -0,0 +1,66 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXFW_MFA2_H
+#define _MLXFW_MFA2_H
+
+#include <linux/firmware.h>
+#include "mlxfw.h"
+
+struct mlxfw_mfa2_component {
+ u16 index;
+ u32 data_size;
+ u8 *data;
+};
+
+struct mlxfw_mfa2_file;
+
+bool mlxfw_mfa2_check(const struct firmware *fw);
+
+struct mlxfw_mfa2_file *mlxfw_mfa2_file_init(const struct firmware *fw);
+
+int mlxfw_mfa2_file_component_count(const struct mlxfw_mfa2_file *mfa2_file,
+ const char *psid, u32 psid_size,
+ u32 *p_count);
+
+struct mlxfw_mfa2_component *
+mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
+ const char *psid, int psid_size,
+ int component_index);
+
+void mlxfw_mfa2_file_component_put(struct mlxfw_mfa2_component *component);
+
+void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h
new file mode 100644
index 0000000..f667942
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h
@@ -0,0 +1,60 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXFW_MFA2_FILE_H
+#define _MLXFW_MFA2_FILE_H
+
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+
+struct mlxfw_mfa2_file {
+ const struct firmware *fw;
+ const struct mlxfw_mfa2_tlv *first_dev;
+ u16 dev_count;
+ const struct mlxfw_mfa2_tlv *first_component;
+ u16 component_count;
+ const void *cb; /* components block */
+ u32 cb_archive_size; /* size of compressed components block */
+};
+
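+/* Pointer sanity helper: a pointer is considered valid only if it lies
+ * strictly between the start of the firmware image and its one-past-the-end
+ * address. Callers check this before dereferencing anything while walking
+ * the MFA2 TLV stream.
+ */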
+static inline bool mlxfw_mfa2_valid_ptr(const struct mlxfw_mfa2_file *mfa2_file,
+ const void *ptr)
+{
+ const void *valid_to = mfa2_file->fw->data + mfa2_file->fw->size;
+ const void *valid_from = mfa2_file->fw->data;
+
+ return ptr > valid_from && ptr < valid_to;
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h
new file mode 100644
index 0000000..dd66737
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h
@@ -0,0 +1,103 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXFW_MFA2_FORMAT_H
+#define _MLXFW_MFA2_FORMAT_H
+
+#include "mlxfw_mfa2_file.h"
+#include "mlxfw_mfa2_tlv.h"
+
+enum mlxfw_mfa2_tlv_type {
+ MLXFW_MFA2_TLV_MULTI_PART = 0x01,
+ MLXFW_MFA2_TLV_PACKAGE_DESCRIPTOR = 0x02,
+ MLXFW_MFA2_TLV_COMPONENT_DESCRIPTOR = 0x04,
+ MLXFW_MFA2_TLV_COMPONENT_PTR = 0x22,
+ MLXFW_MFA2_TLV_PSID = 0x2A,
+};
+
+enum mlxfw_mfa2_compression_type {
+ MLXFW_MFA2_COMPRESSION_TYPE_NONE,
+ MLXFW_MFA2_COMPRESSION_TYPE_XZ,
+};
+
+struct mlxfw_mfa2_tlv_package_descriptor {
+ __be16 num_components;
+ __be16 num_devices;
+ __be32 cb_offset;
+ __be32 cb_archive_size;
+ __be32 cb_size_h;
+ __be32 cb_size_l;
+ u8 padding[3];
+ u8 cv_compression;
+ __be32 user_data_offset;
+} __packed;
+
+MLXFW_MFA2_TLV(package_descriptor, struct mlxfw_mfa2_tlv_package_descriptor,
+ MLXFW_MFA2_TLV_PACKAGE_DESCRIPTOR);
+
+struct mlxfw_mfa2_tlv_multi {
+ __be16 num_extensions;
+ __be16 total_len;
+} __packed;
+
+MLXFW_MFA2_TLV(multi, struct mlxfw_mfa2_tlv_multi,
+ MLXFW_MFA2_TLV_MULTI_PART);
+
+struct mlxfw_mfa2_tlv_psid {
+ u8 psid[0];
+} __packed;
+
+MLXFW_MFA2_TLV_VARSIZE(psid, struct mlxfw_mfa2_tlv_psid,
+ MLXFW_MFA2_TLV_PSID);
+
+struct mlxfw_mfa2_tlv_component_ptr {
+ __be16 storage_id;
+ __be16 component_index;
+ __be32 storage_address;
+} __packed;
+
+MLXFW_MFA2_TLV(component_ptr, struct mlxfw_mfa2_tlv_component_ptr,
+ MLXFW_MFA2_TLV_COMPONENT_PTR);
+
+struct mlxfw_mfa2_tlv_component_descriptor {
+ __be16 pldm_classification;
+ __be16 identifier;
+ __be32 cb_offset_h;
+ __be32 cb_offset_l;
+ __be32 size;
+} __packed;
+
+MLXFW_MFA2_TLV(component_descriptor, struct mlxfw_mfa2_tlv_component_descriptor,
+ MLXFW_MFA2_TLV_COMPONENT_DESCRIPTOR);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
new file mode 100644
index 0000000..cc013e7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
@@ -0,0 +1,98 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXFW_MFA2_TLV_H
+#define _MLXFW_MFA2_TLV_H
+
+#include <linux/kernel.h>
+#include "mlxfw_mfa2_file.h"
+
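+/* Generic MFA2 record header: a version/type pair followed by a big-endian
+ * payload length. The length counts the payload only; the full, aligned
+ * record size is computed by MLXSW_MFA2_TLV_TOTAL_SIZE in
+ * mlxfw_mfa2_tlv_multi.c.
+ */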
+struct mlxfw_mfa2_tlv {
+ u8 version;
+ u8 type;
+ __be16 len;
+ u8 data[0];
+} __packed;
+
+static inline const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_get(const struct mlxfw_mfa2_file *mfa2_file, const void *ptr)
+{
+ if (!mlxfw_mfa2_valid_ptr(mfa2_file, ptr) ||
+ !mlxfw_mfa2_valid_ptr(mfa2_file, ptr + sizeof(struct mlxfw_mfa2_tlv)))
+ return NULL;
+ return ptr;
+}
+
+static inline const void *
+mlxfw_mfa2_tlv_payload_get(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv *tlv, u8 payload_type,
+ size_t payload_size, bool varsize)
+{
+ void *tlv_top;
+
+ tlv_top = (void *) tlv + be16_to_cpu(tlv->len) - 1;
+ if (!mlxfw_mfa2_valid_ptr(mfa2_file, tlv) ||
+ !mlxfw_mfa2_valid_ptr(mfa2_file, tlv_top))
+ return NULL;
+ if (tlv->type != payload_type)
+ return NULL;
+ if (varsize && (be16_to_cpu(tlv->len) < payload_size))
+ return NULL;
+ if (!varsize && (be16_to_cpu(tlv->len) != payload_size))
+ return NULL;
+
+ return tlv->data;
+}
+
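+/* Generate a typed accessor, mlxfw_mfa2_tlv_<name>_get(), that validates the
+ * TLV type and payload size before returning the payload cast to the given
+ * struct. The _VARSIZE variant accepts payloads larger than the struct, as
+ * needed for trailing variable-length data such as the PSID.
+ */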
+#define MLXFW_MFA2_TLV(name, payload_type, tlv_type) \
+static inline const payload_type * \
+mlxfw_mfa2_tlv_ ## name ## _get(const struct mlxfw_mfa2_file *mfa2_file, \
+ const struct mlxfw_mfa2_tlv *tlv) \
+{ \
+ return mlxfw_mfa2_tlv_payload_get(mfa2_file, tlv, \
+ tlv_type, sizeof(payload_type), \
+ false); \
+}
+
+#define MLXFW_MFA2_TLV_VARSIZE(name, payload_type, tlv_type) \
+static inline const payload_type * \
+mlxfw_mfa2_tlv_ ## name ## _get(const struct mlxfw_mfa2_file *mfa2_file, \
+ const struct mlxfw_mfa2_tlv *tlv) \
+{ \
+ return mlxfw_mfa2_tlv_payload_get(mfa2_file, tlv, \
+ tlv_type, sizeof(payload_type), \
+ true); \
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
new file mode 100644
index 0000000..0094b92
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
@@ -0,0 +1,126 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) "MFA2: " fmt
+
+#include "mlxfw_mfa2_tlv_multi.h"
+#include <uapi/linux/netlink.h>
+
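+/* MFA2 records are aligned to 4 bytes; NLA_ALIGN from the netlink header is
+ * borrowed here purely for its 4-byte rounding.
+ */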
+#define MLXFW_MFA2_TLV_TOTAL_SIZE(tlv) \
+ NLA_ALIGN(sizeof(*(tlv)) + be16_to_cpu((tlv)->len))
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_multi_child(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv_multi *multi)
+{
+ size_t multi_len;
+
+ multi_len = NLA_ALIGN(sizeof(struct mlxfw_mfa2_tlv_multi));
+ return mlxfw_mfa2_tlv_get(mfa2_file, (void *) multi + multi_len);
+}
+
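+/* Advance to the following sibling record. A multi-part TLV owns its
+ * children, so its whole (aligned) total_len is skipped in a single step.
+ */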
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_next(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv *tlv)
+{
+ const struct mlxfw_mfa2_tlv_multi *multi;
+ u16 tlv_len;
+ void *next;
+
+ tlv_len = MLXFW_MFA2_TLV_TOTAL_SIZE(tlv);
+
+ if (tlv->type == MLXFW_MFA2_TLV_MULTI_PART) {
+ multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, tlv);
+ tlv_len = NLA_ALIGN(tlv_len + be16_to_cpu(multi->total_len));
+ }
+
+ next = (void *) tlv + tlv_len;
+ return mlxfw_mfa2_tlv_get(mfa2_file, next);
+}
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_advance(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv *from_tlv, u16 count)
+{
+ const struct mlxfw_mfa2_tlv *tlv;
+ u16 idx;
+
+ mlxfw_mfa2_tlv_foreach(mfa2_file, tlv, idx, from_tlv, count)
+ if (!tlv)
+ return NULL;
+ return tlv;
+}
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_multi_child_find(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv_multi *multi,
+ enum mlxfw_mfa2_tlv_type type, u16 index)
+{
+ const struct mlxfw_mfa2_tlv *tlv;
+ u16 skip = 0;
+ u16 idx;
+
+ mlxfw_mfa2_tlv_multi_foreach(mfa2_file, tlv, idx, multi) {
+ if (!tlv) {
+ pr_err("TLV parsing error\n");
+ return NULL;
+ }
+ if (tlv->type == type)
+ if (skip++ == index)
+ return tlv;
+ }
+ return NULL;
+}
+
+int mlxfw_mfa2_tlv_multi_child_count(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv_multi *multi,
+ enum mlxfw_mfa2_tlv_type type,
+ u16 *p_count)
+{
+ const struct mlxfw_mfa2_tlv *tlv;
+ u16 count = 0;
+ u16 idx;
+
+ mlxfw_mfa2_tlv_multi_foreach(mfa2_file, tlv, idx, multi) {
+ if (!tlv) {
+ pr_err("TLV parsing error\n");
+ return -EINVAL;
+ }
+
+ if (tlv->type == type)
+ count++;
+ }
+ *p_count = count;
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h
new file mode 100644
index 0000000..2c66789
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h
@@ -0,0 +1,71 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXFW_MFA2_TLV_MULTI_H
+#define _MLXFW_MFA2_TLV_MULTI_H
+
+#include "mlxfw_mfa2_tlv.h"
+#include "mlxfw_mfa2_format.h"
+#include "mlxfw_mfa2_file.h"
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_multi_child(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv_multi *multi);
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_next(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv *tlv);
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_advance(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv *from_tlv, u16 count);
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_multi_child_find(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv_multi *multi,
+ enum mlxfw_mfa2_tlv_type type, u16 index);
+
+int mlxfw_mfa2_tlv_multi_child_count(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv_multi *multi,
+ enum mlxfw_mfa2_tlv_type type,
+ u16 *p_count);
+
+#define mlxfw_mfa2_tlv_foreach(mfa2_file, tlv, idx, from_tlv, count) \
+ for (idx = 0, tlv = from_tlv; idx < (count); \
+ idx++, tlv = mlxfw_mfa2_tlv_next(mfa2_file, tlv))
+
+#define mlxfw_mfa2_tlv_multi_foreach(mfa2_file, tlv, idx, multi) \
+ mlxfw_mfa2_tlv_foreach(mfa2_file, tlv, idx, \
+ mlxfw_mfa2_tlv_multi_child(mfa2_file, multi), \
+ be16_to_cpu(multi->num_extensions) + 1)
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index ef23eae..695adff 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -74,7 +74,9 @@
tristate "Mellanox Technologies Spectrum support"
depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV && VLAN_8021Q
depends on PSAMPLE || PSAMPLE=n
+ depends on BRIDGE || BRIDGE=n
select PARMAN
+ select MLXFW
default m
---help---
This driver supports Mellanox Technologies Spectrum Ethernet
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 2fb8c65..62fc42f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -16,7 +16,8 @@
spectrum_switchdev.o spectrum_router.o \
spectrum_kvdl.o spectrum_acl_tcam.o \
spectrum_acl.o spectrum_flower.o \
- spectrum_cnt.o spectrum_dpipe.o
+ spectrum_cnt.o spectrum_dpipe.o \
+ spectrum_fid.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
mlxsw_minimal-objs := minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 7fb3539..6e966af 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -344,15 +344,17 @@
u8 features;
};
+struct mlxsw_fw_rev {
+ u16 major;
+ u16 minor;
+ u16 subminor;
+};
+
struct mlxsw_bus_info {
const char *device_kind;
const char *device_name;
struct device *dev;
- struct {
- u16 major;
- u16 minor;
- u16 subminor;
- } fw_rev;
+ struct mlxsw_fw_rev fw_rev;
u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN];
u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 46304ff..5ae1101 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -40,6 +40,7 @@
#include <linux/list.h>
#include "item.h"
+#include "trap.h"
#include "core_acl_flex_actions.h"
enum mlxsw_afa_set_type {
@@ -662,6 +663,16 @@
#define MLXSW_AFA_TRAPDISC_CODE 0x03
#define MLXSW_AFA_TRAPDISC_SIZE 1
+enum mlxsw_afa_trapdisc_trap_action {
+ MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP = 0,
+ MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP = 2,
+};
+
+/* afa_trapdisc_trap_action
+ * Trap Action.
+ */
+MLXSW_ITEM32(afa, trapdisc, trap_action, 0x00, 24, 4);
+
enum mlxsw_afa_trapdisc_forward_action {
MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD = 3,
};
@@ -671,11 +682,20 @@
*/
MLXSW_ITEM32(afa, trapdisc, forward_action, 0x00, 0, 4);
+/* afa_trapdisc_trap_id
+ * Trap ID to configure.
+ */
+MLXSW_ITEM32(afa, trapdisc, trap_id, 0x04, 0, 9);
+
static inline void
mlxsw_afa_trapdisc_pack(char *payload,
- enum mlxsw_afa_trapdisc_forward_action forward_action)
+ enum mlxsw_afa_trapdisc_trap_action trap_action,
+ enum mlxsw_afa_trapdisc_forward_action forward_action,
+ u16 trap_id)
{
+ mlxsw_afa_trapdisc_trap_action_set(payload, trap_action);
mlxsw_afa_trapdisc_forward_action_set(payload, forward_action);
+ mlxsw_afa_trapdisc_trap_id_set(payload, trap_id);
}
int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
@@ -686,11 +706,27 @@
if (!act)
return -ENOBUFS;
- mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD);
+ mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
+ MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD, 0);
return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_drop);
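+/* Append a trap action: the packet is sent to the CPU via trap ID ACL0 and
+ * discarded from the forwarding pipeline.
+ */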
+int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_TRAPDISC_CODE,
+ MLXSW_AFA_TRAPDISC_SIZE);
+
+ if (!act)
+ return -ENOBUFS;
+ mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
+ MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD,
+ MLXSW_TRAP_ID_ACL0);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_trap);
+
/* Forwarding Action
* -----------------
* Forwarding Action can be used to implement Policy Based Switching (PBS)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index bd8b91d..f99c341 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -60,6 +60,7 @@
void mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
+int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block);
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
u8 local_port, bool in_port);
int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index c75e914..9807ef8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -56,6 +56,7 @@
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
MLXSW_AFK_ELEMENT_VID,
MLXSW_AFK_ELEMENT_PCP,
+ MLXSW_AFK_ELEMENT_TCP_FLAGS,
MLXSW_AFK_ELEMENT_MAX,
};
@@ -102,6 +103,7 @@
MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8),
MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),
+ MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9),
MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32),
MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 0af3338..a644120 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -155,7 +155,7 @@
/* pci_cqe_trap_id
* Trap ID that captured the packet.
*/
-MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 8);
+MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 9);
/* pci_cqe_crc
* Length include CRC. Indicates the length field includes
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 83b277c..1bd34d9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -958,7 +958,7 @@
MLXSW_REG_SFGC_TABLE_TYPE_VID = 1,
MLXSW_REG_SFGC_TABLE_TYPE_SINGLE = 2,
MLXSW_REG_SFGC_TABLE_TYPE_ANY = 0,
- MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST = 3,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET = 3,
MLXSW_REG_SFGC_TABLE_TYPE_FID = 4,
};
@@ -5491,6 +5491,81 @@
mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name);
}
+/* MCIA - Management Cable Info Access
+ * -----------------------------------
+ * The MCIA register is used to access the SFP+ and QSFP connectors' EEPROM.
+ */
+
+#define MLXSW_REG_MCIA_ID 0x9014
+#define MLXSW_REG_MCIA_LEN 0x40
+
+MLXSW_REG_DEFINE(mcia, MLXSW_REG_MCIA_ID, MLXSW_REG_MCIA_LEN);
+
+/* reg_mcia_l
+ * Lock bit. Setting this bit will lock the access to the specific
+ * cable. Used for updating a full page in a cable EEPROM. Any access
+ * other than subsequent writes will fail while the port is locked.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcia, l, 0x00, 31, 1);
+
+/* reg_mcia_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcia, module, 0x00, 16, 8);
+
+/* reg_mcia_status
+ * Module status.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcia, status, 0x00, 0, 8);
+
+/* reg_mcia_i2c_device_address
+ * I2C device address.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcia, i2c_device_address, 0x04, 24, 8);
+
+/* reg_mcia_page_number
+ * Page number.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcia, page_number, 0x04, 16, 8);
+
+/* reg_mcia_device_address
+ * Device address.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcia, device_address, 0x04, 0, 16);
+
+/* reg_mcia_size
+ * Number of bytes to read/write (up to 48 bytes).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcia, size, 0x08, 0, 16);
+
+#define MLXSW_SP_REG_MCIA_EEPROM_SIZE 48
+
+/* reg_mcia_eeprom
+ * Bytes to read/write.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_SP_REG_MCIA_EEPROM_SIZE);
+
+static inline void mlxsw_reg_mcia_pack(char *payload, u8 module, u8 lock,
+ u8 page_number, u16 device_addr,
+ u8 size, u8 i2c_device_addr)
+{
+ MLXSW_REG_ZERO(mcia, payload);
+ mlxsw_reg_mcia_module_set(payload, module);
+ mlxsw_reg_mcia_l_set(payload, lock);
+ mlxsw_reg_mcia_page_number_set(payload, page_number);
+ mlxsw_reg_mcia_device_address_set(payload, device_addr);
+ mlxsw_reg_mcia_size_set(payload, size);
+ mlxsw_reg_mcia_i2c_device_address_set(payload, i2c_device_addr);
+}
+
/* MPAT - Monitoring Port Analyzer Table
* -------------------------------------
* MPAT Register is used to query and configure the Switch PortAnalyzer Table.
@@ -5643,6 +5718,222 @@
MLXSW_REG_MLCR_DURATION_MAX : 0);
}
+/* MCQI - Management Component Query Information
+ * ---------------------------------------------
+ * This register allows querying information about firmware components.
+ */
+#define MLXSW_REG_MCQI_ID 0x9061
+#define MLXSW_REG_MCQI_BASE_LEN 0x18
+#define MLXSW_REG_MCQI_CAP_LEN 0x14
+#define MLXSW_REG_MCQI_LEN (MLXSW_REG_MCQI_BASE_LEN + MLXSW_REG_MCQI_CAP_LEN)
+
+MLXSW_REG_DEFINE(mcqi, MLXSW_REG_MCQI_ID, MLXSW_REG_MCQI_LEN);
+
+/* reg_mcqi_component_index
+ * Index of the accessed component.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcqi, component_index, 0x00, 0, 16);
+
+enum mlxsw_reg_mcqi_info_type {
+ MLXSW_REG_MCQI_INFO_TYPE_CAPABILITIES,
+};
+
+/* reg_mcqi_info_type
+ * Component properties set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcqi, info_type, 0x08, 0, 5);
+
+/* reg_mcqi_offset
+ * The requested/returned data offset from the section start, given in bytes.
+ * Must be DWORD aligned.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcqi, offset, 0x10, 0, 32);
+
+/* reg_mcqi_data_size
+ * The requested/returned data size, given in bytes. If data_size is not DWORD
+ * aligned, the last bytes are zero padded.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcqi, data_size, 0x14, 0, 16);
+
+/* reg_mcqi_cap_max_component_size
+ * Maximum size for this component, given in bytes.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcqi, cap_max_component_size, 0x20, 0, 32);
+
+/* reg_mcqi_cap_log_mcda_word_size
+ * Log 2 of the access word size in bytes. Read and write access must be aligned
+ * to the word size. Write access must be done for an integer number of words.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcqi, cap_log_mcda_word_size, 0x24, 28, 4);
+
+/* reg_mcqi_cap_mcda_max_write_size
+ * Maximal write size for the MCDA register.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcqi, cap_mcda_max_write_size, 0x24, 0, 16);
+
+static inline void mlxsw_reg_mcqi_pack(char *payload, u16 component_index)
+{
+ MLXSW_REG_ZERO(mcqi, payload);
+ mlxsw_reg_mcqi_component_index_set(payload, component_index);
+ mlxsw_reg_mcqi_info_type_set(payload,
+ MLXSW_REG_MCQI_INFO_TYPE_CAPABILITIES);
+ mlxsw_reg_mcqi_offset_set(payload, 0);
+ mlxsw_reg_mcqi_data_size_set(payload, MLXSW_REG_MCQI_CAP_LEN);
+}
+
+static inline void mlxsw_reg_mcqi_unpack(char *payload,
+ u32 *p_cap_max_component_size,
+ u8 *p_cap_log_mcda_word_size,
+ u16 *p_cap_mcda_max_write_size)
+{
+ *p_cap_max_component_size =
+ mlxsw_reg_mcqi_cap_max_component_size_get(payload);
+ *p_cap_log_mcda_word_size =
+ mlxsw_reg_mcqi_cap_log_mcda_word_size_get(payload);
+ *p_cap_mcda_max_write_size =
+ mlxsw_reg_mcqi_cap_mcda_max_write_size_get(payload);
+}
+
+/* MCC - Management Component Control
+ * ----------------------------------
+ * Controls the firmware component and updates the FSM.
+ */
+#define MLXSW_REG_MCC_ID 0x9062
+#define MLXSW_REG_MCC_LEN 0x1C
+
+MLXSW_REG_DEFINE(mcc, MLXSW_REG_MCC_ID, MLXSW_REG_MCC_LEN);
+
+enum mlxsw_reg_mcc_instruction {
+ MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
+ MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
+ MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT = 0x03,
+ MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT = 0x04,
+ MLXSW_REG_MCC_INSTRUCTION_ACTIVATE = 0x06,
+ MLXSW_REG_MCC_INSTRUCTION_CANCEL = 0x08,
+};
+
+/* reg_mcc_instruction
+ * Command to be executed by the FSM.
+ * Applicable for write operation only.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcc, instruction, 0x00, 0, 8);
+
+/* reg_mcc_component_index
+ * Index of the accessed component. Applicable only for commands that
+ * refer to components. Otherwise, this field is reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcc, component_index, 0x04, 0, 16);
+
+/* reg_mcc_update_handle
+ * Token representing the current flow executed by the FSM.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mcc, update_handle, 0x08, 0, 24);
+
+/* reg_mcc_error_code
+ * Indicates the successful completion of the instruction, or the reason it
+ * failed.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcc, error_code, 0x0C, 8, 8);
+
+/* reg_mcc_control_state
+ * Current FSM state
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcc, control_state, 0x0C, 0, 4);
+
+/* reg_mcc_component_size
+ * Component size in bytes. Valid for UPDATE_COMPONENT instruction. Specifying
+ * the size may shorten the update time. Value 0x0 means that size is
+ * unspecified.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mcc, component_size, 0x10, 0, 32);
+
+static inline void mlxsw_reg_mcc_pack(char *payload,
+ enum mlxsw_reg_mcc_instruction instr,
+ u16 component_index, u32 update_handle,
+ u32 component_size)
+{
+ MLXSW_REG_ZERO(mcc, payload);
+ mlxsw_reg_mcc_instruction_set(payload, instr);
+ mlxsw_reg_mcc_component_index_set(payload, component_index);
+ mlxsw_reg_mcc_update_handle_set(payload, update_handle);
+ mlxsw_reg_mcc_component_size_set(payload, component_size);
+}
+
+static inline void mlxsw_reg_mcc_unpack(char *payload, u32 *p_update_handle,
+ u8 *p_error_code, u8 *p_control_state)
+{
+ if (p_update_handle)
+ *p_update_handle = mlxsw_reg_mcc_update_handle_get(payload);
+ if (p_error_code)
+ *p_error_code = mlxsw_reg_mcc_error_code_get(payload);
+ if (p_control_state)
+ *p_control_state = mlxsw_reg_mcc_control_state_get(payload);
+}
+
+/* MCDA - Management Component Data Access
+ * ---------------------------------------
+ * This register allows reading and writing a firmware component.
+ */
+#define MLXSW_REG_MCDA_ID 0x9063
+#define MLXSW_REG_MCDA_BASE_LEN 0x10
+#define MLXSW_REG_MCDA_MAX_DATA_LEN 0x80
+#define MLXSW_REG_MCDA_LEN \
+ (MLXSW_REG_MCDA_BASE_LEN + MLXSW_REG_MCDA_MAX_DATA_LEN)
+
+MLXSW_REG_DEFINE(mcda, MLXSW_REG_MCDA_ID, MLXSW_REG_MCDA_LEN);
+
+/* reg_mcda_update_handle
+ * Token representing the current flow executed by the FSM.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcda, update_handle, 0x00, 0, 24);
+
+/* reg_mcda_offset
+ * Offset of accessed address relative to component start. Accesses must be
+ * in accordance with log_mcda_word_size in the MCQI register.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcda, offset, 0x04, 0, 32);
+
+/* reg_mcda_size
+ * Size of the data accessed, given in bytes.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcda, size, 0x08, 0, 16);
+
+/* reg_mcda_data
+ * Data block accessed.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, mcda, data, 0x10, 0, 32, 4, 0, false);
+
+static inline void mlxsw_reg_mcda_pack(char *payload, u32 update_handle,
+ u32 offset, u16 size, u8 *data)
+{
+ int i;
+
+ MLXSW_REG_ZERO(mcda, payload);
+ mlxsw_reg_mcda_update_handle_set(payload, update_handle);
+ mlxsw_reg_mcda_offset_set(payload, offset);
+ mlxsw_reg_mcda_size_set(payload, size);
+
+ for (i = 0; i < size / 4; i++)
+ mlxsw_reg_mcda_data_set(payload, i, *(u32 *) &data[i * 4]);
+}
+
/* MPSC - Monitoring Packet Sampling Configuration Register
* --------------------------------------------------------
* MPSC Register is used to configure the Packet Sampling mechanism.
@@ -6217,10 +6508,14 @@
MLXSW_REG(mfsl),
MLXSW_REG(mtcap),
MLXSW_REG(mtmp),
+ MLXSW_REG(mcia),
MLXSW_REG(mpat),
MLXSW_REG(mpar),
MLXSW_REG(mlcr),
MLXSW_REG(mpsc),
+ MLXSW_REG(mcqi),
+ MLXSW_REG(mcc),
+ MLXSW_REG(mcda),
MLXSW_REG(mgpc),
MLXSW_REG(sbpr),
MLXSW_REG(sbcm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 88357ce..60bf8f2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -68,6 +68,22 @@
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
+#include "../mlxfw/mlxfw.h"
+
+#define MLXSW_FWREV_MAJOR 13
+#define MLXSW_FWREV_MINOR 1420
+#define MLXSW_FWREV_SUBMINOR 122
+
+static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
+ .major = MLXSW_FWREV_MAJOR,
+ .minor = MLXSW_FWREV_MINOR,
+ .subminor = MLXSW_FWREV_SUBMINOR
+};
+
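+/* Expands to "mellanox/mlxsw_spectrum-13.1420.122.mfa2"; the file is fetched
+ * with request_firmware_direct().
+ */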
+#define MLXSW_SP_FW_FILENAME \
+ "mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
+ "." __stringify(MLXSW_FWREV_MINOR) \
+ "." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
@@ -140,6 +156,223 @@
*/
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+struct mlxsw_sp_mlxfw_dev {
+ struct mlxfw_dev mlxfw_dev;
+ struct mlxsw_sp *mlxsw_sp;
+};
+
+static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
+ u16 component_index, u32 *p_max_size,
+ u8 *p_align_bits, u16 *p_max_write_size)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcqi_pl[MLXSW_REG_MCQI_LEN];
+ int err;
+
+ mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
+ p_max_write_size);
+
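+ /* Require at least 2 alignment bits (4-byte words) and cap writes at
+ * what a single MCDA transaction can carry.
+ */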
+ *p_align_bits = max_t(u8, *p_align_bits, 2);
+ *p_max_write_size = min_t(u16, *p_max_write_size,
+ MLXSW_REG_MCDA_MAX_DATA_LEN);
+ return 0;
+}
+
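+/* Take the firmware update lock: the FSM must be idle before a fresh update
+ * handle can be locked, otherwise -EBUSY is returned.
+ */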
+static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+ u8 control_state;
+ int err;
+
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
+ if (control_state != MLXFW_FSM_STATE_IDLE)
+ return -EBUSY;
+
+ mlxsw_reg_mcc_pack(mcc_pl,
+ MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
+ 0, *fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle, u16 component_index,
+ u32 component_size)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
+ component_index, fwhandle, component_size);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle, u8 *data, u16 size,
+ u32 offset)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcda_pl[MLXSW_REG_MCDA_LEN];
+
+ mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
+}
+
+static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle, u16 component_index)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
+ component_index, fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
+ fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ enum mlxfw_fsm_state *fsm_state,
+ enum mlxfw_fsm_state_err *fsm_state_err)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+ u8 control_state;
+ u8 error_code;
+ int err;
+
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
+ *fsm_state = control_state;
+ *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
+ MLXFW_FSM_STATE_ERR_MAX);
+ return 0;
+}
+
+static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
+ fwhandle, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl,
+ MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
+ fwhandle, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
+ .component_query = mlxsw_sp_component_query,
+ .fsm_lock = mlxsw_sp_fsm_lock,
+ .fsm_component_update = mlxsw_sp_fsm_component_update,
+ .fsm_block_download = mlxsw_sp_fsm_block_download,
+ .fsm_component_verify = mlxsw_sp_fsm_component_verify,
+ .fsm_activate = mlxsw_sp_fsm_activate,
+ .fsm_query_state = mlxsw_sp_fsm_query_state,
+ .fsm_cancel = mlxsw_sp_fsm_cancel,
+ .fsm_release = mlxsw_sp_fsm_release
+};
+
+static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
+ const struct firmware *firmware)
+{
+ struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
+ .mlxfw_dev = {
+ .ops = &mlxsw_sp_mlxfw_dev_ops,
+ .psid = mlxsw_sp->bus_info->psid,
+ .psid_size = strlen(mlxsw_sp->bus_info->psid),
+ },
+ .mlxsw_sp = mlxsw_sp
+ };
+
+ return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
+}
+
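+/* Lexicographic comparison of firmware revisions: true when rev a is the
+ * same as or newer than rev b.
+ */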
+static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
+ const struct mlxsw_fw_rev *b)
+{
+ if (a->major != b->major)
+ return a->major > b->major;
+ if (a->minor != b->minor)
+ return a->minor > b->minor;
+ return a->subminor >= b->subminor;
+}
+
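+/* On driver init, compare the running firmware against the minimum supported
+ * revision and, if it is older, flash the expected MFA2 image.
+ */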
+static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
+{
+ const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
+ const struct firmware *firmware;
+ int err;
+
+ if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
+ return 0;
+
+ dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is out of date\n",
+ rev->major, rev->minor, rev->subminor);
+ dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
+ MLXSW_SP_FW_FILENAME);
+
+ err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
+ mlxsw_sp->bus_info->dev);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
+ MLXSW_SP_FW_FILENAME);
+ return err;
+ }
+
+ err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
+ release_firmware(firmware);
+ return err;
+}
+
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index, u64 *packets,
u64 *bytes)
@@ -210,6 +443,41 @@
mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
+int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ u8 state)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ enum mlxsw_reg_spms_state spms_state;
+ char *spms_pl;
+ int err;
+
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
+ break;
+ case BR_STATE_LEARNING:
+ spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
+ break;
+ case BR_STATE_LISTENING: /* fall-through */
+ case BR_STATE_DISABLED: /* fall-through */
+ case BR_STATE_BLOCKING:
+ spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
+ break;
+ default:
+ BUG();
+ }
+
+ spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+ if (!spms_pl)
+ return -ENOMEM;
+ mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
+ kfree(spms_pl);
+ return err;
+}
+
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
@@ -592,25 +860,16 @@
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
-static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- u8 swid)
-{
- char pspa_pl[MLXSW_REG_PSPA_LEN];
-
- mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
-}
-
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char pspa_pl[MLXSW_REG_PSPA_LEN];
- return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
- swid);
+ mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
-static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
- bool enable)
+int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char svpe_pl[MLXSW_REG_SVPE_LEN];
@@ -619,21 +878,8 @@
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}
-int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
- enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
- u16 vid)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char svfa_pl[MLXSW_REG_SVFA_LEN];
-
- mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
- fid, vid);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
-}
-
-int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end,
- bool learn_enable)
+int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ bool learn_enable)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char *spvmlr_pl;
@@ -642,18 +888,56 @@
spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
if (!spvmlr_pl)
return -ENOMEM;
- mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
- vid_end, learn_enable);
+ mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
+ learn_enable);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
kfree(spvmlr_pl);
return err;
}
-static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid, bool learn_enable)
+static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
{
- return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
- learn_enable);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char spvid_pl[MLXSW_REG_SPVID_LEN];
+
+ mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
+}
+
+static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool allow)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char spaft_pl[MLXSW_REG_SPAFT_LEN];
+
+ mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
+}
+
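+/* PVID 0 means "no untagged traffic": rather than programming a VID,
+ * untagged frames are simply disallowed on the port.
+ */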
+int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ int err;
+
+ if (!vid) {
+ err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
+ if (err)
+ return err;
+ } else {
+ err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
+ if (err)
+ return err;
+ err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
+ if (err)
+ goto err_port_allow_untagged_set;
+ }
+
+ mlxsw_sp_port->pvid = vid;
+ return 0;
+
+err_port_allow_untagged_set:
+ __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
+ return err;
}
static int
@@ -683,13 +967,14 @@
return 0;
}
-static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
u8 module, u8 width, u8 lane)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int i;
- mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
mlxsw_reg_pmlp_width_set(pmlp_pl, width);
for (i = 0; i < width; i++) {
mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
@@ -699,11 +984,12 @@
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
-static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmlp_pl[MLXSW_REG_PMLP_LEN];
- mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
@@ -1100,95 +1386,82 @@
return 0;
}
-static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
- enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
- u16 vid, last_visited_vid;
- int err;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
- vid);
- if (err) {
- last_visited_vid = vid;
- goto err_port_vid_to_fid_set;
- }
- }
-
- err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
- if (err) {
- last_visited_vid = VLAN_N_VID;
- goto err_port_vid_to_fid_set;
- }
-
- return 0;
-
-err_port_vid_to_fid_set:
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
- mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
- vid);
- return err;
+ list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
+ &mlxsw_sp_port->vlans_list, list)
+ mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}
-static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+static struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
- enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
- u16 vid;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ bool untagged = vid == 1;
int err;
- err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
if (err)
- return err;
+ return ERR_PTR(err);
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
- vid, vid);
- if (err)
- return err;
+ mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
+ if (!mlxsw_sp_port_vlan) {
+ err = -ENOMEM;
+ goto err_port_vlan_alloc;
}
- return 0;
+ mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
+ mlxsw_sp_port_vlan->vid = vid;
+ list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
+
+ return mlxsw_sp_port_vlan;
+
+err_port_vlan_alloc:
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+ return ERR_PTR(err);
}
-static struct mlxsw_sp_port *
-mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+static void
+mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
- struct mlxsw_sp_port *mlxsw_sp_vport;
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ u16 vid = mlxsw_sp_port_vlan->vid;
- mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
- if (!mlxsw_sp_vport)
- return NULL;
-
- /* dev will be set correctly after the VLAN device is linked
- * with the real device. In case of bridge SELF invocation, dev
- * will remain as is.
- */
- mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
- mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
- mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
- mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
- mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
- mlxsw_sp_vport->vport.vid = vid;
-
- list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
-
- return mlxsw_sp_vport;
+ list_del(&mlxsw_sp_port_vlan->list);
+ kfree(mlxsw_sp_port_vlan);
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
-static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
+struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
- list_del(&mlxsw_sp_vport->vport.list);
- kfree(mlxsw_sp_vport);
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (mlxsw_sp_port_vlan)
+ return mlxsw_sp_port_vlan;
+
+ return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
+}
+
+void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+{
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+
+ if (mlxsw_sp_port_vlan->bridge_port)
+ mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
+ else if (fid)
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+
+ mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
static int mlxsw_sp_port_add_vid(struct net_device *dev,
__be16 __always_unused proto, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp_port *mlxsw_sp_vport;
- bool untagged = vid == 1;
- int err;
/* VLAN 0 is added to HW filter when device goes up, but it is
* reserved in our case, so simply return.
@@ -1196,43 +1469,14 @@
if (!vid)
return 0;
- if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
- return 0;
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
- if (!mlxsw_sp_vport)
- return -ENOMEM;
-
- /* When adding the first VLAN interface on a bridged port we need to
- * transition all the active 802.1Q bridge VLANs to use explicit
- * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
- */
- if (list_is_singular(&mlxsw_sp_port->vports_list)) {
- err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
- if (err)
- goto err_port_vp_mode_trans;
- }
-
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
- if (err)
- goto err_port_add_vid;
-
- return 0;
-
-err_port_add_vid:
- if (list_is_singular(&mlxsw_sp_port->vports_list))
- mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
-err_port_vp_mode_trans:
- mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
- return err;
+ return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
}
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
__be16 __always_unused proto, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp_port *mlxsw_sp_vport;
- struct mlxsw_sp_fid *f;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
/* VLAN 0 is removed from HW filter when device goes down, but
* it is reserved in our case, so simply return.
@@ -1240,27 +1484,10 @@
if (!vid)
return 0;
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (WARN_ON(!mlxsw_sp_vport))
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (!mlxsw_sp_port_vlan)
return 0;
-
- mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
-
- /* Drop FID reference. If this was the last reference the
- * resources will be freed.
- */
- f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
- if (f && !WARN_ON(!f->leave))
- f->leave(mlxsw_sp_vport);
-
- /* When removing the last VLAN interface on a bridged port we need to
- * transition all active 802.1Q bridge VLANs to use VID to FID
- * mappings and set port's mode to VLAN mode.
- */
- if (list_is_singular(&mlxsw_sp_port->vports_list))
- mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
-
- mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
+ mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
return 0;
}
@@ -1466,11 +1693,15 @@
}
static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
- __be16 proto, struct tc_to_netdev *tc)
+ u32 chain_index, __be16 proto,
+ struct tc_to_netdev *tc)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);
+ if (chain_index)
+ return -EOPNOTSUPP;
+
switch (tc->type) {
case TC_SETUP_MATCHALL:
switch (tc->cls_mall->command) {
@@ -1519,12 +1750,6 @@
.ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
- .ndo_fdb_add = switchdev_port_fdb_add,
- .ndo_fdb_del = switchdev_port_fdb_del,
- .ndo_fdb_dump = switchdev_port_fdb_dump,
- .ndo_bridge_setlink = switchdev_port_bridge_setlink,
- .ndo_bridge_getlink = switchdev_port_bridge_getlink,
- .ndo_bridge_dellink = switchdev_port_bridge_dellink,
.ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
};
@@ -2269,6 +2494,160 @@
return 0;
}
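+/* ethtool flash_device handler. RTNL is dropped for the duration of the
+ * flash, since burning the image can take a long time; a reference on the
+ * netdev keeps it from disappearing meanwhile.
+ */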
+static int mlxsw_sp_flash_device(struct net_device *dev,
+ struct ethtool_flash *flash)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ const struct firmware *firmware;
+ int err;
+
+ if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
+ return -EOPNOTSUPP;
+
+ dev_hold(dev);
+ rtnl_unlock();
+
+ err = request_firmware_direct(&firmware, flash->data, &dev->dev);
+ if (err)
+ goto out;
+ err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
+ release_firmware(firmware);
+out:
+ rtnl_lock();
+ dev_put(dev);
+ return err;
+}
+
+#define MLXSW_SP_QSFP_I2C_ADDR 0x50
+
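+/* Read up to one MCIA payload (48 bytes) of module EEPROM at the given
+ * offset; the number of bytes actually read is returned via p_read_size.
+ */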
+static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 offset, u16 size, void *data,
+ unsigned int *p_read_size)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
+ char mcia_pl[MLXSW_REG_MCIA_LEN];
+ int status;
+ int err;
+
+ size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);
+ mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
+ 0, 0, offset, size, MLXSW_SP_QSFP_I2C_ADDR);
+
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
+ if (err)
+ return err;
+
+ status = mlxsw_reg_mcia_status_get(mcia_pl);
+ if (status)
+ return -EIO;
+
+ mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
+ memcpy(data, eeprom_tmp, size);
+ *p_read_size = size;
+
+ return 0;
+}
+
+enum mlxsw_sp_eeprom_module_info_rev_id {
+ MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00,
+ MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01,
+ MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03,
+};
+
+enum mlxsw_sp_eeprom_module_info_id {
+ MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP = 0x03,
+ MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP = 0x0C,
+ MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D,
+ MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11,
+};
+
+enum mlxsw_sp_eeprom_module_info {
+ MLXSW_SP_EEPROM_MODULE_INFO_ID,
+ MLXSW_SP_EEPROM_MODULE_INFO_REV_ID,
+ MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
+};
+
+static int mlxsw_sp_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
+ u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE];
+ u8 module_rev_id, module_id;
+ unsigned int read_size;
+ int err;
+
+ err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0,
+ MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
+ module_info, &read_size);
+ if (err)
+ return err;
+
+ if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE)
+ return -EIO;
+
+ module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID];
+ module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID];
+
+ switch (module_id) {
+ case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
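+ /* QSFP28, and QSFP+ with SFF-8636 (or newer) revision compliance,
+ * expose the larger SFF-8636 map; older QSFP+ falls back to SFF-8436.
+ */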
+ case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
+ case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28:
+ if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 ||
+ module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ }
+ break;
+ case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
+ int offset = ee->offset;
+ unsigned int read_size;
+ int i = 0;
+ int err;
+
+ if (!ee->len)
+ return -EINVAL;
+
+ memset(data, 0, ee->len);
+
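+ /* Read the EEPROM in chunks, as each query returns at most one
+ * MCIA transaction's worth of data.
+ */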
+ while (i < ee->len) {
+ err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset,
+ ee->len - i, data + i,
+ &read_size);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "EEPROM query failed\n");
+ return err;
+ }
+
+ i += read_size;
+ offset += read_size;
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.get_drvinfo = mlxsw_sp_port_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -2280,6 +2659,9 @@
.get_sset_count = mlxsw_sp_port_get_sset_count,
.get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
.set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
+ .flash_device = mlxsw_sp_flash_device,
+ .get_module_info = mlxsw_sp_get_module_info,
+ .get_module_eeprom = mlxsw_sp_get_module_eeprom,
};
static int
@@ -2398,51 +2780,38 @@
return 0;
}
-static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ bool split, u8 module, u8 width, u8 lane)
{
- mlxsw_sp_port->pvid = 1;
-
- return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
-}
-
-static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
-}
-
-static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width, u8 lane)
-{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_port *mlxsw_sp_port;
struct net_device *dev;
- size_t bytes;
int err;
+ err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
+ local_port);
+ return err;
+ }
+
dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
- if (!dev)
- return -ENOMEM;
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
mlxsw_sp_port = netdev_priv(dev);
mlxsw_sp_port->dev = dev;
mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
mlxsw_sp_port->local_port = local_port;
+ mlxsw_sp_port->pvid = 1;
mlxsw_sp_port->split = split;
mlxsw_sp_port->mapping.module = module;
mlxsw_sp_port->mapping.width = width;
mlxsw_sp_port->mapping.lane = lane;
mlxsw_sp_port->link.autoneg = 1;
- bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
- mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
- if (!mlxsw_sp_port->active_vlans) {
- err = -ENOMEM;
- goto err_port_active_vlans_alloc;
- }
- mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
- if (!mlxsw_sp_port->untagged_vlans) {
- err = -ENOMEM;
- goto err_port_untagged_vlans_alloc;
- }
- INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
+ INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
mlxsw_sp_port->pcpu_stats =
@@ -2472,6 +2841,13 @@
dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
+ err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_module_map;
+ }
+
err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
@@ -2547,11 +2923,18 @@
goto err_port_dcb_init;
}
- err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
+ err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
mlxsw_sp_port->local_port);
- goto err_port_pvid_vport_create;
+ goto err_port_fids_init;
+ }
+
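+ /* VID 1 is the default PVID; create its Port-VLAN up front */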
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
+ if (IS_ERR(mlxsw_sp_port_vlan)) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_vlan_get;
}
mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
@@ -2572,8 +2955,10 @@
err_register_netdev:
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
- mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
-err_port_pvid_vport_create:
+ mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
+err_port_vlan_get:
+ mlxsw_sp_port_fids_fini(mlxsw_sp_port);
+err_port_fids_init:
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
err_port_ets_init:
@@ -2585,43 +2970,21 @@
err_dev_addr_init:
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
+ mlxsw_sp_port_module_unmap(mlxsw_sp_port);
+err_port_module_map:
kfree(mlxsw_sp_port->hw_stats.cache);
err_alloc_hw_stats:
kfree(mlxsw_sp_port->sample);
err_alloc_sample:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
- kfree(mlxsw_sp_port->untagged_vlans);
-err_port_untagged_vlans_alloc:
- kfree(mlxsw_sp_port->active_vlans);
-err_port_active_vlans_alloc:
free_netdev(dev);
- return err;
-}
-
-static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width, u8 lane)
-{
- int err;
-
- err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
- local_port);
- return err;
- }
- err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
- module, width, lane);
- if (err)
- goto err_port_create;
- return 0;
-
-err_port_create:
+err_alloc_etherdev:
mlxsw_core_port_fini(mlxsw_sp->core, local_port);
return err;
}
-static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
@@ -2630,22 +2993,16 @@
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
- mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
+ mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
+ mlxsw_sp_port_fids_fini(mlxsw_sp_port);
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
- mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
+ mlxsw_sp_port_module_unmap(mlxsw_sp_port);
kfree(mlxsw_sp_port->hw_stats.cache);
kfree(mlxsw_sp_port->sample);
free_percpu(mlxsw_sp_port->pcpu_stats);
- kfree(mlxsw_sp_port->untagged_vlans);
- kfree(mlxsw_sp_port->active_vlans);
- WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
+ WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
free_netdev(mlxsw_sp_port->dev);
-}
-
-static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
-{
- __mlxsw_sp_port_remove(mlxsw_sp, local_port);
mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}
@@ -2724,19 +3081,6 @@
int err, i;
for (i = 0; i < count; i++) {
- err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
- width, i * width);
- if (err)
- goto err_port_module_map;
- }
-
- for (i = 0; i < count; i++) {
- err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
- if (err)
- goto err_port_swid_set;
- }
-
- for (i = 0; i < count; i++) {
err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
module, width, i * width);
if (err)
@@ -2749,15 +3093,6 @@
for (i--; i >= 0; i--)
if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- i = count;
-err_port_swid_set:
- for (i--; i >= 0; i--)
- __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
- MLXSW_PORT_SWID_DISABLED_PORT);
- i = count;
-err_port_module_map:
- for (i--; i >= 0; i--)
- mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
return err;
}
@@ -2776,17 +3111,6 @@
local_port = base_port + i * 2;
module = mlxsw_sp->port_to_module[local_port];
- mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
- 0);
- }
-
- for (i = 0; i < count; i++)
- __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
-
- for (i = 0; i < count; i++) {
- local_port = base_port + i * 2;
- module = mlxsw_sp->port_to_module[local_port];
-
mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
width, 0);
}
@@ -3020,7 +3344,9 @@
MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
/* PKT Sample trap */
MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
- false, SP_IP2ME, DISCARD)
+ false, SP_IP2ME, DISCARD),
+ /* ACL trap */
+ MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
};
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
@@ -3192,57 +3518,6 @@
}
}
-static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
- enum mlxsw_reg_sfgc_type type,
- enum mlxsw_reg_sfgc_bridge_type bridge_type)
-{
- enum mlxsw_flood_table_type table_type;
- enum mlxsw_sp_flood_table flood_table;
- char sfgc_pl[MLXSW_REG_SFGC_LEN];
-
- if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
- table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
- else
- table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
-
- switch (type) {
- case MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST:
- flood_table = MLXSW_SP_FLOOD_TABLE_UC;
- break;
- case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4:
- flood_table = MLXSW_SP_FLOOD_TABLE_MC;
- break;
- default:
- flood_table = MLXSW_SP_FLOOD_TABLE_BC;
- }
-
- mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
- flood_table);
- return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
-}
-
-static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
-{
- int type, err;
-
- for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
- if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
- continue;
-
- err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
- MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
- if (err)
- return err;
-
- err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
- MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
char slcr_pl[MLXSW_REG_SLCR_LEN];
@@ -3290,18 +3565,6 @@
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
-static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create);
-
-static int mlxsw_sp_dummy_fid_init(struct mlxsw_sp *mlxsw_sp)
-{
- return mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, true);
-}
-
-static void mlxsw_sp_dummy_fid_fini(struct mlxsw_sp *mlxsw_sp)
-{
- mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, false);
-}
-
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info)
{
@@ -3310,9 +3573,12 @@
mlxsw_sp->core = mlxsw_core;
mlxsw_sp->bus_info = mlxsw_bus_info;
- INIT_LIST_HEAD(&mlxsw_sp->fids);
- INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
- INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
+
+ err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
+ return err;
+ }
err = mlxsw_sp_base_mac_get(mlxsw_sp);
if (err) {
@@ -3320,16 +3586,16 @@
return err;
}
- err = mlxsw_sp_traps_init(mlxsw_sp);
+ err = mlxsw_sp_fids_init(mlxsw_sp);
if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
return err;
}
- err = mlxsw_sp_flood_init(mlxsw_sp);
+ err = mlxsw_sp_traps_init(mlxsw_sp);
if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
- goto err_flood_init;
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
+ goto err_traps_init;
}
err = mlxsw_sp_buffers_init(mlxsw_sp);
@@ -3380,12 +3646,6 @@
goto err_dpipe_init;
}
- err = mlxsw_sp_dummy_fid_init(mlxsw_sp);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to init dummy FID\n");
- goto err_dummy_fid_init;
- }
-
err = mlxsw_sp_ports_create(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
@@ -3395,8 +3655,6 @@
return 0;
err_ports_create:
- mlxsw_sp_dummy_fid_fini(mlxsw_sp);
-err_dummy_fid_init:
mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
mlxsw_sp_counter_pool_fini(mlxsw_sp);
@@ -3413,8 +3671,9 @@
err_lag_init:
mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
-err_flood_init:
mlxsw_sp_traps_fini(mlxsw_sp);
+err_traps_init:
+ mlxsw_sp_fids_fini(mlxsw_sp);
return err;
}
@@ -3423,7 +3682,6 @@
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_sp_ports_remove(mlxsw_sp);
- mlxsw_sp_dummy_fid_fini(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_acl_fini(mlxsw_sp);
@@ -3433,8 +3691,7 @@
mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
- WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
- WARN_ON(!list_empty(&mlxsw_sp->fids));
+ mlxsw_sp_fids_fini(mlxsw_sp);
}
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
@@ -3450,7 +3707,7 @@
.max_fid_offset_flood_tables = 3,
.fid_offset_flood_table_size = VLAN_N_VID - 1,
.max_fid_flood_tables = 3,
- .fid_flood_table_size = MLXSW_SP_VFID_MAX,
+ .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
@@ -3510,7 +3767,7 @@
return ret;
}
-static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
+struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -3531,7 +3788,7 @@
return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}
-static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
+struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -3562,176 +3819,6 @@
dev_put(mlxsw_sp_port->dev);
}
-static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
- u16 fid)
-{
- if (mlxsw_sp_fid_is_vfid(fid))
- return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
- else
- return test_bit(fid, lag_port->active_vlans);
-}
-
-static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
- u16 lag_id = mlxsw_sp_port->lag_id;
- u64 max_lag_members;
- int i, count = 0;
-
- if (!mlxsw_sp_port->lagged)
- return true;
-
- max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- MAX_LAG_MEMBERS);
- for (i = 0; i < max_lag_members; i++) {
- struct mlxsw_sp_port *lag_port;
-
- lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
- if (!lag_port || lag_port->local_port == local_port)
- continue;
- if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
- count++;
- }
-
- return !count;
-}
-
-static int
-mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char sfdf_pl[MLXSW_REG_SFDF_LEN];
-
- mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
- mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
- mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
- mlxsw_sp_port->local_port);
-
- netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
- mlxsw_sp_port->local_port, fid);
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
-}
-
-static int
-mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char sfdf_pl[MLXSW_REG_SFDF_LEN];
-
- mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
- mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
- mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
-
- netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
- mlxsw_sp_port->lag_id, fid);
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
-}
-
-int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
-{
- if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
- return 0;
-
- if (mlxsw_sp_port->lagged)
- return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
- fid);
- else
- return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
-}
-
-static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_fid *f, *tmp;
-
- list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
- if (--f->ref_count == 0)
- mlxsw_sp_fid_destroy(mlxsw_sp, f);
- else
- WARN_ON_ONCE(1);
-}
-
-static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
- struct net_device *br_dev)
-{
- return !mlxsw_sp->master_bridge.dev ||
- mlxsw_sp->master_bridge.dev == br_dev;
-}
-
-static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
- struct net_device *br_dev)
-{
- mlxsw_sp->master_bridge.dev = br_dev;
- mlxsw_sp->master_bridge.ref_count++;
-}
-
-static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
-{
- if (--mlxsw_sp->master_bridge.ref_count == 0) {
- mlxsw_sp->master_bridge.dev = NULL;
- /* It's possible upper VLAN devices are still holding
- * references to underlying FIDs. Drop the reference
- * and release the resources if it was the last one.
- * If it wasn't, then something bad happened.
- */
- mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
- }
-}
-
-static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
- struct net_device *br_dev)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
- int err;
-
- /* When port is not bridged untagged packets are tagged with
- * PVID=VID=1, thereby creating an implicit VLAN interface in
- * the device. Remove it and let bridge code take care of its
- * own VLANs.
- */
- err = mlxsw_sp_port_kill_vid(dev, 0, 1);
- if (err)
- return err;
-
- mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);
-
- mlxsw_sp_port->learning = 1;
- mlxsw_sp_port->learning_sync = 1;
- mlxsw_sp_port->uc_flood = 1;
- mlxsw_sp_port->mc_flood = 1;
- mlxsw_sp_port->mc_router = 0;
- mlxsw_sp_port->mc_disabled = 1;
- mlxsw_sp_port->bridged = 1;
-
- return 0;
-}
-
-static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
-
- mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
-
- mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);
-
- mlxsw_sp_port->learning = 0;
- mlxsw_sp_port->learning_sync = 0;
- mlxsw_sp_port->uc_flood = 0;
- mlxsw_sp_port->mc_flood = 0;
- mlxsw_sp_port->mc_router = 0;
- mlxsw_sp_port->bridged = 0;
-
- /* Add implicit VLAN interface in the device, so that untagged
- * packets will be classified to the default vFID.
- */
- mlxsw_sp_port_add_vid(dev, 0, 1);
-}
-
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
char sldr_pl[MLXSW_REG_SLDR_LEN];
@@ -3850,51 +3937,11 @@
return -EBUSY;
}
-static void
-mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
- struct net_device *lag_dev, u16 lag_id)
-{
- struct mlxsw_sp_port *mlxsw_sp_vport;
- struct mlxsw_sp_fid *f;
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
- if (WARN_ON(!mlxsw_sp_vport))
- return;
-
- /* If vPort is assigned a RIF, then leave it since it's no
- * longer valid.
- */
- f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
- if (f)
- f->leave(mlxsw_sp_vport);
-
- mlxsw_sp_vport->lag_id = lag_id;
- mlxsw_sp_vport->lagged = 1;
- mlxsw_sp_vport->dev = lag_dev;
-}
-
-static void
-mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- struct mlxsw_sp_port *mlxsw_sp_vport;
- struct mlxsw_sp_fid *f;
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
- if (WARN_ON(!mlxsw_sp_vport))
- return;
-
- f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
- if (f)
- f->leave(mlxsw_sp_vport);
-
- mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
- mlxsw_sp_vport->lagged = 0;
-}
-
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *lag_dev)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_upper *lag;
u16 lag_id;
u8 port_index;
@@ -3927,7 +3974,10 @@
mlxsw_sp_port->lagged = 1;
lag->ref_count++;
- mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id);
+ /* Port is no longer usable as a router interface */
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
+ if (mlxsw_sp_port_vlan->fid)
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
return 0;
@@ -3954,10 +4004,8 @@
mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
- if (mlxsw_sp_port->bridged) {
- mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
- mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
- }
+ /* Any VLANs configured on the port are no longer valid */
+ mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
if (lag->ref_count == 1)
mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
@@ -3967,7 +4015,9 @@
mlxsw_sp_port->lagged = 0;
lag->ref_count--;
- mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
+ mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
+ /* Make sure untagged frames are allowed to ingress */
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -4009,34 +4059,6 @@
return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
-static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
- struct net_device *vlan_dev)
-{
- struct mlxsw_sp_port *mlxsw_sp_vport;
- u16 vid = vlan_dev_vlan_id(vlan_dev);
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (WARN_ON(!mlxsw_sp_vport))
- return -EINVAL;
-
- mlxsw_sp_vport->dev = vlan_dev;
-
- return 0;
-}
-
-static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
- struct net_device *vlan_dev)
-{
- struct mlxsw_sp_port *mlxsw_sp_vport;
- u16 vid = vlan_dev_vlan_id(vlan_dev);
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (WARN_ON(!mlxsw_sp_vport))
- return;
-
- mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
-}
-
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool enable)
{
@@ -4066,9 +4088,12 @@
{
int err;
- err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
+ err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
if (err)
return err;
+ err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
+ if (err)
+ goto err_port_stp_set;
err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
true, false);
if (err)
@@ -4077,6 +4102,8 @@
err_port_vlan_set:
mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
+err_port_stp_set:
+ mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
return err;
}
@@ -4085,9 +4112,11 @@
mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
false, false);
mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
+ mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}
-static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
+static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
+ struct net_device *dev,
unsigned long event, void *ptr)
{
struct netdev_notifier_changeupper_info *info;
@@ -4110,10 +4139,6 @@
return -EINVAL;
if (!info->linking)
break;
- /* HW limitation forbids to put ports to multiple bridges. */
- if (netif_is_bridge_master(upper_dev) &&
- !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
- return -EINVAL;
if (netif_is_lag_master(upper_dev) &&
!mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
info->upper_info))
@@ -4130,19 +4155,15 @@
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
- if (is_vlan_dev(upper_dev)) {
- if (info->linking)
- err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
- upper_dev);
- else
- mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
- upper_dev);
- } else if (netif_is_bridge_master(upper_dev)) {
+ if (netif_is_bridge_master(upper_dev)) {
if (info->linking)
err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
+ lower_dev,
upper_dev);
else
- mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
+ lower_dev,
+ upper_dev);
} else if (netif_is_lag_master(upper_dev)) {
if (info->linking)
err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
@@ -4155,9 +4176,6 @@
err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
else
mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
- } else {
- err = -EINVAL;
- WARN_ON(1);
}
break;
}
@@ -4189,15 +4207,18 @@
return 0;
}
-static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
+static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
+ struct net_device *port_dev,
unsigned long event, void *ptr)
{
switch (event) {
case NETDEV_PRECHANGEUPPER:
case NETDEV_CHANGEUPPER:
- return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
+ return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
+ event, ptr);
case NETDEV_CHANGELOWERSTATE:
- return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
+ return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
+ ptr);
}
return 0;
@@ -4212,7 +4233,8 @@
netdev_for_each_lower_dev(lag_dev, dev, iter) {
if (mlxsw_sp_port_dev_check(dev)) {
- ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
+ ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
+ ptr);
if (ret)
return ret;
}
@@ -4221,322 +4243,33 @@
return 0;
}
-static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
- struct net_device *vlan_dev)
-{
- u16 fid = vlan_dev_vlan_id(vlan_dev);
- struct mlxsw_sp_fid *f;
-
- f = mlxsw_sp_fid_find(mlxsw_sp, fid);
- if (!f) {
- f = mlxsw_sp_fid_create(mlxsw_sp, fid);
- if (IS_ERR(f))
- return PTR_ERR(f);
- }
-
- f->ref_count++;
-
- return 0;
-}
-
-static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
- struct net_device *vlan_dev)
-{
- u16 fid = vlan_dev_vlan_id(vlan_dev);
- struct mlxsw_sp_fid *f;
-
- f = mlxsw_sp_fid_find(mlxsw_sp, fid);
- if (f && f->rif)
- mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
- if (f && --f->ref_count == 0)
- mlxsw_sp_fid_destroy(mlxsw_sp, f);
-}
-
-static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
- unsigned long event, void *ptr)
-{
- struct netdev_notifier_changeupper_info *info;
- struct net_device *upper_dev;
- struct mlxsw_sp *mlxsw_sp;
- int err = 0;
-
- mlxsw_sp = mlxsw_sp_lower_get(br_dev);
- if (!mlxsw_sp)
- return 0;
-
- info = ptr;
-
- switch (event) {
- case NETDEV_PRECHANGEUPPER:
- upper_dev = info->upper_dev;
- if (!is_vlan_dev(upper_dev))
- return -EINVAL;
- if (is_vlan_dev(upper_dev) &&
- br_dev != mlxsw_sp->master_bridge.dev)
- return -EINVAL;
- break;
- case NETDEV_CHANGEUPPER:
- upper_dev = info->upper_dev;
- if (is_vlan_dev(upper_dev)) {
- if (info->linking)
- err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
- upper_dev);
- else
- mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
- upper_dev);
- } else {
- err = -EINVAL;
- WARN_ON(1);
- }
- break;
- }
-
- return err;
-}
-
-static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
-{
- return find_first_zero_bit(mlxsw_sp->vfids.mapped,
- MLXSW_SP_VFID_MAX);
-}
-
-static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
-{
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
-
- mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-}
-
-static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
-
-static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
- struct net_device *br_dev)
-{
- struct device *dev = mlxsw_sp->bus_info->dev;
- struct mlxsw_sp_fid *f;
- u16 vfid, fid;
- int err;
-
- vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
- if (vfid == MLXSW_SP_VFID_MAX) {
- dev_err(dev, "No available vFIDs\n");
- return ERR_PTR(-ERANGE);
- }
-
- fid = mlxsw_sp_vfid_to_fid(vfid);
- err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
- if (err) {
- dev_err(dev, "Failed to create FID=%d\n", fid);
- return ERR_PTR(err);
- }
-
- f = kzalloc(sizeof(*f), GFP_KERNEL);
- if (!f)
- goto err_allocate_vfid;
-
- f->leave = mlxsw_sp_vport_vfid_leave;
- f->fid = fid;
- f->dev = br_dev;
-
- list_add(&f->list, &mlxsw_sp->vfids.list);
- set_bit(vfid, mlxsw_sp->vfids.mapped);
-
- return f;
-
-err_allocate_vfid:
- mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
- return ERR_PTR(-ENOMEM);
-}
-
-static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fid *f)
-{
- u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
- u16 fid = f->fid;
-
- clear_bit(vfid, mlxsw_sp->vfids.mapped);
- list_del(&f->list);
-
- if (f->rif)
- mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
-
- kfree(f);
-
- mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
-}
-
-static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
- bool valid)
-{
- enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
- u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
-
- return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
- vid);
-}
-
-static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *br_dev)
-{
- struct mlxsw_sp_fid *f;
- int err;
-
- f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
- if (!f) {
- f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
- if (IS_ERR(f))
- return PTR_ERR(f);
- }
-
- err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
- if (err)
- goto err_vport_flood_set;
-
- err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
- if (err)
- goto err_vport_fid_map;
-
- mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
- f->ref_count++;
-
- netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);
-
- return 0;
-
-err_vport_fid_map:
- mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
-err_vport_flood_set:
- if (!f->ref_count)
- mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
- return err;
-}
-
-static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
-{
- struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
-
- netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
-
- mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);
-
- mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
-
- mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);
-
- mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
- if (--f->ref_count == 0)
- mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
-}
-
-static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *br_dev)
-{
- struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
- u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
- struct net_device *dev = mlxsw_sp_vport->dev;
- int err;
-
- if (f && !WARN_ON(!f->leave))
- f->leave(mlxsw_sp_vport);
-
- err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
- if (err) {
- netdev_err(dev, "Failed to join vFID\n");
- return err;
- }
-
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
- if (err) {
- netdev_err(dev, "Failed to enable learning\n");
- goto err_port_vid_learning_set;
- }
-
- mlxsw_sp_vport->learning = 1;
- mlxsw_sp_vport->learning_sync = 1;
- mlxsw_sp_vport->uc_flood = 1;
- mlxsw_sp_vport->mc_flood = 1;
- mlxsw_sp_vport->mc_router = 0;
- mlxsw_sp_vport->mc_disabled = 1;
- mlxsw_sp_vport->bridged = 1;
-
- return 0;
-
-err_port_vid_learning_set:
- mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
- return err;
-}
-
-static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
-{
- u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
-
- mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
-
- mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
-
- mlxsw_sp_vport->learning = 0;
- mlxsw_sp_vport->learning_sync = 0;
- mlxsw_sp_vport->uc_flood = 0;
- mlxsw_sp_vport->mc_flood = 0;
- mlxsw_sp_vport->mc_router = 0;
- mlxsw_sp_vport->bridged = 0;
-}
-
-static bool
-mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
- const struct net_device *br_dev)
-{
- struct mlxsw_sp_port *mlxsw_sp_vport;
-
- list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
- vport.list) {
- struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);
-
- if (dev && dev == br_dev)
- return false;
- }
-
- return true;
-}
-
-static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
- unsigned long event, void *ptr,
- u16 vid)
+static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
+ struct net_device *dev,
+ unsigned long event, void *ptr,
+ u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct netdev_notifier_changeupper_info *info = ptr;
- struct mlxsw_sp_port *mlxsw_sp_vport;
struct net_device *upper_dev;
int err = 0;
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (!mlxsw_sp_vport)
- return 0;
-
switch (event) {
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
if (!netif_is_bridge_master(upper_dev))
return -EINVAL;
- if (!info->linking)
- break;
- /* We can't have multiple VLAN interfaces configured on
- * the same port and being members in the same bridge.
- */
- if (netif_is_bridge_master(upper_dev) &&
- !mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
- upper_dev))
- return -EINVAL;
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
if (netif_is_bridge_master(upper_dev)) {
if (info->linking)
- err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
- upper_dev);
+ err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
+ vlan_dev,
+ upper_dev);
else
- mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
+ vlan_dev,
+ upper_dev);
} else {
err = -EINVAL;
WARN_ON(1);
@@ -4547,9 +4280,10 @@
return err;
}
-static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
- unsigned long event, void *ptr,
- u16 vid)
+static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
+ struct net_device *lag_dev,
+ unsigned long event,
+ void *ptr, u16 vid)
{
struct net_device *dev;
struct list_head *iter;
@@ -4557,8 +4291,9 @@
netdev_for_each_lower_dev(lag_dev, dev, iter) {
if (mlxsw_sp_port_dev_check(dev)) {
- ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
- vid);
+ ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
+ event, ptr,
+ vid);
if (ret)
return ret;
}
@@ -4574,11 +4309,12 @@
u16 vid = vlan_dev_vlan_id(vlan_dev);
if (mlxsw_sp_port_dev_check(real_dev))
- return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
- vid);
+ return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
+ event, ptr, vid);
else if (netif_is_lag_master(real_dev))
- return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
- vid);
+ return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
+ real_dev, event,
+ ptr, vid);
return 0;
}
@@ -4603,11 +4339,9 @@
else if (mlxsw_sp_is_vrf_event(event, ptr))
err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
else if (mlxsw_sp_port_dev_check(dev))
- err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
+ err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
else if (netif_is_lag_master(dev))
err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
- else if (netif_is_bridge_master(dev))
- err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
else if (is_vlan_dev(dev))
err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
@@ -4680,3 +4414,4 @@
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
+MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 0c23bc1..5ef98d4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -54,12 +54,7 @@
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
-#define MLXSW_SP_VFID_BASE VLAN_N_VID
-#define MLXSW_SP_VFID_MAX 1024 /* Bridged VLAN interfaces */
-
-#define MLXSW_SP_DUMMY_FID 15359
-
-#define MLXSW_SP_RFID_BASE 15360
+#define MLXSW_SP_FID_8021D_MAX 1024
#define MLXSW_SP_MID_MAX 7000
@@ -78,13 +73,19 @@
unsigned int ref_count;
};
-struct mlxsw_sp_fid {
- void (*leave)(struct mlxsw_sp_port *mlxsw_sp_vport);
- struct list_head list;
- unsigned int ref_count;
- struct net_device *dev;
- struct mlxsw_sp_rif *rif;
- u16 fid;
+enum mlxsw_sp_rif_type {
+ MLXSW_SP_RIF_TYPE_SUBPORT,
+ MLXSW_SP_RIF_TYPE_VLAN,
+ MLXSW_SP_RIF_TYPE_FID,
+ MLXSW_SP_RIF_TYPE_MAX,
+};
+
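+/* FID families: 802.1Q VLAN FIDs, 802.1D bridge FIDs, router FIDs
+ * (rFIDs) backing router interfaces and a dummy FID used by the ACL
+ * code.
+ */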
+enum mlxsw_sp_fid_type {
+ MLXSW_SP_FID_TYPE_8021Q,
+ MLXSW_SP_FID_TYPE_8021D,
+ MLXSW_SP_FID_TYPE_RFID,
+ MLXSW_SP_FID_TYPE_DUMMY,
+ MLXSW_SP_FID_TYPE_MAX,
};
struct mlxsw_sp_mid {
@@ -95,85 +96,6 @@
unsigned int ref_count;
};
-static inline u16 mlxsw_sp_vfid_to_fid(u16 vfid)
-{
- return MLXSW_SP_VFID_BASE + vfid;
-}
-
-static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
-{
- return fid - MLXSW_SP_VFID_BASE;
-}
-
-static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
-{
- return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_DUMMY_FID;
-}
-
-struct mlxsw_sp_sb_pr {
- enum mlxsw_reg_sbpr_mode mode;
- u32 size;
-};
-
-struct mlxsw_cp_sb_occ {
- u32 cur;
- u32 max;
-};
-
-struct mlxsw_sp_sb_cm {
- u32 min_buff;
- u32 max_buff;
- u8 pool;
- struct mlxsw_cp_sb_occ occ;
-};
-
-struct mlxsw_sp_sb_pm {
- u32 min_buff;
- u32 max_buff;
- struct mlxsw_cp_sb_occ occ;
-};
-
-#define MLXSW_SP_SB_POOL_COUNT 4
-#define MLXSW_SP_SB_TC_COUNT 8
-
-struct mlxsw_sp_sb_port {
- struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
- struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
-};
-
-struct mlxsw_sp_sb {
- struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
- struct mlxsw_sp_sb_port *ports;
- u32 cell_size;
-};
-
-#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
-
-struct mlxsw_sp_prefix_usage {
- DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
-};
-
-enum mlxsw_sp_l3proto {
- MLXSW_SP_L3_PROTO_IPV4,
- MLXSW_SP_L3_PROTO_IPV6,
-};
-
-struct mlxsw_sp_lpm_tree {
- u8 id; /* tree ID */
- unsigned int ref_count;
- enum mlxsw_sp_l3proto proto;
- struct mlxsw_sp_prefix_usage prefix_usage;
-};
-
-struct mlxsw_sp_fib;
-
-struct mlxsw_sp_vr {
- u16 id; /* virtual router ID */
- u32 tb_id; /* kernel fib table id */
- unsigned int rif_count;
- struct mlxsw_sp_fib *fib4;
-};
-
enum mlxsw_sp_span_type {
MLXSW_SP_SPAN_EGRESS,
MLXSW_SP_SPAN_INGRESS
@@ -212,58 +134,25 @@
};
};
-struct mlxsw_sp_router {
- struct mlxsw_sp_vr *vrs;
- struct rhashtable neigh_ht;
- struct rhashtable nexthop_group_ht;
- struct rhashtable nexthop_ht;
- struct {
- struct mlxsw_sp_lpm_tree *trees;
- unsigned int tree_count;
- } lpm;
- struct {
- struct delayed_work dw;
- unsigned long interval; /* ms */
- } neighs_update;
- struct delayed_work nexthop_probe_dw;
-#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
- struct list_head nexthop_neighs_list;
- bool aborted;
-};
-
+struct mlxsw_sp_sb;
+struct mlxsw_sp_bridge;
+struct mlxsw_sp_router;
struct mlxsw_sp_acl;
struct mlxsw_sp_counter_pool;
+struct mlxsw_sp_fid_core;
struct mlxsw_sp {
- struct {
- struct list_head list;
- DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX);
- } vfids;
- struct {
- struct list_head list;
- DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
- } br_mids;
- struct list_head fids; /* VLAN-aware bridge FIDs */
- struct mlxsw_sp_rif **rifs;
struct mlxsw_sp_port **ports;
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
unsigned char base_mac[ETH_ALEN];
- struct {
- struct delayed_work dw;
-#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
- unsigned int interval; /* ms */
- } fdb_notify;
-#define MLXSW_SP_MIN_AGEING_TIME 10
-#define MLXSW_SP_MAX_AGEING_TIME 1000000
-#define MLXSW_SP_DEFAULT_AGEING_TIME 300
- u32 ageing_time;
- struct mlxsw_sp_upper master_bridge;
struct mlxsw_sp_upper *lags;
u8 *port_to_module;
- struct mlxsw_sp_sb sb;
- struct mlxsw_sp_router router;
+ struct mlxsw_sp_sb *sb;
+ struct mlxsw_sp_bridge *bridge;
+ struct mlxsw_sp_router *router;
struct mlxsw_sp_acl *acl;
+ struct mlxsw_sp_fid_core *fid_core;
struct {
DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
} kvdl;
@@ -273,7 +162,6 @@
struct mlxsw_sp_span_entry *entries;
int entries_count;
} span;
- struct notifier_block fib_nb;
};
static inline struct mlxsw_sp_upper *
@@ -282,18 +170,6 @@
return &mlxsw_sp->lags[lag_id];
}
-static inline u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp,
- u32 cells)
-{
- return mlxsw_sp->sb.cell_size * cells;
-}
-
-static inline u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp,
- u32 bytes)
-{
- return DIV_ROUND_UP(bytes, mlxsw_sp->sb.cell_size);
-}
-
struct mlxsw_sp_port_pcpu_stats {
u64 rx_packets;
u64 rx_bytes;
@@ -310,29 +186,28 @@
bool truncate;
};
+struct mlxsw_sp_bridge_port;
+struct mlxsw_sp_fid;
+
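+/* A {port, VID} pair. 'fid' is set when the pair is mapped to a
+ * filtering identifier (bridged or routed) and 'bridge_port' when it
+ * is a member of a bridge port.
+ */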
+struct mlxsw_sp_port_vlan {
+ struct list_head list;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp_fid *fid;
+ u16 vid;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ struct list_head bridge_vlan_node;
+};
+
struct mlxsw_sp_port {
struct net_device *dev;
struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
struct mlxsw_sp *mlxsw_sp;
u8 local_port;
- u8 stp_state;
- u16 learning:1,
- learning_sync:1,
- uc_flood:1,
- mc_flood:1,
- mc_router:1,
- mc_disabled:1,
- bridged:1,
- lagged:1,
+ u8 lagged:1,
split:1;
u16 pvid;
u16 lag_id;
struct {
- struct list_head list;
- struct mlxsw_sp_fid *f;
- u16 vid;
- } vport;
- struct {
u8 tx_pause:1,
rx_pause:1,
autoneg:1;
@@ -347,11 +222,6 @@
u8 width;
u8 lane;
} mapping;
- /* 802.1Q bridge VLANs */
- unsigned long *active_vlans;
- unsigned long *untagged_vlans;
- /* VLAN interfaces */
- struct list_head vports_list;
/* TC handles */
struct list_head mall_tc_list;
struct {
@@ -360,13 +230,9 @@
struct delayed_work update_dw;
} hw_stats;
struct mlxsw_sp_port_sample *sample;
+ struct list_head vlans_list;
};
-bool mlxsw_sp_port_dev_check(const struct net_device *dev);
-struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
-struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
-void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
-
static inline bool
mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
{
@@ -385,102 +251,28 @@
return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
}
-static inline u16
-mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
+static inline struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_find_by_vid(const struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
{
- return mlxsw_sp_vport->vport.vid;
-}
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
-static inline bool
-mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port)
-{
- u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
-
- return vid != 0;
-}
-
-static inline void mlxsw_sp_vport_fid_set(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct mlxsw_sp_fid *f)
-{
- mlxsw_sp_vport->vport.f = f;
-}
-
-static inline struct mlxsw_sp_fid *
-mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
-{
- return mlxsw_sp_vport->vport.f;
-}
-
-static inline struct net_device *
-mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
-{
- struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
-
- return f ? f->dev : NULL;
-}
-
-static inline struct mlxsw_sp_port *
-mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
-{
- struct mlxsw_sp_port *mlxsw_sp_vport;
-
- list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
- vport.list) {
- if (mlxsw_sp_vport_vid_get(mlxsw_sp_vport) == vid)
- return mlxsw_sp_vport;
+ list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
+ list) {
+ if (mlxsw_sp_port_vlan->vid == vid)
+ return mlxsw_sp_port_vlan;
}
return NULL;
}
-static inline struct mlxsw_sp_port *
-mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid)
-{
- struct mlxsw_sp_port *mlxsw_sp_vport;
-
- list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
- vport.list) {
- struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
-
- if (f && f->fid == fid)
- return mlxsw_sp_vport;
- }
-
- return NULL;
-}
-
-static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
- u16 fid)
-{
- struct mlxsw_sp_fid *f;
-
- list_for_each_entry(f, &mlxsw_sp->fids, list)
- if (f->fid == fid)
- return f;
-
- return NULL;
-}
-
-static inline struct mlxsw_sp_fid *
-mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev)
-{
- struct mlxsw_sp_fid *f;
-
- list_for_each_entry(f, &mlxsw_sp->vfids.list, list)
- if (f->dev == br_dev)
- return f;
-
- return NULL;
-}
-
-enum mlxsw_sp_flood_table {
- MLXSW_SP_FLOOD_TABLE_UC,
- MLXSW_SP_FLOOD_TABLE_BC,
- MLXSW_SP_FLOOD_TABLE_MC,
+enum mlxsw_sp_flood_type {
+ MLXSW_SP_FLOOD_TYPE_UC,
+ MLXSW_SP_FLOOD_TYPE_BC,
+ MLXSW_SP_FLOOD_TYPE_MC,
};
+/* spectrum_buffers.c */
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
@@ -515,26 +307,26 @@
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max);
+u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells);
+u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes);
+/* spectrum_switchdev.c */
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
-int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
- enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
- u16 vid);
-int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
- u16 vid_end, bool is_member, bool untagged);
-int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
- bool set);
-void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
-int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
-int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
bool adding);
-struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
-void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
+void
+mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
+int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *brport_dev,
+ struct net_device *br_dev);
+void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *brport_dev,
+ struct net_device *br_dev);
+
+/* spectrum.c */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
bool dwrr, u8 dwrr_weight);
@@ -546,27 +338,45 @@
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
u8 next_index, u32 maxrate);
-int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end,
- bool learn_enable);
+int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ u8 state);
+int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable);
+int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ bool learn_enable);
+int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
+int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
+ u16 vid_end, bool is_member, bool untagged);
+int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index, u64 *packets,
+ u64 *bytes);
+int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ unsigned int *p_counter_index);
+void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index);
+bool mlxsw_sp_port_dev_check(const struct net_device *dev);
+struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
+struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
+struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
+void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
+struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev);
+/* spectrum_dcb.c */
#ifdef CONFIG_MLXSW_SPECTRUM_DCB
-
int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port);
-
#else
-
static inline int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
return 0;
}
-
static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{}
-
#endif
+/* spectrum_router.c */
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
@@ -574,17 +384,17 @@
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
unsigned long event, void *ptr);
-void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif);
int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
struct netdev_notifier_changeupper_info *info);
+void
+mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
+void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
+/* spectrum_kvdl.c */
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
u32 *p_entry_index);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
-struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
-
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
struct mlxsw_afk_element_values values;
@@ -625,6 +435,8 @@
struct mlxsw_sp_acl_ruleset;
+/* spectrum_acl.c */
+struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev, bool ingress,
@@ -649,6 +461,7 @@
void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
u16 group_id);
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct net_device *out_dev);
@@ -683,23 +496,48 @@
struct mlxsw_sp_acl_rule *rule,
u64 *packets, u64 *bytes, u64 *last_use);
+struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp);
+
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
+/* spectrum_acl_tcam.c */
extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;
+/* spectrum_flower.c */
int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
__be16 protocol, struct tc_cls_flower_offload *f);
void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
struct tc_cls_flower_offload *f);
int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
struct tc_cls_flower_offload *f);
-int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
- unsigned int counter_index, u64 *packets,
- u64 *bytes);
-int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
- unsigned int *p_counter_index);
-void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
- unsigned int counter_index);
+
+/* spectrum_fid.c */
+int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_flood_type packet_type, u8 local_port,
+ bool member);
+int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+enum mlxsw_sp_rif_type mlxsw_sp_fid_rif_type(const struct mlxsw_sp_fid *fid);
+u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid);
+enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid);
+void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
+enum mlxsw_sp_rif_type
+mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_fid_type type);
+u16 mlxsw_sp_fid_8021q_vid(const struct mlxsw_sp_fid *fid);
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_get(struct mlxsw_sp *mlxsw_sp, u16 vid);
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp *mlxsw_sp,
+ int br_ifindex);
+struct mlxsw_sp_fid *mlxsw_sp_fid_rfid_get(struct mlxsw_sp *mlxsw_sp,
+ u16 rif_index);
+struct mlxsw_sp_fid *mlxsw_sp_fid_dummy_get(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid);
+int mlxsw_sp_port_fids_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 317f7b1..01a1501 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -53,6 +53,7 @@
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_afk *afk;
struct mlxsw_afa *afa;
+ struct mlxsw_sp_fid *dummy_fid;
const struct mlxsw_sp_acl_ops *ops;
struct rhashtable ruleset_ht;
struct list_head rules;
@@ -112,6 +113,11 @@
.automatic_shrinking = true,
};
+struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp->acl->dummy_fid;
+}
+
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_acl_profile_ops *ops)
@@ -341,6 +347,11 @@
return mlxsw_afa_block_append_drop(rulei->act_block);
}
+int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return mlxsw_afa_block_append_trap(rulei->act_block);
+}
+
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct net_device *out_dev)
@@ -676,6 +687,7 @@
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
+ struct mlxsw_sp_fid *fid;
struct mlxsw_sp_acl *acl;
int err;
@@ -706,6 +718,13 @@
if (err)
goto err_rhashtable_init;
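+ /* Reserve a dummy FID for use by the ACL code */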
+ fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ goto err_fid_get;
+ }
+ acl->dummy_fid = fid;
+
INIT_LIST_HEAD(&acl->rules);
err = acl_ops->init(mlxsw_sp, acl->priv);
if (err)
@@ -721,6 +740,8 @@
return 0;
err_acl_ops_init:
+ mlxsw_sp_fid_put(fid);
+err_fid_get:
rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
mlxsw_afa_destroy(acl->afa);
@@ -739,6 +760,7 @@
cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
acl_ops->fini(mlxsw_sp, acl->priv);
WARN_ON(!list_empty(&acl->rules));
+ mlxsw_sp_fid_put(acl->dummy_fid);
rhashtable_destroy(&acl->ruleset_ht);
mlxsw_afa_destroy(acl->afa);
mlxsw_afk_destroy(acl->afk);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
index af7b7ba..85d5001 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
@@ -68,6 +68,11 @@
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32),
+ MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */
+};
+
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12),
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3),
@@ -102,6 +107,7 @@
MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex),
MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip),
MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip),
+ MLXSW_AFK_BLOCK(0x32, mlxsw_sp_afk_element_info_ipv4),
MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex),
MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip),
MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 3a24289..61a10f1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -983,6 +983,7 @@
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
MLXSW_AFK_ELEMENT_VID,
MLXSW_AFK_ELEMENT_PCP,
+ MLXSW_AFK_ELEMENT_TCP_FLAGS,
};
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 997189c..93728c6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -43,25 +43,72 @@
#include "port.h"
#include "reg.h"
+struct mlxsw_sp_sb_pr {
+ enum mlxsw_reg_sbpr_mode mode;
+ u32 size;
+};
+
+struct mlxsw_sp_sb_occ {
+ u32 cur;
+ u32 max;
+};
+
+struct mlxsw_sp_sb_cm {
+ u32 min_buff;
+ u32 max_buff;
+ u8 pool;
+ struct mlxsw_sp_sb_occ occ;
+};
+
+struct mlxsw_sp_sb_pm {
+ u32 min_buff;
+ u32 max_buff;
+ struct mlxsw_sp_sb_occ occ;
+};
+
+#define MLXSW_SP_SB_POOL_COUNT 4
+#define MLXSW_SP_SB_TC_COUNT 8
+
+struct mlxsw_sp_sb_port {
+ struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
+ struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
+};
+
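+/* Top-level shared buffer state. It used to be embedded in struct mlxsw_sp
+ * and is now allocated separately at init time.
+ */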
+struct mlxsw_sp_sb {
+ struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
+ struct mlxsw_sp_sb_port *ports;
+ u32 cell_size;
+};
+
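+/* Convert between buffer cells and bytes; the cell size is queried from
+ * the device resources at init time.
+ */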
+u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
+{
+ return mlxsw_sp->sb->cell_size * cells;
+}
+
+u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
+{
+ return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
+}
+
static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
u8 pool,
enum mlxsw_reg_sbxx_dir dir)
{
- return &mlxsw_sp->sb.prs[dir][pool];
+ return &mlxsw_sp->sb->prs[dir][pool];
}
static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
u8 local_port, u8 pg_buff,
enum mlxsw_reg_sbxx_dir dir)
{
- return &mlxsw_sp->sb.ports[local_port].cms[dir][pg_buff];
+ return &mlxsw_sp->sb->ports[local_port].cms[dir][pg_buff];
}
static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
u8 local_port, u8 pool,
enum mlxsw_reg_sbxx_dir dir)
{
- return &mlxsw_sp->sb.ports[local_port].pms[dir][pool];
+ return &mlxsw_sp->sb->ports[local_port].pms[dir][pool];
}
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
@@ -215,16 +262,17 @@
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- mlxsw_sp->sb.ports = kcalloc(max_ports, sizeof(struct mlxsw_sp_sb_port),
- GFP_KERNEL);
- if (!mlxsw_sp->sb.ports)
+ mlxsw_sp->sb->ports = kcalloc(max_ports,
+ sizeof(struct mlxsw_sp_sb_port),
+ GFP_KERNEL);
+ if (!mlxsw_sp->sb->ports)
return -ENOMEM;
return 0;
}
static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
{
- kfree(mlxsw_sp->sb.ports);
+ kfree(mlxsw_sp->sb->ports);
}
#define MLXSW_SP_SB_PR_INGRESS_SIZE 12440000
@@ -551,15 +599,19 @@
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
return -EIO;
- mlxsw_sp->sb.cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
return -EIO;
sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE);
+ mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
+ if (!mlxsw_sp->sb)
+ return -ENOMEM;
+ mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
+
err = mlxsw_sp_sb_ports_init(mlxsw_sp);
if (err)
- return err;
+ goto err_sb_ports_init;
err = mlxsw_sp_sb_prs_init(mlxsw_sp);
if (err)
goto err_sb_prs_init;
@@ -584,6 +636,8 @@
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
mlxsw_sp_sb_ports_fini(mlxsw_sp);
+err_sb_ports_init:
+ kfree(mlxsw_sp->sb);
return err;
}
@@ -591,6 +645,7 @@
{
devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
mlxsw_sp_sb_ports_fini(mlxsw_sp);
+ kfree(mlxsw_sp->sb);
}
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 5f0a7bc..af2c65a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -218,7 +218,7 @@
mlxsw_sp_table_erif_entries_dump(void *priv, bool counters_enabled,
struct devlink_dpipe_dump_ctx *dump_ctx)
{
- struct devlink_dpipe_value match_value = {{0}}, action_value = {{0}};
+ struct devlink_dpipe_value match_value, action_value;
struct devlink_dpipe_action action = {0};
struct devlink_dpipe_match match = {0};
struct devlink_dpipe_entry entry = {0};
@@ -227,6 +227,9 @@
int i, j;
int err;
+ memset(&match_value, 0, sizeof(match_value));
+ memset(&action_value, 0, sizeof(action_value));
+
mlxsw_sp_erif_match_action_prepare(&match, &action);
err = mlxsw_sp_erif_entry_prepare(&entry, &match_value, &match,
&action_value, &action);
@@ -242,10 +245,11 @@
return err;
j = 0;
for (; i < rif_count; i++) {
- if (!mlxsw_sp->rifs[i])
+ struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
+
+ if (!rif)
continue;
- err = mlxsw_sp_erif_entry_get(mlxsw_sp, &entry,
- mlxsw_sp->rifs[i],
+ err = mlxsw_sp_erif_entry_get(mlxsw_sp, &entry, rif,
counters_enabled);
if (err)
goto err_entry_get;
@@ -282,15 +286,15 @@
rtnl_lock();
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
- if (!mlxsw_sp->rifs[i])
+ struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
+
+ if (!rif)
continue;
if (enable)
- mlxsw_sp_rif_counter_alloc(mlxsw_sp,
- mlxsw_sp->rifs[i],
+ mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
MLXSW_SP_RIF_COUNTER_EGRESS);
else
- mlxsw_sp_rif_counter_free(mlxsw_sp,
- mlxsw_sp->rifs[i],
+ mlxsw_sp_rif_counter_free(mlxsw_sp, rif,
MLXSW_SP_RIF_COUNTER_EGRESS);
}
rtnl_unlock();
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
new file mode 100644
index 0000000..6afbe9e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -0,0 +1,992 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+
+#include "spectrum.h"
+#include "reg.h"
+
+struct mlxsw_sp_fid_family;
+
+struct mlxsw_sp_fid_core {
+ struct mlxsw_sp_fid_family *fid_family_arr[MLXSW_SP_FID_TYPE_MAX];
+ unsigned int *port_fid_mappings;
+};
+
+struct mlxsw_sp_fid {
+ struct list_head list;
+ struct mlxsw_sp_rif *rif;
+ unsigned int ref_count;
+ u16 fid_index;
+ struct mlxsw_sp_fid_family *fid_family;
+};
+
+struct mlxsw_sp_fid_8021q {
+ struct mlxsw_sp_fid common;
+ u16 vid;
+};
+
+struct mlxsw_sp_fid_8021d {
+ struct mlxsw_sp_fid common;
+ int br_ifindex;
+};
+
+struct mlxsw_sp_flood_table {
+ enum mlxsw_sp_flood_type packet_type;
+ enum mlxsw_reg_sfgc_bridge_type bridge_type;
+ enum mlxsw_flood_table_type table_type;
+ int table_index;
+};
+
+struct mlxsw_sp_fid_ops {
+ void (*setup)(struct mlxsw_sp_fid *fid, const void *arg);
+ int (*configure)(struct mlxsw_sp_fid *fid);
+ void (*deconfigure)(struct mlxsw_sp_fid *fid);
+ int (*index_alloc)(struct mlxsw_sp_fid *fid, const void *arg,
+ u16 *p_fid_index);
+ bool (*compare)(const struct mlxsw_sp_fid *fid,
+ const void *arg);
+ u16 (*flood_index)(const struct mlxsw_sp_fid *fid);
+ int (*port_vid_map)(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *port, u16 vid);
+ void (*port_vid_unmap)(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *port, u16 vid);
+};
+
+struct mlxsw_sp_fid_family {
+ enum mlxsw_sp_fid_type type;
+ size_t fid_size;
+ u16 start_index;
+ u16 end_index;
+ struct list_head fids_list;
+ unsigned long *fids_bitmap;
+ const struct mlxsw_sp_flood_table *flood_tables;
+ int nr_flood_tables;
+ enum mlxsw_sp_rif_type rif_type;
+ const struct mlxsw_sp_fid_ops *ops;
+ struct mlxsw_sp *mlxsw_sp;
+};
+
+static const int mlxsw_sp_sfgc_uc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
+ [MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST] = 1,
+};
+
+static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
+ [MLXSW_REG_SFGC_TYPE_BROADCAST] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1,
+ [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1,
+ [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1,
+};
+
+static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1,
+};
+
+static const int *mlxsw_sp_packet_type_sfgc_types[] = {
+ [MLXSW_SP_FLOOD_TYPE_UC] = mlxsw_sp_sfgc_uc_packet_types,
+ [MLXSW_SP_FLOOD_TYPE_BC] = mlxsw_sp_sfgc_bc_packet_types,
+ [MLXSW_SP_FLOOD_TYPE_MC] = mlxsw_sp_sfgc_mc_packet_types,
+};
+
+static const struct mlxsw_sp_flood_table *
+mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_flood_type packet_type)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ int i;
+
+ for (i = 0; i < fid_family->nr_flood_tables; i++) {
+ if (fid_family->flood_tables[i].packet_type != packet_type)
+ continue;
+ return &fid_family->flood_tables[i];
+ }
+
+ return NULL;
+}
+
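+/* Set or clear the port's membership in the FID's flood table matching
+ * the given packet type (unknown unicast, multicast or broadcast).
+ */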
+int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_flood_type packet_type, u8 local_port,
+ bool member)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
+ const struct mlxsw_sp_flood_table *flood_table;
+ char *sftr_pl;
+ int err;
+
+ if (WARN_ON(!fid_family->flood_tables || !ops->flood_index))
+ return -EINVAL;
+
+ flood_table = mlxsw_sp_fid_flood_table_lookup(fid, packet_type);
+ if (!flood_table)
+ return -ESRCH;
+
+ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+ if (!sftr_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_sftr_pack(sftr_pl, flood_table->table_index,
+ ops->flood_index(fid), flood_table->table_type, 1,
+ local_port, member);
+ err = mlxsw_reg_write(fid_family->mlxsw_sp->core, MLXSW_REG(sftr),
+ sftr_pl);
+ kfree(sftr_pl);
+ return err;
+}
+
+int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ if (WARN_ON(!fid->fid_family->ops->port_vid_map))
+ return -EINVAL;
+ return fid->fid_family->ops->port_vid_map(fid, mlxsw_sp_port, vid);
+}
+
+void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ fid->fid_family->ops->port_vid_unmap(fid, mlxsw_sp_port, vid);
+}
+
+enum mlxsw_sp_rif_type mlxsw_sp_fid_rif_type(const struct mlxsw_sp_fid *fid)
+{
+ return fid->fid_family->rif_type;
+}
+
+u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid)
+{
+ return fid->fid_index;
+}
+
+enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid)
+{
+ return fid->fid_family->type;
+}
+
+void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif)
+{
+ fid->rif = rif;
+}
+
+enum mlxsw_sp_rif_type
+mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_fid_type type)
+{
+ struct mlxsw_sp_fid_core *fid_core = mlxsw_sp->fid_core;
+
+ return fid_core->fid_family_arr[type]->rif_type;
+}
+
+static struct mlxsw_sp_fid_8021q *
+mlxsw_sp_fid_8021q_fid(const struct mlxsw_sp_fid *fid)
+{
+ return container_of(fid, struct mlxsw_sp_fid_8021q, common);
+}
+
+u16 mlxsw_sp_fid_8021q_vid(const struct mlxsw_sp_fid *fid)
+{
+ return mlxsw_sp_fid_8021q_fid(fid)->vid;
+}
+
+static void mlxsw_sp_fid_8021q_setup(struct mlxsw_sp_fid *fid, const void *arg)
+{
+ u16 vid = *(u16 *) arg;
+
+ mlxsw_sp_fid_8021q_fid(fid)->vid = vid;
+}
+
+static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid)
+{
+ return valid ? MLXSW_REG_SFMR_OP_CREATE_FID :
+ MLXSW_REG_SFMR_OP_DESTROY_FID;
+}
+
+static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
+ u16 fid_offset, bool valid)
+{
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, mlxsw_sp_sfmr_op(valid), fid_index,
+ fid_offset);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static int mlxsw_sp_fid_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
+ u16 vid, bool valid)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+ mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid_index, vid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int __mlxsw_sp_fid_port_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
+ u8 local_port, u16 vid, bool valid)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+ mlxsw_reg_svfa_pack(svfa_pl, local_port, mt, valid, fid_index, vid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int mlxsw_sp_fid_8021q_configure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_8021q *fid_8021q;
+ int err;
+
+ err = mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, fid->fid_index, true);
+ if (err)
+ return err;
+
+ fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+ err = mlxsw_sp_fid_vid_map(mlxsw_sp, fid->fid_index, fid_8021q->vid,
+ true);
+ if (err)
+ goto err_fid_map;
+
+ return 0;
+
+err_fid_map:
+ mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, 0, false);
+ return err;
+}
+
+static void mlxsw_sp_fid_8021q_deconfigure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_8021q *fid_8021q;
+
+ fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+ mlxsw_sp_fid_vid_map(mlxsw_sp, fid->fid_index, fid_8021q->vid, false);
+ mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, 0, false);
+}
+
+static int mlxsw_sp_fid_8021q_index_alloc(struct mlxsw_sp_fid *fid,
+ const void *arg, u16 *p_fid_index)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ u16 vid = *(u16 *) arg;
+
+ /* Use a 1:1 VID to FID mapping for simplicity; it is not required */
+ if (vid < fid_family->start_index || vid > fid_family->end_index)
+ return -EINVAL;
+ *p_fid_index = vid;
+
+ return 0;
+}
+
+static bool
+mlxsw_sp_fid_8021q_compare(const struct mlxsw_sp_fid *fid, const void *arg)
+{
+ u16 vid = *(u16 *) arg;
+
+ return mlxsw_sp_fid_8021q_fid(fid)->vid == vid;
+}
+
+static u16 mlxsw_sp_fid_8021q_flood_index(const struct mlxsw_sp_fid *fid)
+{
+ return fid->fid_index;
+}
+
+static int mlxsw_sp_fid_8021q_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+
+ /* In case there are no {Port, VID} => FID mappings on the port,
+ * we can use the global VID => FID mapping we created when the
+ * FID was configured.
+ */
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 0)
+ return 0;
+ return __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index, local_port,
+ vid, true);
+}
+
+static void
+mlxsw_sp_fid_8021q_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 0)
+ return;
+ __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index, local_port, vid,
+ false);
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_ops = {
+ .setup = mlxsw_sp_fid_8021q_setup,
+ .configure = mlxsw_sp_fid_8021q_configure,
+ .deconfigure = mlxsw_sp_fid_8021q_deconfigure,
+ .index_alloc = mlxsw_sp_fid_8021q_index_alloc,
+ .compare = mlxsw_sp_fid_8021q_compare,
+ .flood_index = mlxsw_sp_fid_8021q_flood_index,
+ .port_vid_map = mlxsw_sp_fid_8021q_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_8021q_port_vid_unmap,
+};
+
+static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021q_flood_tables[] = {
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_UC,
+ .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
+ .table_index = 0,
+ },
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_MC,
+ .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
+ .table_index = 1,
+ },
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_BC,
+ .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
+ .table_index = 2,
+ },
+};
+
+/* Range and flood configuration must match mlxsw_config_profile */
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021q_family = {
+ .type = MLXSW_SP_FID_TYPE_8021Q,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
+ .start_index = 1,
+ .end_index = VLAN_VID_MASK,
+ .flood_tables = mlxsw_sp_fid_8021q_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021q_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
+ .ops = &mlxsw_sp_fid_8021q_ops,
+};
+
+static struct mlxsw_sp_fid_8021d *
+mlxsw_sp_fid_8021d_fid(const struct mlxsw_sp_fid *fid)
+{
+ return container_of(fid, struct mlxsw_sp_fid_8021d, common);
+}
+
+static void mlxsw_sp_fid_8021d_setup(struct mlxsw_sp_fid *fid, const void *arg)
+{
+ int br_ifindex = *(int *) arg;
+
+ mlxsw_sp_fid_8021d_fid(fid)->br_ifindex = br_ifindex;
+}
+
+static int mlxsw_sp_fid_8021d_configure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+
+ return mlxsw_sp_fid_op(fid_family->mlxsw_sp, fid->fid_index, 0, true);
+}
+
+static void mlxsw_sp_fid_8021d_deconfigure(struct mlxsw_sp_fid *fid)
+{
+ mlxsw_sp_fid_op(fid->fid_family->mlxsw_sp, fid->fid_index, 0, false);
+}
+
+static int mlxsw_sp_fid_8021d_index_alloc(struct mlxsw_sp_fid *fid,
+ const void *arg, u16 *p_fid_index)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ u16 nr_fids, fid_index;
+
+ nr_fids = fid_family->end_index - fid_family->start_index + 1;
+ fid_index = find_first_zero_bit(fid_family->fids_bitmap, nr_fids);
+ if (fid_index == nr_fids)
+ return -ENOBUFS;
+ *p_fid_index = fid_family->start_index + fid_index;
+
+ return 0;
+}
+
+static bool
+mlxsw_sp_fid_8021d_compare(const struct mlxsw_sp_fid *fid, const void *arg)
+{
+ int br_ifindex = *(int *) arg;
+
+ return mlxsw_sp_fid_8021d_fid(fid)->br_ifindex == br_ifindex;
+}
+
+static u16 mlxsw_sp_fid_8021d_flood_index(const struct mlxsw_sp_fid *fid)
+{
+ return fid->fid_index - fid->fid_family->start_index;
+}
+
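+/* Transition the port to Virtual Port mode: replay a {Port, VID} => FID
+ * mapping for every VLAN on the port that has a FID, then enable VP mode,
+ * unwinding the mappings on error.
+ */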
+static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ int err;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
+ list) {
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+
+ if (!fid)
+ continue;
+
+ err = __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ mlxsw_sp_port->local_port,
+ vid, true);
+ if (err)
+ goto err_fid_port_vid_map;
+ }
+
+ err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
+ if (err)
+ goto err_port_vp_mode_set;
+
+ return 0;
+
+err_port_vp_mode_set:
+err_fid_port_vid_map:
+ list_for_each_entry_continue_reverse(mlxsw_sp_port_vlan,
+ &mlxsw_sp_port->vlans_list, list) {
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+
+ if (!fid)
+ continue;
+
+ __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ mlxsw_sp_port->local_port, vid,
+ false);
+ }
+ return err;
+}
+
+static void mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+
+ list_for_each_entry_reverse(mlxsw_sp_port_vlan,
+ &mlxsw_sp_port->vlans_list, list) {
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+
+ if (!fid)
+ continue;
+
+ __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ mlxsw_sp_port->local_port, vid,
+ false);
+ }
+}
+
+static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ int err;
+
+ err = __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ mlxsw_sp_port->local_port, vid, true);
+ if (err)
+ return err;
+
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port]++ == 0) {
+ err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
+ if (err)
+ goto err_port_vp_mode_trans;
+ }
+
+ return 0;
+
+err_port_vp_mode_trans:
+ mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+ __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ mlxsw_sp_port->local_port, vid, false);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_8021d_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
+ mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+ mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+ __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ mlxsw_sp_port->local_port, vid, false);
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
+ .setup = mlxsw_sp_fid_8021d_setup,
+ .configure = mlxsw_sp_fid_8021d_configure,
+ .deconfigure = mlxsw_sp_fid_8021d_deconfigure,
+ .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
+ .compare = mlxsw_sp_fid_8021d_compare,
+ .flood_index = mlxsw_sp_fid_8021d_flood_index,
+ .port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
+};
+
+static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021d_flood_tables[] = {
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_UC,
+ .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_index = 0,
+ },
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_MC,
+ .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_index = 1,
+ },
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_BC,
+ .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_index = 2,
+ },
+};
+
+/* Range and flood configuration must match mlxsw_config_profile */
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021d_family = {
+ .type = MLXSW_SP_FID_TYPE_8021D,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
+ .start_index = VLAN_N_VID,
+ .end_index = VLAN_N_VID + MLXSW_SP_FID_8021D_MAX - 1,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_FID,
+ .ops = &mlxsw_sp_fid_8021d_ops,
+};
+
+static int mlxsw_sp_fid_rfid_configure(struct mlxsw_sp_fid *fid)
+{
+ /* rFIDs are allocated by the device during init */
+ return 0;
+}
+
+static void mlxsw_sp_fid_rfid_deconfigure(struct mlxsw_sp_fid *fid)
+{
+}
+
+static int mlxsw_sp_fid_rfid_index_alloc(struct mlxsw_sp_fid *fid,
+ const void *arg, u16 *p_fid_index)
+{
+ u16 rif_index = *(u16 *) arg;
+
+ *p_fid_index = fid->fid_family->start_index + rif_index;
+
+ return 0;
+}
+
+static bool mlxsw_sp_fid_rfid_compare(const struct mlxsw_sp_fid *fid,
+ const void *arg)
+{
+ u16 rif_index = *(u16 *) arg;
+
+ return fid->fid_index == rif_index + fid->fid_family->start_index;
+}
+
+static int mlxsw_sp_fid_rfid_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ int err;
+
+ /* We only need to transition the port to virtual mode, since the
+ * {Port, VID} => FID mapping is done by the firmware upon RIF creation.
+ */
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port]++ == 0) {
+ err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
+ if (err)
+ goto err_port_vp_mode_trans;
+ }
+
+ return 0;
+
+err_port_vp_mode_trans:
+ mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+ return err;
+}
+
+static void
+mlxsw_sp_fid_rfid_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
+ mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+ mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_rfid_ops = {
+ .configure = mlxsw_sp_fid_rfid_configure,
+ .deconfigure = mlxsw_sp_fid_rfid_deconfigure,
+ .index_alloc = mlxsw_sp_fid_rfid_index_alloc,
+ .compare = mlxsw_sp_fid_rfid_compare,
+ .port_vid_map = mlxsw_sp_fid_rfid_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_rfid_port_vid_unmap,
+};
+
+#define MLXSW_SP_RFID_BASE (15 * 1024)
+#define MLXSW_SP_RFID_MAX 1024
+
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family = {
+ .type = MLXSW_SP_FID_TYPE_RFID,
+ .fid_size = sizeof(struct mlxsw_sp_fid),
+ .start_index = MLXSW_SP_RFID_BASE,
+ .end_index = MLXSW_SP_RFID_BASE + MLXSW_SP_RFID_MAX - 1,
+ .rif_type = MLXSW_SP_RIF_TYPE_SUBPORT,
+ .ops = &mlxsw_sp_fid_rfid_ops,
+};
+
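+/* The dummy FID is created during ACL initialization and provides a valid
+ * FID for ACL forwarding actions.
+ */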
+static int mlxsw_sp_fid_dummy_configure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+
+ return mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, 0, true);
+}
+
+static void mlxsw_sp_fid_dummy_deconfigure(struct mlxsw_sp_fid *fid)
+{
+ mlxsw_sp_fid_op(fid->fid_family->mlxsw_sp, fid->fid_index, 0, false);
+}
+
+static int mlxsw_sp_fid_dummy_index_alloc(struct mlxsw_sp_fid *fid,
+ const void *arg, u16 *p_fid_index)
+{
+ *p_fid_index = fid->fid_family->start_index;
+
+ return 0;
+}
+
+static bool mlxsw_sp_fid_dummy_compare(const struct mlxsw_sp_fid *fid,
+ const void *arg)
+{
+ return true;
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = {
+ .configure = mlxsw_sp_fid_dummy_configure,
+ .deconfigure = mlxsw_sp_fid_dummy_deconfigure,
+ .index_alloc = mlxsw_sp_fid_dummy_index_alloc,
+ .compare = mlxsw_sp_fid_dummy_compare,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
+ .type = MLXSW_SP_FID_TYPE_DUMMY,
+ .fid_size = sizeof(struct mlxsw_sp_fid),
+ .start_index = MLXSW_SP_RFID_BASE - 1,
+ .end_index = MLXSW_SP_RFID_BASE - 1,
+ .ops = &mlxsw_sp_fid_dummy_ops,
+};
+
+static const struct mlxsw_sp_fid_family *mlxsw_sp_fid_family_arr[] = {
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp_fid_8021q_family,
+ [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp_fid_8021d_family,
+ [MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
+ [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp_fid_dummy_family,
+};
+
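+/* Look up a FID by its family-specific argument (VID, bridge ifindex or
+ * RIF index) and take a reference, or allocate and configure a new one.
+ */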
+static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_fid_type type,
+ const void *arg)
+{
+ struct mlxsw_sp_fid_family *fid_family;
+ struct mlxsw_sp_fid *fid;
+ u16 fid_index;
+ int err;
+
+ fid_family = mlxsw_sp->fid_core->fid_family_arr[type];
+ list_for_each_entry(fid, &fid_family->fids_list, list) {
+ if (!fid->fid_family->ops->compare(fid, arg))
+ continue;
+ fid->ref_count++;
+ return fid;
+ }
+
+ fid = kzalloc(fid_family->fid_size, GFP_KERNEL);
+ if (!fid)
+ return ERR_PTR(-ENOMEM);
+ fid->fid_family = fid_family;
+
+ err = fid->fid_family->ops->index_alloc(fid, arg, &fid_index);
+ if (err)
+ goto err_index_alloc;
+ fid->fid_index = fid_index;
+ __set_bit(fid_index - fid_family->start_index, fid_family->fids_bitmap);
+
+ if (fid->fid_family->ops->setup)
+ fid->fid_family->ops->setup(fid, arg);
+
+ err = fid->fid_family->ops->configure(fid);
+ if (err)
+ goto err_configure;
+
+ list_add(&fid->list, &fid_family->fids_list);
+ fid->ref_count++;
+ return fid;
+
+err_configure:
+ __clear_bit(fid_index - fid_family->start_index,
+ fid_family->fids_bitmap);
+err_index_alloc:
+ kfree(fid);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+
+ if (--fid->ref_count == 1 && fid->rif) {
+ /* Destroy the associated RIF and let it drop the last
+ * reference on the FID.
+ */
+ return mlxsw_sp_rif_destroy(fid->rif);
+ } else if (fid->ref_count == 0) {
+ list_del(&fid->list);
+ fid->fid_family->ops->deconfigure(fid);
+ __clear_bit(fid->fid_index - fid_family->start_index,
+ fid_family->fids_bitmap);
+ kfree(fid);
+ }
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_get(struct mlxsw_sp *mlxsw_sp, u16 vid)
+{
+ return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_8021Q, &vid);
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp *mlxsw_sp,
+ int br_ifindex)
+{
+ return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_8021D, &br_ifindex);
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_rfid_get(struct mlxsw_sp *mlxsw_sp,
+ u16 rif_index)
+{
+ return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_RFID, &rif_index);
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_dummy_get(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_DUMMY, NULL);
+}
+
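+/* Program an SFGC register entry for each packet type handled by the
+ * flood table, binding it to the table's type and index.
+ */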
+static int
+mlxsw_sp_fid_flood_table_init(struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table)
+{
+ enum mlxsw_sp_flood_type packet_type = flood_table->packet_type;
+ const int *sfgc_packet_types;
+ int i;
+
+ sfgc_packet_types = mlxsw_sp_packet_type_sfgc_types[packet_type];
+ for (i = 0; i < MLXSW_REG_SFGC_TYPE_MAX; i++) {
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+ char sfgc_pl[MLXSW_REG_SFGC_LEN];
+ int err;
+
+ if (!sfgc_packet_types[i])
+ continue;
+ mlxsw_reg_sfgc_pack(sfgc_pl, i, flood_table->bridge_type,
+ flood_table->table_type,
+ flood_table->table_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_sp_fid_flood_tables_init(struct mlxsw_sp_fid_family *fid_family)
+{
+ int i;
+
+ for (i = 0; i < fid_family->nr_flood_tables; i++) {
+ const struct mlxsw_sp_flood_table *flood_table;
+ int err;
+
+ flood_table = &fid_family->flood_tables[i];
+ err = mlxsw_sp_fid_flood_table_init(fid_family, flood_table);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
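+/* Register a FID family: duplicate the family template, allocate the FID
+ * index bitmap and initialize the family's flood tables, if any.
+ */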
+static int mlxsw_sp_fid_family_register(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fid_family *tmpl)
+{
+ u16 nr_fids = tmpl->end_index - tmpl->start_index + 1;
+ struct mlxsw_sp_fid_family *fid_family;
+ int err;
+
+ fid_family = kmemdup(tmpl, sizeof(*fid_family), GFP_KERNEL);
+ if (!fid_family)
+ return -ENOMEM;
+
+ fid_family->mlxsw_sp = mlxsw_sp;
+ INIT_LIST_HEAD(&fid_family->fids_list);
+ fid_family->fids_bitmap = kcalloc(BITS_TO_LONGS(nr_fids),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!fid_family->fids_bitmap) {
+ err = -ENOMEM;
+ goto err_alloc_fids_bitmap;
+ }
+
+ if (fid_family->flood_tables) {
+ err = mlxsw_sp_fid_flood_tables_init(fid_family);
+ if (err)
+ goto err_fid_flood_tables_init;
+ }
+
+ mlxsw_sp->fid_core->fid_family_arr[tmpl->type] = fid_family;
+
+ return 0;
+
+err_fid_flood_tables_init:
+ kfree(fid_family->fids_bitmap);
+err_alloc_fids_bitmap:
+ kfree(fid_family);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_family_unregister(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid_family *fid_family)
+{
+ mlxsw_sp->fid_core->fid_family_arr[fid_family->type] = NULL;
+ kfree(fid_family->fids_bitmap);
+ WARN_ON_ONCE(!list_empty(&fid_family->fids_list));
+ kfree(fid_family);
+}
+
+int mlxsw_sp_port_fids_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ /* Track number of FIDs configured on the port with mapping type
+ * PORT_VID_TO_FID, so that we know when to transition the port
+ * back to non-virtual (VLAN) mode.
+ */
+ mlxsw_sp->fid_core->port_fid_mappings[mlxsw_sp_port->local_port] = 0;
+
+ return mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+}
+
+void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ mlxsw_sp->fid_core->port_fid_mappings[mlxsw_sp_port->local_port] = 0;
+}
+
+int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ struct mlxsw_sp_fid_core *fid_core;
+ int err, i;
+
+ fid_core = kzalloc(sizeof(*mlxsw_sp->fid_core), GFP_KERNEL);
+ if (!fid_core)
+ return -ENOMEM;
+ mlxsw_sp->fid_core = fid_core;
+
+ fid_core->port_fid_mappings = kcalloc(max_ports, sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!fid_core->port_fid_mappings) {
+ err = -ENOMEM;
+ goto err_alloc_port_fid_mappings;
+ }
+
+ for (i = 0; i < MLXSW_SP_FID_TYPE_MAX; i++) {
+ err = mlxsw_sp_fid_family_register(mlxsw_sp,
+ mlxsw_sp_fid_family_arr[i]);
+
+ if (err)
+ goto err_fid_ops_register;
+ }
+
+ return 0;
+
+err_fid_ops_register:
+ for (i--; i >= 0; i--) {
+ struct mlxsw_sp_fid_family *fid_family;
+
+ fid_family = fid_core->fid_family_arr[i];
+ mlxsw_sp_fid_family_unregister(mlxsw_sp, fid_family);
+ }
+ kfree(fid_core->port_fid_mappings);
+err_alloc_port_fid_mappings:
+ kfree(fid_core);
+ return err;
+}
+
+void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_fid_core *fid_core = mlxsw_sp->fid_core;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_FID_TYPE_MAX; i++)
+ mlxsw_sp_fid_family_unregister(mlxsw_sp,
+ fid_core->fid_family_arr[i]);
+ kfree(fid_core->port_fid_mappings);
+ kfree(fid_core);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 7d87e23..21bb2bf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -67,12 +67,20 @@
err = mlxsw_sp_acl_rulei_act_drop(rulei);
if (err)
return err;
+ } else if (is_tcf_gact_trap(a)) {
+ err = mlxsw_sp_acl_rulei_act_trap(rulei);
+ if (err)
+ return err;
} else if (is_tcf_mirred_egress_redirect(a)) {
int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
+ struct mlxsw_sp_fid *fid;
+ u16 fid_index;
+ fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
+ fid_index = mlxsw_sp_fid_index(fid);
err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
- MLXSW_SP_DUMMY_FID);
+ fid_index);
if (err)
return err;
@@ -178,6 +186,32 @@
return 0;
}
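+/* Parse the flower TCP flags key into the rule info. The key is only
+ * valid if the rule also matches on the TCP protocol.
+ */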
+static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct tc_cls_flower_offload *f,
+ u8 ip_proto)
+{
+ struct flow_dissector_key_tcp *key, *mask;
+
+ if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP))
+ return 0;
+
+ if (ip_proto != IPPROTO_TCP) {
+ dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
+ return -EINVAL;
+ }
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_TCP,
+ f->key);
+ mask = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_TCP,
+ f->mask);
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
+ ntohs(key->flags), ntohs(mask->flags));
+ return 0;
+}
+
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev,
struct mlxsw_sp_acl_rule_info *rulei,
@@ -194,6 +228,7 @@
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_TCP) |
BIT(FLOW_DISSECTOR_KEY_VLAN))) {
dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
return -EOPNOTSUPP;
@@ -285,6 +320,9 @@
err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
if (err)
return err;
+ err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
+ if (err)
+ return err;
return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, rulei, f->exts);
}
@@ -363,8 +401,6 @@
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
- struct tc_action *a;
- LIST_HEAD(actions);
u64 packets;
u64 lastuse;
u64 bytes;
@@ -385,13 +421,7 @@
if (err)
goto err_rule_get_stats;
- preempt_disable();
-
- tcf_exts_to_list(f->exts, &actions);
- list_for_each_entry(a, &actions, list)
- tcf_action_stats_update(a, bytes, packets, lastuse);
-
- preempt_enable();
+ tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 9f89c41..700cc8c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -42,6 +42,7 @@
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
+#include <linux/if_bridge.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -56,21 +57,82 @@
#include "spectrum_dpipe.h"
#include "spectrum_router.h"
+struct mlxsw_sp_vr;
+struct mlxsw_sp_lpm_tree;
+struct mlxsw_sp_rif_ops;
+
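+/* Router state. It used to be embedded in struct mlxsw_sp and is now
+ * allocated separately and accessed via mlxsw_sp->router.
+ */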
+struct mlxsw_sp_router {
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif **rifs;
+ struct mlxsw_sp_vr *vrs;
+ struct rhashtable neigh_ht;
+ struct rhashtable nexthop_group_ht;
+ struct rhashtable nexthop_ht;
+ struct {
+ struct mlxsw_sp_lpm_tree *trees;
+ unsigned int tree_count;
+ } lpm;
+ struct {
+ struct delayed_work dw;
+ unsigned long interval; /* ms */
+ } neighs_update;
+ struct delayed_work nexthop_probe_dw;
+#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
+ struct list_head nexthop_neighs_list;
+ bool aborted;
+ struct notifier_block fib_nb;
+ const struct mlxsw_sp_rif_ops **rif_ops_arr;
+};
+
struct mlxsw_sp_rif {
struct list_head nexthop_list;
struct list_head neigh_list;
struct net_device *dev;
- struct mlxsw_sp_fid *f;
+ struct mlxsw_sp_fid *fid;
unsigned char addr[ETH_ALEN];
int mtu;
u16 rif_index;
u16 vr_id;
+ const struct mlxsw_sp_rif_ops *ops;
+ struct mlxsw_sp *mlxsw_sp;
+
unsigned int counter_ingress;
bool counter_ingress_valid;
unsigned int counter_egress;
bool counter_egress_valid;
};
+struct mlxsw_sp_rif_params {
+ struct net_device *dev;
+ union {
+ u16 system_port;
+ u16 lag_id;
+ };
+ u16 vid;
+ bool lag;
+};
+
+struct mlxsw_sp_rif_subport {
+ struct mlxsw_sp_rif common;
+ union {
+ u16 system_port;
+ u16 lag_id;
+ };
+ u16 vid;
+ bool lag;
+};
+
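+/* Per-type RIF operations. The RIF type is derived from the FID type of
+ * the underlying netdevice.
+ */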
+struct mlxsw_sp_rif_ops {
+ enum mlxsw_sp_rif_type type;
+ size_t rif_size;
+
+ void (*setup)(struct mlxsw_sp_rif *rif,
+ const struct mlxsw_sp_rif_params *params);
+ int (*configure)(struct mlxsw_sp_rif *rif);
+ void (*deconfigure)(struct mlxsw_sp_rif *rif);
+ struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
+};
+
static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir)
@@ -219,10 +281,35 @@
mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}
+static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct devlink *devlink;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ if (!devlink_dpipe_table_counter_enabled(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
+ return;
+ mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+}
+
+static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+
+ mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+}
+
static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev);
+#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
+
+struct mlxsw_sp_prefix_usage {
+ DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
+};
+
#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
@@ -287,6 +374,7 @@
};
struct mlxsw_sp_nexthop_group;
+struct mlxsw_sp_fib;
struct mlxsw_sp_fib_node {
struct list_head entry_list;
@@ -313,6 +401,18 @@
bool offloaded;
};
+enum mlxsw_sp_l3proto {
+ MLXSW_SP_L3_PROTO_IPV4,
+ MLXSW_SP_L3_PROTO_IPV6,
+};
+
+struct mlxsw_sp_lpm_tree {
+ u8 id; /* tree ID */
+ unsigned int ref_count;
+ enum mlxsw_sp_l3proto proto;
+ struct mlxsw_sp_prefix_usage prefix_usage;
+};
+
struct mlxsw_sp_fib {
struct rhashtable ht;
struct list_head node_list;
@@ -323,6 +423,13 @@
enum mlxsw_sp_l3proto proto;
};
+struct mlxsw_sp_vr {
+ u16 id; /* virtual router ID */
+ u32 tb_id; /* kernel fib table id */
+ unsigned int rif_count;
+ struct mlxsw_sp_fib *fib4;
+};
+
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
@@ -361,8 +468,8 @@
static struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
- for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
- lpm_tree = &mlxsw_sp->router.lpm.trees[i];
+ for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
+ lpm_tree = &mlxsw_sp->router->lpm.trees[i];
if (lpm_tree->ref_count == 0)
return lpm_tree;
}
@@ -458,8 +565,8 @@
struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
- for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
- lpm_tree = &mlxsw_sp->router.lpm.trees[i];
+ for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
+ lpm_tree = &mlxsw_sp->router->lpm.trees[i];
if (lpm_tree->ref_count != 0 &&
lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
@@ -484,7 +591,7 @@
return 0;
}
-#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */
+#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
@@ -496,15 +603,15 @@
return -EIO;
max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
- mlxsw_sp->router.lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
- mlxsw_sp->router.lpm.trees = kcalloc(mlxsw_sp->router.lpm.tree_count,
+ mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
+ mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
sizeof(struct mlxsw_sp_lpm_tree),
GFP_KERNEL);
- if (!mlxsw_sp->router.lpm.trees)
+ if (!mlxsw_sp->router->lpm.trees)
return -ENOMEM;
- for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
- lpm_tree = &mlxsw_sp->router.lpm.trees[i];
+ for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
+ lpm_tree = &mlxsw_sp->router->lpm.trees[i];
lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
}
@@ -513,7 +620,7 @@
static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
- kfree(mlxsw_sp->router.lpm.trees);
+ kfree(mlxsw_sp->router->lpm.trees);
}
static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
@@ -527,7 +634,7 @@
int i;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
- vr = &mlxsw_sp->router.vrs[i];
+ vr = &mlxsw_sp->router->vrs[i];
if (!mlxsw_sp_vr_is_used(vr))
return vr;
}
@@ -573,7 +680,7 @@
tb_id = mlxsw_sp_fix_tb_id(tb_id);
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
- vr = &mlxsw_sp->router.vrs[i];
+ vr = &mlxsw_sp->router->vrs[i];
if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
return vr;
}
@@ -680,13 +787,13 @@
return -EIO;
max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
- mlxsw_sp->router.vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
- GFP_KERNEL);
- if (!mlxsw_sp->router.vrs)
+ mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
+ GFP_KERNEL);
+ if (!mlxsw_sp->router->vrs)
return -ENOMEM;
for (i = 0; i < max_vrs; i++) {
- vr = &mlxsw_sp->router.vrs[i];
+ vr = &mlxsw_sp->router->vrs[i];
vr->id = i;
}
@@ -706,7 +813,7 @@
*/
mlxsw_core_flush_owq();
mlxsw_sp_router_fib_flush(mlxsw_sp);
- kfree(mlxsw_sp->router.vrs);
+ kfree(mlxsw_sp->router->vrs);
}
struct mlxsw_sp_neigh_key {
@@ -758,7 +865,7 @@
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_neigh_entry *neigh_entry)
{
- return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht,
+ return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
&neigh_entry->ht_node,
mlxsw_sp_neigh_ht_params);
}
@@ -767,7 +874,7 @@
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_neigh_entry *neigh_entry)
{
- rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht,
+ rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
&neigh_entry->ht_node,
mlxsw_sp_neigh_ht_params);
}
@@ -815,7 +922,7 @@
struct mlxsw_sp_neigh_key key;
key.n = n;
- return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
+ return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
&key, mlxsw_sp_neigh_ht_params);
}
@@ -824,7 +931,7 @@
{
unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
- mlxsw_sp->router.neighs_update.interval = jiffies_to_msecs(interval);
+ mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
}
static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
@@ -839,13 +946,13 @@
mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
- if (!mlxsw_sp->rifs[rif]) {
+ if (!mlxsw_sp->router->rifs[rif]) {
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
return;
}
dipn = htonl(dip);
- dev = mlxsw_sp->rifs[rif]->dev;
+ dev = mlxsw_sp->router->rifs[rif]->dev;
n = neigh_lookup(&arp_tbl, &dipn, dev);
if (!n) {
netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
@@ -954,7 +1061,7 @@
/* Take RTNL mutex here to prevent the lists from changing */
rtnl_lock();
- list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
+ list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
nexthop_neighs_list_node)
/* If this neigh has nexthops, make the kernel think this neigh
* is active regardless of the traffic.
@@ -966,33 +1073,35 @@
static void
mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
- unsigned long interval = mlxsw_sp->router.neighs_update.interval;
+ unsigned long interval = mlxsw_sp->router->neighs_update.interval;
- mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw,
+ mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
msecs_to_jiffies(interval));
}
static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
- struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
- router.neighs_update.dw.work);
+ struct mlxsw_sp_router *router;
int err;
- err = mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp);
+ router = container_of(work, struct mlxsw_sp_router,
+ neighs_update.dw.work);
+ err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
+ dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
- mlxsw_sp_router_neighs_update_nh(mlxsw_sp);
+ mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
- mlxsw_sp_router_neighs_update_work_schedule(mlxsw_sp);
+ mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
}
static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
struct mlxsw_sp_neigh_entry *neigh_entry;
- struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
- router.nexthop_probe_dw.work);
+ struct mlxsw_sp_router *router;
+ router = container_of(work, struct mlxsw_sp_router,
+ nexthop_probe_dw.work);
/* Iterate over nexthop neighbours, find those that are unresolved and
* send ARP on them. This solves the chicken-and-egg problem where
* a nexthop would not get offloaded until the neighbour is resolved
@@ -1002,13 +1111,13 @@
* Take RTNL mutex here to prevent the lists from changing.
*/
rtnl_lock();
- list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
+ list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
nexthop_neighs_list_node)
if (!neigh_entry->connected)
neigh_event_send(neigh_entry->key.n, NULL);
rtnl_unlock();
- mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw,
+ mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}
@@ -1130,7 +1239,7 @@
mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
- mlxsw_sp->router.neighs_update.interval = interval;
+ mlxsw_sp->router->neighs_update.interval = interval;
mlxsw_sp_port_dev_put(mlxsw_sp_port);
break;
@@ -1171,7 +1280,7 @@
{
int err;
- err = rhashtable_init(&mlxsw_sp->router.neigh_ht,
+ err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
&mlxsw_sp_neigh_ht_params);
if (err)
return err;
@@ -1182,20 +1291,20 @@
mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
/* Create the delayed works for neighbour activity update and nexthop probing */
- INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
+ INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
mlxsw_sp_router_neighs_update_work);
- INIT_DELAYED_WORK(&mlxsw_sp->router.nexthop_probe_dw,
+ INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
mlxsw_sp_router_probe_unresolved_nexthops);
- mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
- mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
+ mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
+ mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
return 0;
}
static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
- cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
- cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
- rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
+ cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
+ cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
+ rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}
static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
@@ -1270,7 +1379,7 @@
static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
- return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_group_ht,
+ return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
&nh_grp->ht_node,
mlxsw_sp_nexthop_group_ht_params);
}
@@ -1278,7 +1387,7 @@
static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
- rhashtable_remove_fast(&mlxsw_sp->router.nexthop_group_ht,
+ rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
&nh_grp->ht_node,
mlxsw_sp_nexthop_group_ht_params);
}
@@ -1287,7 +1396,7 @@
mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group_key key)
{
- return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_group_ht, &key,
+ return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, &key,
mlxsw_sp_nexthop_group_ht_params);
}
@@ -1300,14 +1409,14 @@
static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
- return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_ht,
+ return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
&nh->ht_node, mlxsw_sp_nexthop_ht_params);
}
static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
- rhashtable_remove_fast(&mlxsw_sp->router.nexthop_ht, &nh->ht_node,
+ rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
mlxsw_sp_nexthop_ht_params);
}
@@ -1315,7 +1424,7 @@
mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_key key)
{
- return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_ht, &key,
+ return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
mlxsw_sp_nexthop_ht_params);
}
@@ -1602,7 +1711,7 @@
*/
if (list_empty(&neigh_entry->nexthop_list))
list_add_tail(&neigh_entry->nexthop_neighs_list_node,
- &mlxsw_sp->router.nexthop_neighs_list);
+ &mlxsw_sp->router->nexthop_neighs_list);
nh->neigh_entry = neigh_entry;
list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
@@ -1700,7 +1809,7 @@
struct mlxsw_sp_nexthop *nh;
struct mlxsw_sp_rif *rif;
- if (mlxsw_sp->router.aborted)
+ if (mlxsw_sp->router->aborted)
return;
key.fib_nh = fib_nh;
@@ -2513,7 +2622,7 @@
struct mlxsw_sp_fib_node *fib_node;
int err;
- if (mlxsw_sp->router.aborted)
+ if (mlxsw_sp->router->aborted)
return 0;
fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
@@ -2553,7 +2662,7 @@
struct mlxsw_sp_fib_entry *fib_entry;
struct mlxsw_sp_fib_node *fib_node;
- if (mlxsw_sp->router.aborted)
+ if (mlxsw_sp->router->aborted)
return;
fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
@@ -2584,7 +2693,7 @@
return err;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
- struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
+ struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
char raltb_pl[MLXSW_REG_RALTB_LEN];
char ralue_pl[MLXSW_REG_RALUE_LEN];
@@ -2666,7 +2775,7 @@
int i;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
- struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
+ struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
if (!mlxsw_sp_vr_is_used(vr))
continue;
@@ -2678,11 +2787,11 @@
{
int err;
- if (mlxsw_sp->router.aborted)
+ if (mlxsw_sp->router->aborted)
return;
dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
mlxsw_sp_router_fib_flush(mlxsw_sp);
- mlxsw_sp->router.aborted = true;
+ mlxsw_sp->router->aborted = true;
err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
@@ -2748,9 +2857,9 @@
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
- struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
struct mlxsw_sp_fib_event_work *fib_work;
struct fib_notifier_info *info = ptr;
+ struct mlxsw_sp_router *router;
if (!net_eq(info->net, &init_net))
return NOTIFY_DONE;
@@ -2760,7 +2869,8 @@
return NOTIFY_BAD;
INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
- fib_work->mlxsw_sp = mlxsw_sp;
+ router = container_of(nb, struct mlxsw_sp_router, fib_nb);
+ fib_work->mlxsw_sp = router->mlxsw_sp;
fib_work->event = event;
switch (event) {
@@ -2798,8 +2908,9 @@
int i;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
- if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
- return mlxsw_sp->rifs[i];
+ if (mlxsw_sp->router->rifs[i] &&
+ mlxsw_sp->router->rifs[i]->dev == dev)
+ return mlxsw_sp->router->rifs[i];
return NULL;
}
@@ -2849,77 +2960,46 @@
return false;
}
-#define MLXSW_SP_INVALID_INDEX_RIF 0xffff
-static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
+static enum mlxsw_sp_rif_type
+mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
+{
+ enum mlxsw_sp_fid_type type;
+
+ /* RIF type is derived from the type of the underlying FID */
+ if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
+ type = MLXSW_SP_FID_TYPE_8021Q;
+ else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
+ type = MLXSW_SP_FID_TYPE_8021Q;
+ else if (netif_is_bridge_master(dev))
+ type = MLXSW_SP_FID_TYPE_8021D;
+ else
+ type = MLXSW_SP_FID_TYPE_RFID;
+
+ return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
+}
+
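+/* Allocate the first free RIF index; RIFs live in a fixed-size array
+ * indexed by the device's RIF number.
+ */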
+static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
{
int i;
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
- if (!mlxsw_sp->rifs[i])
- return i;
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
+ if (!mlxsw_sp->router->rifs[i]) {
+ *p_rif_index = i;
+ return 0;
+ }
+ }
- return MLXSW_SP_INVALID_INDEX_RIF;
+ return -ENOBUFS;
}
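The rewritten allocator reports failure through a plain errno and hands the index back via an out-parameter, removing the need for the MLXSW_SP_INVALID_INDEX_RIF sentinel. A minimal sketch of the pattern, assuming a hypothetical fixed-size slot table not taken from the driver:

/* Minimal sketch of the out-parameter allocation pattern; the slot
 * table and its size are illustrative, not from the driver.
 */
#include <errno.h>
#include <stddef.h>

static void *slots[64];			/* hypothetical slot table */

static int slot_index_alloc(unsigned short *p_index)
{
	size_t i;

	for (i = 0; i < sizeof(slots) / sizeof(slots[0]); i++) {
		if (!slots[i]) {
			*p_index = i;	/* index travels via out-parameter */
			return 0;
		}
	}

	return -ENOBUFS;		/* plain errno, no sentinel index */
}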
-static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
- bool *p_lagged, u16 *p_system_port)
-{
- u8 local_port = mlxsw_sp_vport->local_port;
-
- *p_lagged = mlxsw_sp_vport->lagged;
- *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
-}
-
-static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
- u16 vr_id, struct net_device *l3_dev,
- u16 rif_index, bool create)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
- bool lagged = mlxsw_sp_vport->lagged;
- char ritr_pl[MLXSW_REG_RITR_LEN];
- u16 system_port;
-
- mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif_index,
- vr_id, l3_dev->mtu, l3_dev->dev_addr);
-
- mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
- mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
- mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
-
-static u16 mlxsw_sp_rif_sp_to_fid(u16 rif_index)
-{
- return MLXSW_SP_RFID_BASE + rif_index;
-}
-
-static struct mlxsw_sp_fid *
-mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
-{
- struct mlxsw_sp_fid *f;
-
- f = kzalloc(sizeof(*f), GFP_KERNEL);
- if (!f)
- return NULL;
-
- f->leave = mlxsw_sp_vport_rif_sp_leave;
- f->ref_count = 0;
- f->dev = l3_dev;
- f->fid = fid;
-
- return f;
-}
-
-static struct mlxsw_sp_rif *
-mlxsw_sp_rif_alloc(u16 rif_index, u16 vr_id, struct net_device *l3_dev,
- struct mlxsw_sp_fid *f)
+static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
+ u16 vr_id,
+ struct net_device *l3_dev)
{
struct mlxsw_sp_rif *rif;
- rif = kzalloc(sizeof(*rif), GFP_KERNEL);
+ rif = kzalloc(rif_size, GFP_KERNEL);
if (!rif)
return NULL;
@@ -2930,11 +3010,16 @@
rif->vr_id = vr_id;
rif->dev = l3_dev;
rif->rif_index = rif_index;
- rif->f = f;
return rif;
}
+struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
+ u16 rif_index)
+{
+ return mlxsw_sp->router->rifs[rif_index];
+}
+
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
{
return rif->rif_index;
@@ -2946,152 +3031,199 @@
}
static struct mlxsw_sp_rif *
-mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *l3_dev)
+mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_rif_params *params)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
- u32 tb_id = l3mdev_fib_table(l3_dev);
- struct mlxsw_sp_vr *vr;
- struct mlxsw_sp_fid *f;
+ u32 tb_id = l3mdev_fib_table(params->dev);
+ const struct mlxsw_sp_rif_ops *ops;
+ enum mlxsw_sp_rif_type type;
struct mlxsw_sp_rif *rif;
- u16 fid, rif_index;
+ struct mlxsw_sp_fid *fid;
+ struct mlxsw_sp_vr *vr;
+ u16 rif_index;
int err;
- rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
- return ERR_PTR(-ERANGE);
+ type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
+ ops = mlxsw_sp->router->rif_ops_arr[type];
vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
if (IS_ERR(vr))
return ERR_CAST(vr);
- err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev,
- rif_index, true);
+ err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
if (err)
- goto err_vport_rif_sp_op;
+ goto err_rif_index_alloc;
- fid = mlxsw_sp_rif_sp_to_fid(rif_index);
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
- if (err)
- goto err_rif_fdb_op;
-
- f = mlxsw_sp_rfid_alloc(fid, l3_dev);
- if (!f) {
- err = -ENOMEM;
- goto err_rfid_alloc;
- }
-
- rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
+ rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
if (!rif) {
err = -ENOMEM;
goto err_rif_alloc;
}
+ rif->mlxsw_sp = mlxsw_sp;
+ rif->ops = ops;
- if (devlink_dpipe_table_counter_enabled(priv_to_devlink(mlxsw_sp->core),
- MLXSW_SP_DPIPE_TABLE_NAME_ERIF)) {
- err = mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
- MLXSW_SP_RIF_COUNTER_EGRESS);
- if (err)
- netdev_dbg(mlxsw_sp_vport->dev,
- "Counter alloc Failed err=%d\n", err);
+ fid = ops->fid_get(rif);
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ goto err_fid_get;
}
+ rif->fid = fid;
- f->rif = rif;
- mlxsw_sp->rifs[rif_index] = rif;
+ if (ops->setup)
+ ops->setup(rif, params);
+
+ err = ops->configure(rif);
+ if (err)
+ goto err_configure;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, params->dev->dev_addr,
+ mlxsw_sp_fid_index(fid), true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ mlxsw_sp_rif_counters_alloc(rif);
+ mlxsw_sp_fid_rif_set(fid, rif);
+ mlxsw_sp->router->rifs[rif_index] = rif;
vr->rif_count++;
return rif;
-err_rif_alloc:
- kfree(f);
-err_rfid_alloc:
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
- mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
- false);
-err_vport_rif_sp_op:
+ ops->deconfigure(rif);
+err_configure:
+ mlxsw_sp_fid_put(fid);
+err_fid_get:
+ kfree(rif);
+err_rif_alloc:
+err_rif_index_alloc:
mlxsw_sp_vr_put(vr);
return ERR_PTR(err);
}
-static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct mlxsw_sp_rif *rif)
+void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
- struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id];
- struct net_device *l3_dev = rif->dev;
- struct mlxsw_sp_fid *f = rif->f;
- u16 rif_index = rif->rif_index;
- u16 fid = f->fid;
+ const struct mlxsw_sp_rif_ops *ops = rif->ops;
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_fid *fid = rif->fid;
+ struct mlxsw_sp_vr *vr;
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
-
- mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
- mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_INGRESS);
+ vr = &mlxsw_sp->router->vrs[rif->vr_id];
vr->rif_count--;
- mlxsw_sp->rifs[rif_index] = NULL;
- f->rif = NULL;
-
+ mlxsw_sp->router->rifs[rif->rif_index] = NULL;
+ mlxsw_sp_fid_rif_set(fid, NULL);
+ mlxsw_sp_rif_counters_free(rif);
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(fid), false);
+ ops->deconfigure(rif);
+ mlxsw_sp_fid_put(fid);
kfree(rif);
-
- kfree(f);
-
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
-
- mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
- false);
mlxsw_sp_vr_put(vr);
}
-static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *l3_dev)
+static void
+mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+
+ params->vid = mlxsw_sp_port_vlan->vid;
+ params->lag = mlxsw_sp_port->lagged;
+ if (params->lag)
+ params->lag_id = mlxsw_sp_port->lag_id;
+ else
+ params->system_port = mlxsw_sp_port->local_port;
+}
+
+static int
+mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
+ struct net_device *l3_dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 vid = mlxsw_sp_port_vlan->vid;
struct mlxsw_sp_rif *rif;
+ struct mlxsw_sp_fid *fid;
+ int err;
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
if (!rif) {
- rif = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
+ struct mlxsw_sp_rif_params params = {
+ .dev = l3_dev,
+ };
+
+ mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
+ rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
if (IS_ERR(rif))
return PTR_ERR(rif);
}
- mlxsw_sp_vport_fid_set(mlxsw_sp_vport, rif->f);
- rif->f->ref_count++;
+ /* FID was already created, just take a reference */
+ fid = rif->ops->fid_get(rif);
+ err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
+ if (err)
+ goto err_fid_port_vid_map;
- netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", rif->f->fid);
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
+ if (err)
+ goto err_port_vid_learning_set;
+
+ err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
+ BR_STATE_FORWARDING);
+ if (err)
+ goto err_port_vid_stp_set;
+
+ mlxsw_sp_port_vlan->fid = fid;
return 0;
+
+err_port_vid_stp_set:
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+err_port_vid_learning_set:
+ mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
+err_fid_port_vid_map:
+ mlxsw_sp_fid_put(fid);
+ return err;
}
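The join path above acquires its resources in order and unwinds them in reverse through goto labels, so each label undoes exactly what succeeded before the failing step. A compact sketch of the idiom, with hypothetical step_a/step_b/step_c standing in for the FID map, learning and STP calls:

/* Hypothetical steps; stubs here always succeed, real ones may not. */
static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return 0; }
static void undo_step_a(void) { }
static void undo_step_b(void) { }

static int object_join(void)
{
	int err;

	err = step_a();
	if (err)
		return err;		/* nothing to undo yet */

	err = step_b();
	if (err)
		goto err_step_b;

	err = step_c();
	if (err)
		goto err_step_c;

	return 0;

err_step_c:
	undo_step_b();			/* labels undo in reverse order */
err_step_b:
	undo_step_a();
	return err;
}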
-static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+void
+mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
- struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ u16 vid = mlxsw_sp_port_vlan->vid;
- netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
+ if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
+ return;
- mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
- if (--f->ref_count == 0)
- mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->rif);
+ mlxsw_sp_port_vlan->fid = NULL;
+ mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+ mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
+ /* If router port holds the last reference on the rFID, then the
+ * associated Sub-port RIF will be destroyed.
+ */
+ mlxsw_sp_fid_put(fid);
}
-static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
- struct net_device *port_dev,
- unsigned long event, u16 vid)
+static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
+ struct net_device *port_dev,
+ unsigned long event, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
- struct mlxsw_sp_port *mlxsw_sp_vport;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (WARN_ON(!mlxsw_sp_vport))
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
return -EINVAL;
switch (event) {
case NETDEV_UP:
- return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
+ return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
+ l3_dev);
case NETDEV_DOWN:
- mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
break;
}
@@ -3106,7 +3238,7 @@
netif_is_ovs_port(port_dev))
return 0;
- return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
+ return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1);
}
static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
@@ -3119,8 +3251,9 @@
netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
if (mlxsw_sp_port_dev_check(port_dev)) {
- err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
- event, vid);
+ err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
+ port_dev,
+ event, vid);
if (err)
return err;
}
@@ -3138,189 +3271,24 @@
return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}
-static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
- struct net_device *l3_dev)
-{
- u16 fid;
-
- if (is_vlan_dev(l3_dev))
- fid = vlan_dev_vlan_id(l3_dev);
- else if (mlxsw_sp->master_bridge.dev == l3_dev)
- fid = 1;
- else
- return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
-
- return mlxsw_sp_fid_find(mlxsw_sp, fid);
-}
-
-static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
-{
- return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
-}
-
-static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
-{
- return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
- MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
-}
-
-static u16 mlxsw_sp_flood_table_index_get(u16 fid)
-{
- return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
-}
-
-static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
- bool set)
-{
- u8 router_port = mlxsw_sp_router_port(mlxsw_sp);
- enum mlxsw_flood_table_type table_type;
- char *sftr_pl;
- u16 index;
- int err;
-
- sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
- if (!sftr_pl)
- return -ENOMEM;
-
- table_type = mlxsw_sp_flood_table_type_get(fid);
- index = mlxsw_sp_flood_table_index_get(fid);
- mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
- 1, router_port, set);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
-
- kfree(sftr_pl);
- return err;
-}
-
-static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
-{
- if (mlxsw_sp_fid_is_vfid(fid))
- return MLXSW_REG_RITR_FID_IF;
- else
- return MLXSW_REG_RITR_VLAN_IF;
-}
-
-static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
- struct net_device *l3_dev,
- u16 fid, u16 rif,
- bool create)
-{
- enum mlxsw_reg_ritr_if_type rif_type;
- char ritr_pl[MLXSW_REG_RITR_LEN];
-
- rif_type = mlxsw_sp_rif_type_get(fid);
- mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, vr_id, l3_dev->mtu,
- l3_dev->dev_addr);
- mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
- struct net_device *l3_dev,
- struct mlxsw_sp_fid *f)
-{
- u32 tb_id = l3mdev_fib_table(l3_dev);
- struct mlxsw_sp_rif *rif;
- struct mlxsw_sp_vr *vr;
- u16 rif_index;
- int err;
-
- rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
- return -ERANGE;
-
- vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
- if (IS_ERR(vr))
- return PTR_ERR(vr);
-
- err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
- if (err)
- goto err_port_flood_set;
-
- err = mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid,
- rif_index, true);
- if (err)
- goto err_rif_bridge_op;
-
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
- if (err)
- goto err_rif_fdb_op;
-
- rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
- if (!rif) {
- err = -ENOMEM;
- goto err_rif_alloc;
- }
-
- f->rif = rif;
- mlxsw_sp->rifs[rif_index] = rif;
- vr->rif_count++;
-
- netdev_dbg(l3_dev, "RIF=%d created\n", rif_index);
-
- return 0;
-
-err_rif_alloc:
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
-err_rif_fdb_op:
- mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
- false);
-err_rif_bridge_op:
- mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
-err_port_flood_set:
- mlxsw_sp_vr_put(vr);
- return err;
-}
-
-void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif)
-{
- struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id];
- struct net_device *l3_dev = rif->dev;
- struct mlxsw_sp_fid *f = rif->f;
- u16 rif_index = rif->rif_index;
-
- mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
-
- vr->rif_count--;
- mlxsw_sp->rifs[rif_index] = NULL;
- f->rif = NULL;
-
- kfree(rif);
-
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
-
- mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
- false);
-
- mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
-
- mlxsw_sp_vr_put(vr);
-
- netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif_index);
-}
-
static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
- struct net_device *br_dev,
unsigned long event)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
- struct mlxsw_sp_fid *f;
-
- /* FID can either be an actual FID if the L3 device is the
- * VLAN-aware bridge or a VLAN device on top. Otherwise, the
- * L3 device is a VLAN-unaware bridge and we get a vFID.
- */
- f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
- if (WARN_ON(!f))
- return -EINVAL;
+ struct mlxsw_sp_rif_params params = {
+ .dev = l3_dev,
+ };
+ struct mlxsw_sp_rif *rif;
switch (event) {
case NETDEV_UP:
- return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
+ rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
+ if (IS_ERR(rif))
+ return PTR_ERR(rif);
+ break;
case NETDEV_DOWN:
- mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+ mlxsw_sp_rif_destroy(rif);
break;
}
@@ -3331,19 +3299,16 @@
unsigned long event)
{
struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
u16 vid = vlan_dev_vlan_id(vlan_dev);
if (mlxsw_sp_port_dev_check(real_dev))
- return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
- vid);
+ return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
+ event, vid);
else if (netif_is_lag_master(real_dev))
return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
vid);
- else if (netif_is_bridge_master(real_dev) &&
- mlxsw_sp->master_bridge.dev == real_dev)
- return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
- event);
+ else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
+ return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event);
return 0;
}
@@ -3356,7 +3321,7 @@
else if (netif_is_lag_master(dev))
return mlxsw_sp_inetaddr_lag_event(dev, event);
else if (netif_is_bridge_master(dev))
- return mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
+ return mlxsw_sp_inetaddr_bridge_event(dev, event);
else if (is_vlan_dev(dev))
return mlxsw_sp_inetaddr_vlan_event(dev, event);
else
@@ -3406,6 +3371,7 @@
{
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif *rif;
+ u16 fid_index;
int err;
mlxsw_sp = mlxsw_sp_lower_get(dev);
@@ -3415,8 +3381,9 @@
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!rif)
return 0;
+ fid_index = mlxsw_sp_fid_index(rif->fid);
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, false);
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
if (err)
return err;
@@ -3425,7 +3392,7 @@
if (err)
goto err_rif_edit;
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, rif->f->fid, true);
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
if (err)
goto err_rif_fdb_op;
@@ -3439,7 +3406,7 @@
err_rif_fdb_op:
mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
err_rif_edit:
- mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, true);
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
return err;
}
@@ -3492,16 +3459,225 @@
return err;
}
+static struct mlxsw_sp_rif_subport *
+mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
+{
+ return container_of(rif, struct mlxsw_sp_rif_subport, common);
+}
+
+static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
+ const struct mlxsw_sp_rif_params *params)
+{
+ struct mlxsw_sp_rif_subport *rif_subport;
+
+ rif_subport = mlxsw_sp_rif_subport_rif(rif);
+ rif_subport->vid = params->vid;
+ rif_subport->lag = params->lag;
+ if (params->lag)
+ rif_subport->lag_id = params->lag_id;
+ else
+ rif_subport->system_port = params->system_port;
+}
+
+static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_rif_subport *rif_subport;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ rif_subport = mlxsw_sp_rif_subport_rif(rif);
+ mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
+ rif->rif_index, rif->vr_id, rif->dev->mtu,
+ rif->dev->dev_addr);
+ mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
+ rif_subport->lag ? rif_subport->lag_id :
+ rif_subport->system_port,
+ rif_subport->vid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
+{
+ return mlxsw_sp_rif_subport_op(rif, true);
+}
+
+static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ mlxsw_sp_rif_subport_op(rif, false);
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
+{
+ return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
+ .type = MLXSW_SP_RIF_TYPE_SUBPORT,
+ .rif_size = sizeof(struct mlxsw_sp_rif_subport),
+ .setup = mlxsw_sp_rif_subport_setup,
+ .configure = mlxsw_sp_rif_subport_configure,
+ .deconfigure = mlxsw_sp_rif_subport_deconfigure,
+ .fid_get = mlxsw_sp_rif_subport_fid_get,
+};
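mlxsw_sp_rif_subport_ops above is the first instance of the new RIF polymorphism: the common struct mlxsw_sp_rif sits at the start of each type-specific structure, the allocation is sized by ops->rif_size, and the specialization is recovered with container_of(). A minimal userspace sketch of that pattern; base, subport and base_ops are illustrative names only:

/* Userspace sketch of the base-struct-first specialization pattern. */
#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_ops;

struct base {
	int index;
	const struct base_ops *ops;
};

struct subport {
	struct base common;		/* must be first, like mlxsw_sp_rif */
	unsigned short vid;
};

struct base_ops {
	size_t size;			/* analogue of ops->rif_size */
	int (*configure)(struct base *b);
};

static struct base *base_alloc(const struct base_ops *ops, int index)
{
	struct base *b = calloc(1, ops->size);	/* kzalloc(rif_size) analogue */

	if (!b)
		return NULL;
	b->index = index;
	b->ops = ops;
	return b;
}

static int subport_configure(struct base *b)
{
	struct subport *sp = container_of(b, struct subport, common);

	sp->vid = 1;			/* type-specific state is reachable */
	return 0;
}

static const struct base_ops subport_ops = {
	.size = sizeof(struct subport),
	.configure = subport_configure,
};

Callers only ever hold a struct base * and dispatch through the ops table, just as mlxsw_sp_rif_create() dispatches through rif_ops_arr[type].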
+
+static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
+ enum mlxsw_reg_ritr_if_type type,
+ u16 vid_fid, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
+ rif->dev->mtu, rif->dev->dev_addr);
+ mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
+}
+
+static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ int err;
+
+ err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_bc_flood_set;
+
+ return 0;
+
+err_fid_bc_flood_set:
+ mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
+ return err;
+}
+
+static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
+{
+ u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
+
+ return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
+ .type = MLXSW_SP_RIF_TYPE_VLAN,
+ .rif_size = sizeof(struct mlxsw_sp_rif),
+ .configure = mlxsw_sp_rif_vlan_configure,
+ .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
+ .fid_get = mlxsw_sp_rif_vlan_fid_get,
+};
+
+static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ u16 fid_index = mlxsw_sp_fid_index(rif->fid);
+ int err;
+
+ err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
+ true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_bc_flood_set;
+
+ return 0;
+
+err_fid_bc_flood_set:
+ mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
+ return err;
+}
+
+static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ u16 fid_index = mlxsw_sp_fid_index(rif->fid);
+
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
+{
+ return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
+ .type = MLXSW_SP_RIF_TYPE_FID,
+ .rif_size = sizeof(struct mlxsw_sp_rif),
+ .configure = mlxsw_sp_rif_fid_configure,
+ .deconfigure = mlxsw_sp_rif_fid_deconfigure,
+ .fid_get = mlxsw_sp_rif_fid_fid_get,
+};
+
+static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
+ [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
+ [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
+};
+
+static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
+{
+ u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+
+ mlxsw_sp->router->rifs = kcalloc(max_rifs,
+ sizeof(struct mlxsw_sp_rif *),
+ GFP_KERNEL);
+ if (!mlxsw_sp->router->rifs)
+ return -ENOMEM;
+
+ mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
+
+ return 0;
+}
+
+static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+ WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
+
+ kfree(mlxsw_sp->router->rifs);
+}
+
static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
- struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
+ struct mlxsw_sp_router *router;
/* Flush pending FIB notifications and then flush the device's
* table before requesting another dump. The FIB notification
* block is unregistered, so no need to take RTNL.
*/
mlxsw_core_flush_owq();
- mlxsw_sp_router_fib_flush(mlxsw_sp);
+ router = container_of(nb, struct mlxsw_sp_router, fib_nb);
+ mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}
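With the notifier block moved into struct mlxsw_sp_router, both the FIB event handler and the dump-flush callback above recover the router with container_of() instead of assuming the block is embedded directly in struct mlxsw_sp. A minimal sketch of that embedded-handle idiom, with hypothetical names (handle, router, router_event):

/* Sketch of recovering a private struct from an embedded callback
 * handle; none of these names come from the driver.
 */
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct handle {
	int (*call)(struct handle *h, unsigned long event);
};

struct router {
	int id;
	struct handle nb;		/* embedded, like router->fib_nb */
};

static int router_event(struct handle *h, unsigned long event)
{
	struct router *r = container_of(h, struct router, nb);

	return r->id + (int)event;	/* full private state is in reach */
}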
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
@@ -3512,55 +3688,50 @@
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
return -EIO;
-
max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
- mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
- GFP_KERNEL);
- if (!mlxsw_sp->rifs)
- return -ENOMEM;
mlxsw_reg_rgcr_pack(rgcr_pl, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
if (err)
- goto err_rgcr_fail;
-
+ return err;
return 0;
-
-err_rgcr_fail:
- kfree(mlxsw_sp->rifs);
- return err;
}
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
char rgcr_pl[MLXSW_REG_RGCR_LEN];
- int i;
mlxsw_reg_rgcr_pack(rgcr_pl, false);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
-
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
- WARN_ON_ONCE(mlxsw_sp->rifs[i]);
-
- kfree(mlxsw_sp->rifs);
}
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_router *router;
int err;
- INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
+ router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
+ if (!router)
+ return -ENOMEM;
+ mlxsw_sp->router = router;
+ router->mlxsw_sp = mlxsw_sp;
+
+ INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
- return err;
+ goto err_router_init;
- err = rhashtable_init(&mlxsw_sp->router.nexthop_ht,
+ err = mlxsw_sp_rifs_init(mlxsw_sp);
+ if (err)
+ goto err_rifs_init;
+
+ err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
&mlxsw_sp_nexthop_ht_params);
if (err)
goto err_nexthop_ht_init;
- err = rhashtable_init(&mlxsw_sp->router.nexthop_group_ht,
+ err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
&mlxsw_sp_nexthop_group_ht_params);
if (err)
goto err_nexthop_group_ht_init;
@@ -3577,8 +3748,8 @@
if (err)
goto err_neigh_init;
- mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
- err = register_fib_notifier(&mlxsw_sp->fib_nb,
+ mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
+ err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
mlxsw_sp_router_fib_dump_flush);
if (err)
goto err_register_fib_notifier;
@@ -3592,21 +3763,27 @@
err_vrs_init:
mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
- rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
+ rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
- rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
+ rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
+ mlxsw_sp_rifs_fini(mlxsw_sp);
+err_rifs_init:
__mlxsw_sp_router_fini(mlxsw_sp);
+err_router_init:
+ kfree(mlxsw_sp->router);
return err;
}
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
- unregister_fib_notifier(&mlxsw_sp->fib_nb);
+ unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
mlxsw_sp_lpm_fini(mlxsw_sp);
- rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
- rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
+ rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
+ rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
+ mlxsw_sp_rifs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
+ kfree(mlxsw_sp->router);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index c3095fe..a3e8d2b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -42,6 +42,8 @@
MLXSW_SP_RIF_COUNTER_EGRESS,
};
+struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
+ u16 rif_index);
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index f4bb0c0..cd89a3e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -52,48 +52,390 @@
#include "core.h"
#include "reg.h"
-static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid)
+struct mlxsw_sp_bridge_ops;
+
+struct mlxsw_sp_bridge {
+ struct mlxsw_sp *mlxsw_sp;
+ struct {
+ struct delayed_work dw;
+#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
+ unsigned int interval; /* ms */
+ } fdb_notify;
+#define MLXSW_SP_MIN_AGEING_TIME 10
+#define MLXSW_SP_MAX_AGEING_TIME 1000000
+#define MLXSW_SP_DEFAULT_AGEING_TIME 300
+ u32 ageing_time;
+ bool vlan_enabled_exists;
+ struct list_head bridges_list;
+ struct list_head mids_list;
+ DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
+ const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
+ const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
+};
+
+struct mlxsw_sp_bridge_device {
+ struct net_device *dev;
+ struct list_head list;
+ struct list_head ports_list;
+ u8 vlan_enabled:1,
+ multicast_enabled:1;
+ const struct mlxsw_sp_bridge_ops *ops;
+};
+
+struct mlxsw_sp_bridge_port {
+ struct net_device *dev;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct list_head list;
+ struct list_head vlans_list;
+ unsigned int ref_count;
+ u8 stp_state;
+ unsigned long flags;
+ bool mrouter;
+ bool lagged;
+ union {
+ u16 lag_id;
+ u16 system_port;
+ };
+};
+
+struct mlxsw_sp_bridge_vlan {
+ struct list_head list;
+ struct list_head port_vlan_list;
+ u16 vid;
+};
+
+struct mlxsw_sp_bridge_ops {
+ int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port);
+ void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port);
+ struct mlxsw_sp_fid *
+ (*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
+ u16 vid);
+};
+
+static int
+mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 fid_index);
+
+static struct mlxsw_sp_bridge_device *
+mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
+ const struct net_device *br_dev)
{
- struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
- u16 fid = vid;
+ struct mlxsw_sp_bridge_device *bridge_device;
- fid = f ? f->fid : fid;
+ list_for_each_entry(bridge_device, &bridge->bridges_list, list)
+ if (bridge_device->dev == br_dev)
+ return bridge_device;
- if (!fid)
- fid = mlxsw_sp_port->pvid;
-
- return fid;
+ return NULL;
}
-static struct mlxsw_sp_port *
-mlxsw_sp_port_orig_get(struct net_device *dev,
- struct mlxsw_sp_port *mlxsw_sp_port)
+static struct mlxsw_sp_bridge_device *
+mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
+ struct net_device *br_dev)
{
- struct mlxsw_sp_port *mlxsw_sp_vport;
- struct mlxsw_sp_fid *fid;
- u16 vid;
+ struct device *dev = bridge->mlxsw_sp->bus_info->dev;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ bool vlan_enabled = br_vlan_enabled(br_dev);
- if (netif_is_bridge_master(dev)) {
- fid = mlxsw_sp_vfid_find(mlxsw_sp_port->mlxsw_sp,
- dev);
- if (fid) {
- mlxsw_sp_vport =
- mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
- fid->fid);
- WARN_ON(!mlxsw_sp_vport);
- return mlxsw_sp_vport;
- }
+ if (vlan_enabled && bridge->vlan_enabled_exists) {
+ dev_err(dev, "Only one VLAN-aware bridge is supported\n");
+ return ERR_PTR(-EINVAL);
}
- if (!is_vlan_dev(dev))
- return mlxsw_sp_port;
+ bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
+ if (!bridge_device)
+ return ERR_PTR(-ENOMEM);
- vid = vlan_dev_vlan_id(dev);
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- WARN_ON(!mlxsw_sp_vport);
+ bridge_device->dev = br_dev;
+ bridge_device->vlan_enabled = vlan_enabled;
+ bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
+ INIT_LIST_HEAD(&bridge_device->ports_list);
+ if (vlan_enabled) {
+ bridge->vlan_enabled_exists = true;
+ bridge_device->ops = bridge->bridge_8021q_ops;
+ } else {
+ bridge_device->ops = bridge->bridge_8021d_ops;
+ }
+ list_add(&bridge_device->list, &bridge->bridges_list);
- return mlxsw_sp_vport;
+ return bridge_device;
+}
+
+static void
+mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
+ struct mlxsw_sp_bridge_device *bridge_device)
+{
+ list_del(&bridge_device->list);
+ if (bridge_device->vlan_enabled)
+ bridge->vlan_enabled_exists = false;
+ WARN_ON(!list_empty(&bridge_device->ports_list));
+ kfree(bridge_device);
+}
+
+static struct mlxsw_sp_bridge_device *
+mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
+ struct net_device *br_dev)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
+ if (bridge_device)
+ return bridge_device;
+
+ return mlxsw_sp_bridge_device_create(bridge, br_dev);
+}
+
+static void
+mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
+ struct mlxsw_sp_bridge_device *bridge_device)
+{
+ if (list_empty(&bridge_device->ports_list))
+ mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
+}
+
+static struct mlxsw_sp_bridge_port *
+__mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *brport_dev)
+{
+ struct mlxsw_sp_bridge_port *bridge_port;
+
+ list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
+ if (bridge_port->dev == brport_dev)
+ return bridge_port;
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_bridge_port *
+mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
+ struct net_device *brport_dev)
+{
+ struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ if (!br_dev)
+ return NULL;
+
+ bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
+ if (!bridge_device)
+ return NULL;
+
+ return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
+}
+
+static struct mlxsw_sp_bridge_port *
+mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
+ struct net_device *brport_dev)
+{
+ struct mlxsw_sp_bridge_port *bridge_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+
+ bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
+ if (!bridge_port)
+ return NULL;
+
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
+ bridge_port->lagged = mlxsw_sp_port->lagged;
+ if (bridge_port->lagged)
+ bridge_port->lag_id = mlxsw_sp_port->lag_id;
+ else
+ bridge_port->system_port = mlxsw_sp_port->local_port;
+ bridge_port->dev = brport_dev;
+ bridge_port->bridge_device = bridge_device;
+ bridge_port->stp_state = BR_STATE_DISABLED;
+ bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC;
+ INIT_LIST_HEAD(&bridge_port->vlans_list);
+ list_add(&bridge_port->list, &bridge_device->ports_list);
+ bridge_port->ref_count = 1;
+
+ return bridge_port;
+}
+
+static void
+mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
+{
+ list_del(&bridge_port->list);
+ WARN_ON(!list_empty(&bridge_port->vlans_list));
+ kfree(bridge_port);
+}
+
+static bool
+mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
+ bridge_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev);
+
+ /* In case ports were pulled out of a bridged LAG, it's
+ * possible the reference count isn't zero, yet the bridge
+ * port should be destroyed, as it's no longer an upper of ours.
+ */
+ if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
+ return true;
+ else if (bridge_port->ref_count == 0)
+ return true;
+ else
+ return false;
+}
+
+static struct mlxsw_sp_bridge_port *
+mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
+ struct net_device *brport_dev)
+{
+ struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ int err;
+
+ bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
+ if (bridge_port) {
+ bridge_port->ref_count++;
+ return bridge_port;
+ }
+
+ bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
+ if (IS_ERR(bridge_device))
+ return ERR_CAST(bridge_device);
+
+ bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
+ if (!bridge_port) {
+ err = -ENOMEM;
+ goto err_bridge_port_create;
+ }
+
+ return bridge_port;
+
+err_bridge_port_create:
+ mlxsw_sp_bridge_device_put(bridge, bridge_device);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
+ struct mlxsw_sp_bridge_port *bridge_port)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ bridge_port->ref_count--;
+ if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
+ return;
+ bridge_device = bridge_port->bridge_device;
+ mlxsw_sp_bridge_port_destroy(bridge_port);
+ mlxsw_sp_bridge_device_put(bridge, bridge_device);
+}
+
+static struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_bridge_device *
+ bridge_device,
+ u16 vid)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
+ list) {
+ if (!mlxsw_sp_port_vlan->bridge_port)
+ continue;
+ if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
+ bridge_device)
+ continue;
+ if (bridge_device->vlan_enabled &&
+ mlxsw_sp_port_vlan->vid != vid)
+ continue;
+ return mlxsw_sp_port_vlan;
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid_index)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
+ list) {
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+
+ if (fid && mlxsw_sp_fid_index(fid) == fid_index)
+ return mlxsw_sp_port_vlan;
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_bridge_vlan *
+mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
+ u16 vid)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ if (bridge_vlan->vid == vid)
+ return bridge_vlan;
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_bridge_vlan *
+mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+
+ bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
+ if (!bridge_vlan)
+ return NULL;
+
+ INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
+ bridge_vlan->vid = vid;
+ list_add(&bridge_vlan->list, &bridge_port->vlans_list);
+
+ return bridge_vlan;
+}
+
+static void
+mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
+{
+ list_del(&bridge_vlan->list);
+ WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
+ kfree(bridge_vlan);
+}
+
+static struct mlxsw_sp_bridge_vlan *
+mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+
+ bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
+ if (bridge_vlan)
+ return bridge_vlan;
+
+ return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
+}
+
+static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
+{
+ if (list_empty(&bridge_vlan->port_vlan_list))
+ mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
+}
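Bridge VLAN objects follow the same get/put discipline as the other bridge structures above: get() finds an existing entry or creates one on demand, and put() destroys the entry once nothing references it. A sketch of the idiom with an explicit refcount (the code above keys off list emptiness instead) and hypothetical obj/obj_get/obj_put names:

/* Find-or-create get() paired with destroy-when-unused put(). */
#include <stdlib.h>

struct obj {
	int key;
	unsigned int refcount;
	struct obj *next;
};

static struct obj *objs;

static struct obj *obj_get(int key)
{
	struct obj *o;

	for (o = objs; o; o = o->next)
		if (o->key == key) {
			o->refcount++;		/* found: take a reference */
			return o;
		}

	o = calloc(1, sizeof(*o));		/* miss: create on demand */
	if (!o)
		return NULL;
	o->key = key;
	o->refcount = 1;
	o->next = objs;
	objs = o;
	return o;
}

static void obj_put(struct obj *o)
{
	struct obj **pp;

	if (--o->refcount)
		return;				/* still referenced */

	for (pp = &objs; *pp; pp = &(*pp)->next)
		if (*pp == o) {
			*pp = o->next;		/* unlink and free last ref */
			free(o);
			return;
		}
}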
+
+static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge,
+ struct net_device *dev,
+ unsigned long *brport_flags)
+{
+ struct mlxsw_sp_bridge_port *bridge_port;
+
+ bridge_port = mlxsw_sp_bridge_port_find(bridge, dev);
+ if (WARN_ON(!bridge_port))
+ return;
+
+ memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags));
}
static int mlxsw_sp_port_attr_get(struct net_device *dev,
@@ -102,10 +444,6 @@
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
- if (!mlxsw_sp_port)
- return -EINVAL;
-
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
@@ -113,10 +451,11 @@
attr->u.ppid.id_len);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- attr->u.brport_flags =
- (mlxsw_sp_port->learning ? BR_LEARNING : 0) |
- (mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
- (mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
+ mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
+ &attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
+ attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD;
break;
default:
return -EOPNOTSUPP;
@@ -125,267 +464,185 @@
return 0;
}
-static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u8 state)
+static int
+mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_vlan *bridge_vlan,
+ u8 state)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- enum mlxsw_reg_spms_state spms_state;
- char *spms_pl;
- u16 vid;
- int err;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- switch (state) {
- case BR_STATE_FORWARDING:
- spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
- break;
- case BR_STATE_LEARNING:
- spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
- break;
- case BR_STATE_LISTENING: /* fall-through */
- case BR_STATE_DISABLED: /* fall-through */
- case BR_STATE_BLOCKING:
- spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
- break;
- default:
- BUG();
+ list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
+ bridge_vlan_node) {
+ if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
+ continue;
+ return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
+ bridge_vlan->vid, state);
}
- spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
- if (!spms_pl)
- return -ENOMEM;
- mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
-
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
- mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
- } else {
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
- mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
- }
-
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
- kfree(spms_pl);
- return err;
+ return 0;
}
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
+ struct net_device *orig_dev,
u8 state)
{
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
- mlxsw_sp_port->stp_state = state;
- return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
-}
-
-static int __mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 idx_begin, u16 idx_end,
- enum mlxsw_sp_flood_table table,
- bool set)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u16 local_port = mlxsw_sp_port->local_port;
- enum mlxsw_flood_table_type table_type;
- u16 range = idx_end - idx_begin + 1;
- char *sftr_pl;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
int err;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
- table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
- else
- table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
-
- sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
- if (!sftr_pl)
- return -ENOMEM;
-
- mlxsw_reg_sftr_pack(sftr_pl, table, idx_begin,
- table_type, range, local_port, set);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
-
- kfree(sftr_pl);
- return err;
-}
-
-static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 idx_begin, u16 idx_end, bool uc_set,
- bool bc_set, bool mc_set)
-{
- int err;
-
- err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
- MLXSW_SP_FLOOD_TABLE_UC, uc_set);
- if (err)
- return err;
-
- err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
- MLXSW_SP_FLOOD_TABLE_BC, bc_set);
- if (err)
- goto err_flood_bm_set;
-
- err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
- MLXSW_SP_FLOOD_TABLE_MC, mc_set);
- if (err)
- goto err_flood_mc_set;
- return 0;
-
-err_flood_mc_set:
- __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
- MLXSW_SP_FLOOD_TABLE_BC, !bc_set);
-err_flood_bm_set:
- __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
- MLXSW_SP_FLOOD_TABLE_UC, !uc_set);
- return err;
-}
-
-static int mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
- enum mlxsw_sp_flood_table table,
- bool set)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
- u16 vid, last_visited_vid;
- int err;
-
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
- u16 vfid = mlxsw_sp_fid_to_vfid(fid);
-
- return __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vfid,
- vfid, table, set);
- }
-
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
- err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid,
- table, set);
- if (err) {
- last_visited_vid = vid;
- goto err_port_flood_set;
- }
- }
-
- return 0;
-
-err_port_flood_set:
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
- __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid, table,
- !set);
- netdev_err(dev, "Failed to configure unicast flooding\n");
- return err;
-}
-
-static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
- bool mc_disabled)
-{
- int set;
- int err = 0;
-
if (switchdev_trans_ph_prepare(trans))
return 0;
- if (mlxsw_sp_port->mc_router != mlxsw_sp_port->mc_flood) {
- set = mc_disabled ?
- mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router;
- err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
- MLXSW_SP_FLOOD_TABLE_MC,
- set);
- }
-
- if (!err)
- mlxsw_sp_port->mc_disabled = mc_disabled;
-
- return err;
-}
-
-int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
- bool set)
-{
- bool mc_set = set;
- u16 vfid;
-
- /* In case of vFIDs, index into the flooding table is relative to
- * the start of the vFIDs range.
+ /* It's possible we failed to enslave the port, yet this
+ * operation is executed due to it being deferred.
*/
- vfid = mlxsw_sp_fid_to_vfid(fid);
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
+ orig_dev);
+ if (!bridge_port)
+ return 0;
- if (set)
- mc_set = mlxsw_sp_vport->mc_disabled ?
- mlxsw_sp_vport->mc_flood : mlxsw_sp_vport->mc_router;
-
- return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set,
- mc_set);
-}
-
-static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
- bool set)
-{
- u16 vid;
- int err;
-
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
-
- return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
- set);
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
+ bridge_vlan, state);
+ if (err)
+ goto err_port_bridge_vlan_stp_set;
}
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
- err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
- set);
+ bridge_port->stp_state = state;
+
+ return 0;
+
+err_port_bridge_vlan_stp_set:
+ list_for_each_entry_continue_reverse(bridge_vlan,
+ &bridge_port->vlans_list, list)
+ mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
+ bridge_port->stp_state);
+ return err;
+}
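On a mid-list failure, the setter above walks back from the failing entry with list_for_each_entry_continue_reverse() and restores the previous state, so ports never see a half-applied configuration. The same rollback shape over a plain array, with hypothetical apply()/revert() helpers:

/* apply() fails on the third item purely for illustration. */
static int apply(int item) { return item == 3 ? -1 : 0; }
static void revert(int item) { (void)item; }

static int apply_all(const int *items, int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = apply(items[i]);
		if (err)
			goto rollback;
	}

	return 0;

rollback:
	while (--i >= 0)
		revert(items[i]);	/* undo only what was applied */
	return err;
}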
+
+static int
+mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_vlan *bridge_vlan,
+ enum mlxsw_sp_flood_type packet_type,
+ bool member)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
+ bridge_vlan_node) {
+ if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
+ continue;
+ return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
+ packet_type,
+ mlxsw_sp_port->local_port,
+ member);
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ enum mlxsw_sp_flood_type packet_type,
+ bool member)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ int err;
+
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
+ bridge_vlan,
+ packet_type,
+ member);
if (err)
- goto err_port_vid_learning_set;
+ goto err_port_bridge_vlan_flood_set;
}
return 0;
-err_port_vid_learning_set:
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
- __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, !set);
+err_port_bridge_vlan_flood_set:
+ list_for_each_entry_continue_reverse(bridge_vlan,
+ &bridge_port->vlans_list, list)
+ mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
+ packet_type, !member);
+ return err;
+}
+
+static int
+mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_vlan *bridge_vlan,
+ bool set)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ u16 vid = bridge_vlan->vid;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
+ bridge_vlan_node) {
+ if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
+ continue;
+ return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ bool set)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ int err;
+
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
+ bridge_vlan, set);
+ if (err)
+ goto err_port_bridge_vlan_learning_set;
+ }
+
+ return 0;
+
+err_port_bridge_vlan_learning_set:
+ list_for_each_entry_continue_reverse(bridge_vlan,
+ &bridge_port->vlans_list, list)
+ mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
+ bridge_vlan, !set);
return err;
}
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
+ struct net_device *orig_dev,
unsigned long brport_flags)
{
- unsigned long learning = mlxsw_sp_port->learning ? BR_LEARNING : 0;
- unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
+ struct mlxsw_sp_bridge_port *bridge_port;
int err;
- if (!mlxsw_sp_port->bridged)
- return -EINVAL;
-
if (switchdev_trans_ph_prepare(trans))
return 0;
- if ((uc_flood ^ brport_flags) & BR_FLOOD) {
- err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
- MLXSW_SP_FLOOD_TABLE_UC,
- !mlxsw_sp_port->uc_flood);
- if (err)
- return err;
- }
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
+ orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
- if ((learning ^ brport_flags) & BR_LEARNING) {
- err = mlxsw_sp_port_learning_set(mlxsw_sp_port,
- !mlxsw_sp_port->learning);
- if (err)
- goto err_port_learning_set;
- }
+ err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
+ MLXSW_SP_FLOOD_TYPE_UC,
+ brport_flags & BR_FLOOD);
+ if (err)
+ return err;
- mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
- mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
- mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;
+ err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
+ brport_flags & BR_LEARNING);
+ if (err)
+ return err;
+
+ memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
return 0;
-
-err_port_learning_set:
- if ((uc_flood ^ brport_flags) & BR_FLOOD)
- mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
- MLXSW_SP_FLOOD_TABLE_UC,
- mlxsw_sp_port->uc_flood);
- return err;
}
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
@@ -397,7 +654,7 @@
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
if (err)
return err;
- mlxsw_sp->ageing_time = ageing_time;
+ mlxsw_sp->bridge->ageing_time = ageing_time;
return 0;
}
@@ -426,28 +683,77 @@
bool vlan_enabled)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
- /* SWITCHDEV_TRANS_PREPARE phase */
- if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) {
- netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
+ if (!switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_device))
return -EINVAL;
- }
- return 0;
+ if (bridge_device->vlan_enabled == vlan_enabled)
+ return 0;
+
+ netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
+ return -EINVAL;
}
static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
+ struct net_device *orig_dev,
bool is_port_mc_router)
{
+ struct mlxsw_sp_bridge_port *bridge_port;
+
if (switchdev_trans_ph_prepare(trans))
return 0;
- mlxsw_sp_port->mc_router = is_port_mc_router;
- if (!mlxsw_sp_port->mc_disabled)
- return mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
- MLXSW_SP_FLOOD_TABLE_MC,
- is_port_mc_router);
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
+ orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
+
+ if (!bridge_port->bridge_device->multicast_enabled)
+ return 0;
+
+ return mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
+ MLXSW_SP_FLOOD_TYPE_MC,
+ is_port_mc_router);
+}
+
+static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ struct net_device *orig_dev,
+ bool mc_disabled)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ int err;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ /* It's possible we failed to enslave the port, yet this
+ * operation is executed due to it being deferred.
+ */
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
+ if (!bridge_device)
+ return 0;
+
+ list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
+ enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
+ bool member = mc_disabled ? true : bridge_port->mrouter;
+
+ err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
+ bridge_port,
+ packet_type, member);
+ if (err)
+ return err;
+ }
+
+ bridge_device->multicast_enabled = !mc_disabled;
return 0;
}
@@ -457,19 +763,17 @@
struct switchdev_trans *trans)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- int err = 0;
-
- mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
- if (!mlxsw_sp_port)
- return -EINVAL;
+ int err;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
+ attr->orig_dev,
attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
+ attr->orig_dev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
@@ -483,10 +787,12 @@
break;
case SWITCHDEV_ATTR_ID_PORT_MROUTER:
err = mlxsw_sp_port_attr_mc_router_set(mlxsw_sp_port, trans,
+ attr->orig_dev,
attr->u.mrouter);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
+ attr->orig_dev,
attr->u.mc_disabled);
break;
default:
@@ -497,355 +803,213 @@
return err;
}
-static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
+static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
{
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ const struct mlxsw_sp_bridge_device *bridge_device;
- mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+ bridge_device = bridge_port->bridge_device;
+ return !bridge_device->multicast_enabled ? true : bridge_port->mrouter;
}
-static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
+static int
+mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
+ struct mlxsw_sp_bridge_port *bridge_port)
{
- enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
- char svfa_pl[MLXSW_REG_SVFA_LEN];
-
- mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
-}
-
-static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
-{
- struct mlxsw_sp_fid *f;
-
- f = kzalloc(sizeof(*f), GFP_KERNEL);
- if (!f)
- return NULL;
-
- f->fid = fid;
-
- return f;
-}
-
-struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
-{
- struct mlxsw_sp_fid *f;
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+ struct mlxsw_sp_fid *fid;
int err;
- err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
+ bridge_device = bridge_port->bridge_device;
+ fid = bridge_device->ops->fid_get(bridge_device, vid);
+ if (IS_ERR(fid))
+ return PTR_ERR(fid);
+
+ err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
+ bridge_port->flags & BR_FLOOD);
if (err)
- return ERR_PTR(err);
+ goto err_fid_uc_flood_set;
- /* Although all the ports member in the FID might be using a
- * {Port, VID} to FID mapping, we create a global VID-to-FID
- * mapping. This allows a port to transition to VLAN mode,
- * knowing the global mapping exists.
- */
- err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
+ err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
+ mlxsw_sp_mc_flood(bridge_port));
if (err)
- goto err_fid_map;
+ goto err_fid_mc_flood_set;
- f = mlxsw_sp_fid_alloc(fid);
- if (!f) {
- err = -ENOMEM;
- goto err_allocate_fid;
- }
+ err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
+ true);
+ if (err)
+ goto err_fid_bc_flood_set;
- list_add(&f->list, &mlxsw_sp->fids);
+ err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
+ if (err)
+ goto err_fid_port_vid_map;
- return f;
-
-err_allocate_fid:
- mlxsw_sp_fid_map(mlxsw_sp, fid, false);
-err_fid_map:
- mlxsw_sp_fid_op(mlxsw_sp, fid, false);
- return ERR_PTR(err);
-}
-
-void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
-{
- u16 fid = f->fid;
-
- list_del(&f->list);
-
- if (f->rif)
- mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
-
- kfree(f);
-
- mlxsw_sp_fid_map(mlxsw_sp, fid, false);
-
- mlxsw_sp_fid_op(mlxsw_sp, fid, false);
-}
-
-static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid)
-{
- struct mlxsw_sp_fid *f;
-
- if (test_bit(fid, mlxsw_sp_port->active_vlans))
- return 0;
-
- f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
- if (!f) {
- f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
- if (IS_ERR(f))
- return PTR_ERR(f);
- }
-
- f->ref_count++;
-
- netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid);
+ mlxsw_sp_port_vlan->fid = fid;
return 0;
+
+err_fid_port_vid_map:
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
+err_fid_bc_flood_set:
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
+err_fid_mc_flood_set:
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
+err_fid_uc_flood_set:
+ mlxsw_sp_fid_put(fid);
+ return err;
}
-static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid)
+static void
+mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
- struct mlxsw_sp_fid *f;
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u16 vid = mlxsw_sp_port_vlan->vid;
- f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
- if (WARN_ON(!f))
+ mlxsw_sp_port_vlan->fid = NULL;
+ mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
+ mlxsw_sp_fid_put(fid);
+}
+
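+/* Determine the PVID to program: the VLAN itself when it is designated
+ * as PVID, 0 (untagged packets disallowed) when the current PVID loses
+ * that role, and the current PVID otherwise.
+ */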
+static u16
+mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid, bool is_pvid)
+{
+ if (is_pvid)
+ return vid;
+ else if (mlxsw_sp_port->pvid == vid)
+		return 0; /* Disallow untagged packets */
+ else
+ return mlxsw_sp_port->pvid;
+}
+
+static int
+mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
+ struct mlxsw_sp_bridge_port *bridge_port)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+ int err;
+
+ /* No need to continue if only VLAN flags were changed */
+ if (mlxsw_sp_port_vlan->bridge_port)
+ return 0;
+
+ err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
+ bridge_port->flags & BR_LEARNING);
+ if (err)
+ goto err_port_vid_learning_set;
+
+ err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
+ bridge_port->stp_state);
+ if (err)
+ goto err_port_vid_stp_set;
+
+ bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
+ if (!bridge_vlan) {
+ err = -ENOMEM;
+ goto err_bridge_vlan_get;
+ }
+
+ list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
+ &bridge_vlan->port_vlan_list);
+
+ mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
+ bridge_port->dev);
+ mlxsw_sp_port_vlan->bridge_port = bridge_port;
+
+ return 0;
+
+err_bridge_vlan_get:
+ mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
+err_port_vid_stp_set:
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
+err_port_vid_learning_set:
+ mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
+ return err;
+}
+
+void
+mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+ bool last;
+
+ if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
+ mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
return;
- netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid);
+ bridge_port = mlxsw_sp_port_vlan->bridge_port;
+ bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
+ last = list_is_singular(&bridge_vlan->port_vlan_list);
- mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid);
+ list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
+ mlxsw_sp_bridge_vlan_put(bridge_vlan);
+ mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
+ if (last)
+ mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
+ bridge_port,
+ mlxsw_sp_fid_index(fid));
+ mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
- if (--f->ref_count == 0)
- mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
+ mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
+ mlxsw_sp_port_vlan->bridge_port = NULL;
}
-static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
- bool valid)
+static int
+mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 vid, bool is_untagged, bool is_pvid)
{
- enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+ u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ u16 old_pvid = mlxsw_sp_port->pvid;
+ int err;
- /* If port doesn't have vPorts, then it can use the global
- * VID-to-FID mapping.
- */
- if (list_empty(&mlxsw_sp_port->vports_list))
- return 0;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
+ if (IS_ERR(mlxsw_sp_port_vlan))
+ return PTR_ERR(mlxsw_sp_port_vlan);
- return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
-}
-
-static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid_begin, u16 fid_end)
-{
- bool mc_flood;
- int fid, err;
-
- for (fid = fid_begin; fid <= fid_end; fid++) {
- err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
- if (err)
- goto err_port_fid_join;
- }
-
- mc_flood = mlxsw_sp_port->mc_disabled ?
- mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router;
-
- err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
- mlxsw_sp_port->uc_flood, true,
- mc_flood);
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
+ is_untagged);
if (err)
- goto err_port_flood_set;
+ goto err_port_vlan_set;
- for (fid = fid_begin; fid <= fid_end; fid++) {
- err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
- if (err)
- goto err_port_fid_map;
- }
+ err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
+ if (err)
+ goto err_port_pvid_set;
+
+ err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
+ if (err)
+ goto err_port_vlan_bridge_join;
+
+ bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
return 0;
-err_port_fid_map:
- for (fid--; fid >= fid_begin; fid--)
- mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
- __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
- false, false);
-err_port_flood_set:
- fid = fid_end;
-err_port_fid_join:
- for (fid--; fid >= fid_begin; fid--)
- __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
- return err;
-}
-
-static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid_begin, u16 fid_end)
-{
- int fid;
-
- for (fid = fid_begin; fid <= fid_end; fid++)
- mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
-
- __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
- false, false);
-
- for (fid = fid_begin; fid <= fid_end; fid++)
- __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
-}
-
-static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char spvid_pl[MLXSW_REG_SPVID_LEN];
-
- mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
-}
-
-static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
- bool allow)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char spaft_pl[MLXSW_REG_SPAFT_LEN];
-
- mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
-}
-
-int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
- int err;
-
- if (!vid) {
- err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
- if (err) {
- netdev_err(dev, "Failed to disallow untagged traffic\n");
- return err;
- }
- } else {
- err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
- if (err) {
- netdev_err(dev, "Failed to set PVID\n");
- return err;
- }
-
- /* Only allow if not already allowed. */
- if (!mlxsw_sp_port->pvid) {
- err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
- true);
- if (err) {
- netdev_err(dev, "Failed to allow untagged traffic\n");
- goto err_port_allow_untagged_set;
- }
- }
- }
-
- mlxsw_sp_port->pvid = vid;
- return 0;
-
-err_port_allow_untagged_set:
- __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
- return err;
-}
-
-static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end,
- bool learn_enable)
-{
- u16 vid, vid_e;
- int err;
-
- for (vid = vid_begin; vid <= vid_end;
- vid += MLXSW_REG_SPVMLR_REC_MAX_COUNT) {
- vid_e = min((u16) (vid + MLXSW_REG_SPVMLR_REC_MAX_COUNT - 1),
- vid_end);
-
- err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
- vid_e, learn_enable);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end,
- bool flag_untagged, bool flag_pvid)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
- u16 vid, old_pvid;
- int err;
-
- if (!mlxsw_sp_port->bridged)
- return -EINVAL;
-
- err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
- if (err) {
- netdev_err(dev, "Failed to join FIDs\n");
- return err;
- }
-
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
- true, flag_untagged);
- if (err) {
- netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
- vid_end);
- goto err_port_vlans_set;
- }
-
- old_pvid = mlxsw_sp_port->pvid;
- if (flag_pvid && old_pvid != vid_begin) {
- err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
- if (err) {
- netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
- goto err_port_pvid_set;
- }
- } else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
- err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
- if (err) {
- netdev_err(dev, "Unable to del PVID\n");
- goto err_port_pvid_set;
- }
- }
-
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
- mlxsw_sp_port->learning);
- if (err) {
- netdev_err(dev, "Failed to set learning for VIDs %d-%d\n",
- vid_begin, vid_end);
- goto err_port_vid_learning_set;
- }
-
- /* Changing activity bits only if HW operation succeded */
- for (vid = vid_begin; vid <= vid_end; vid++) {
- set_bit(vid, mlxsw_sp_port->active_vlans);
- if (flag_untagged)
- set_bit(vid, mlxsw_sp_port->untagged_vlans);
- else
- clear_bit(vid, mlxsw_sp_port->untagged_vlans);
- }
-
- /* STP state change must be done after we set active VLANs */
- err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
- mlxsw_sp_port->stp_state);
- if (err) {
- netdev_err(dev, "Failed to set STP state\n");
- goto err_port_stp_state_set;
- }
-
- return 0;
-
-err_port_stp_state_set:
- for (vid = vid_begin; vid <= vid_end; vid++)
- clear_bit(vid, mlxsw_sp_port->active_vlans);
- mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
- false);
-err_port_vid_learning_set:
- if (old_pvid != mlxsw_sp_port->pvid)
- mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
+err_port_vlan_bridge_join:
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
- mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
- false, false);
-err_port_vlans_set:
- mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+err_port_vlan_set:
+ mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
return err;
}
@@ -855,13 +1019,55 @@
{
bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ u16 vid;
if (switchdev_trans_ph_prepare(trans))
return 0;
- return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
- vlan->vid_begin, vlan->vid_end,
- flag_untagged, flag_pvid);
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
+
+ if (!bridge_port->bridge_device->vlan_enabled)
+ return 0;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ int err;
+
+ err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
+ vid, flag_untagged,
+ flag_pvid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
+{
+ return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
+ MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
+}
+
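+/* Flush the FDB entries learned for the given FID on the {Port, FID}
+ * or {LAG, FID} pair the bridge port corresponds to.
+ */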
+static int
+mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 fid_index)
+{
+ bool lagged = bridge_port->lagged;
+ char sfdf_pl[MLXSW_REG_SFDF_LEN];
+ u16 system_port;
+
+ system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
+ mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
+ mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
+ mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
@@ -935,29 +1141,40 @@
}
static int
-mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans)
+mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_notifier_fdb_info *fdb_info, bool adding)
{
- u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
- u16 lag_vid = 0;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *orig_dev = fdb_info->info.dev;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ u16 fid_index, vid;
- if (switchdev_trans_ph_prepare(trans))
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (!bridge_port)
+ return -EINVAL;
+
+ bridge_device = bridge_port->bridge_device;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
+ bridge_device,
+ fdb_info->vid);
+ if (!mlxsw_sp_port_vlan)
return 0;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
- }
+ fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
+ vid = mlxsw_sp_port_vlan->vid;
- if (!mlxsw_sp_port->lagged)
- return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
- mlxsw_sp_port->local_port,
- fdb->addr, fid, true, false);
+ if (!bridge_port->lagged)
+ return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
+ bridge_port->system_port,
+ fdb_info->addr, fid_index,
+ adding, false);
else
- return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
- mlxsw_sp_port->lag_id,
- fdb->addr, fid, lag_vid,
- true, false);
+ return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
+ bridge_port->lag_id,
+ fdb_info->addr, fid_index,
+ vid, adding, false);
}
static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
@@ -1006,7 +1223,7 @@
{
struct mlxsw_sp_mid *mid;
- list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
+ list_for_each_entry(mid, &mlxsw_sp->bridge->mids_list, list) {
if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
return mid;
}
@@ -1020,7 +1237,7 @@
struct mlxsw_sp_mid *mid;
u16 mid_idx;
- mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
+ mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
MLXSW_SP_MID_MAX);
if (mid_idx == MLXSW_SP_MID_MAX)
return NULL;
@@ -1029,12 +1246,12 @@
if (!mid)
return NULL;
- set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
+ set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
ether_addr_copy(mid->addr, addr);
mid->fid = fid;
mid->mid = mid_idx;
mid->ref_count = 0;
- list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);
+ list_add_tail(&mid->list, &mlxsw_sp->bridge->mids_list);
return mid;
}
@@ -1044,7 +1261,7 @@
{
if (--mid->ref_count == 0) {
list_del(&mid->list);
- clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
+ clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
kfree(mid);
return 1;
}
@@ -1056,17 +1273,34 @@
struct switchdev_trans *trans)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct net_device *dev = mlxsw_sp_port->dev;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_mid *mid;
- u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
+ u16 fid_index;
int err = 0;
if (switchdev_trans_ph_prepare(trans))
return 0;
- mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
+
+ bridge_device = bridge_port->bridge_device;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
+ bridge_device,
+ mdb->vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return -EINVAL;
+
+ fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
+
+ mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid_index);
if (!mid) {
- mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid);
+ mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid_index);
if (!mid) {
netdev_err(dev, "Unable to allocate MC group\n");
return -ENOMEM;
@@ -1082,8 +1316,8 @@
}
if (mid->ref_count == 1) {
- err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
- true);
+ err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index,
+ mid->mid, true);
if (err) {
netdev_err(dev, "Unable to set MC SFD\n");
goto err_out;
@@ -1104,24 +1338,12 @@
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err = 0;
- mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
- if (!mlxsw_sp_port)
- return -EINVAL;
-
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
- return 0;
-
err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_VLAN(obj),
trans);
break;
- case SWITCHDEV_OBJ_ID_PORT_FDB:
- err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
- SWITCHDEV_OBJ_PORT_FDB(obj),
- trans);
- break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_MDB(obj),
@@ -1135,82 +1357,72 @@
return err;
}
-static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end)
+static void
+mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
- u16 vid, pvid;
+ u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- if (!mlxsw_sp_port->bridged)
- return -EINVAL;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return;
- mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
- false);
-
- pvid = mlxsw_sp_port->pvid;
- if (pvid >= vid_begin && pvid <= vid_end)
- mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
-
- mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
- false, false);
-
- mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
-
- /* Changing activity bits only if HW operation succeded */
- for (vid = vid_begin; vid <= vid_end; vid++)
- clear_bit(vid, mlxsw_sp_port->active_vlans);
-
- return 0;
+ mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+ mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_vlan *vlan)
{
- return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
- vlan->vid_end);
-}
-
-void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
-{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ struct mlxsw_sp_bridge_port *bridge_port;
u16 vid;
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
- __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
-}
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
-static int
-mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_port_fdb *fdb)
-{
- u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
- u16 lag_vid = 0;
+ if (!bridge_port->bridge_device->vlan_enabled)
+ return 0;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
- }
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
+ mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);
- if (!mlxsw_sp_port->lagged)
- return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
- mlxsw_sp_port->local_port,
- fdb->addr, fid,
- false, false);
- else
- return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
- mlxsw_sp_port->lag_id,
- fdb->addr, fid, lag_vid,
- false, false);
+ return 0;
}
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_mdb *mdb)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_bridge_device *bridge_device;
struct net_device *dev = mlxsw_sp_port->dev;
+ struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_mid *mid;
- u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
+ u16 fid_index;
u16 mid_idx;
int err = 0;
- mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
+
+ bridge_device = bridge_port->bridge_device;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
+ bridge_device,
+ mdb->vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return -EINVAL;
+
+ fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
+
+ mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid_index);
if (!mid) {
netdev_err(dev, "Unable to remove port from MC DB\n");
return -EINVAL;
@@ -1222,8 +1434,8 @@
mid_idx = mid->mid;
if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
- err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
- false);
+ err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index,
+ mid_idx, false);
if (err)
netdev_err(dev, "Unable to remove MC SFD\n");
}
@@ -1237,22 +1449,11 @@
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err = 0;
- mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
- if (!mlxsw_sp_port)
- return -EINVAL;
-
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
- return 0;
-
err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
- case SWITCHDEV_OBJ_ID_PORT_FDB:
- err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
- SWITCHDEV_OBJ_PORT_FDB(obj));
- break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_MDB(obj));
@@ -1282,188 +1483,200 @@
return NULL;
}
-static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_obj_port_fdb *fdb,
- switchdev_obj_dump_cb_t *cb,
- struct net_device *orig_dev)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_port *tmp;
- struct mlxsw_sp_fid *f;
- u16 vport_fid;
- char *sfd_pl;
- char mac[ETH_ALEN];
- u16 fid;
- u8 local_port;
- u16 lag_id;
- u8 num_rec;
- int stored_err = 0;
- int i;
- int err;
-
- sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
- if (!sfd_pl)
- return -ENOMEM;
-
- f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
- vport_fid = f ? f->fid : 0;
-
- mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
- do {
- mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
- if (err)
- goto out;
-
- num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
-
- /* Even in case of error, we have to run the dump to the end
- * so the session in firmware is finished.
- */
- if (stored_err)
- continue;
-
- for (i = 0; i < num_rec; i++) {
- switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
- case MLXSW_REG_SFD_REC_TYPE_UNICAST:
- mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
- &local_port);
- if (local_port == mlxsw_sp_port->local_port) {
- if (vport_fid && vport_fid == fid)
- fdb->vid = 0;
- else if (!vport_fid &&
- !mlxsw_sp_fid_is_vfid(fid))
- fdb->vid = fid;
- else
- continue;
- ether_addr_copy(fdb->addr, mac);
- fdb->ndm_state = NUD_REACHABLE;
- err = cb(&fdb->obj);
- if (err)
- stored_err = err;
- }
- break;
- case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
- mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
- mac, &fid, &lag_id);
- tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
- if (tmp && tmp->local_port ==
- mlxsw_sp_port->local_port) {
- /* LAG records can only point to LAG
- * devices or VLAN devices on top.
- */
- if (!netif_is_lag_master(orig_dev) &&
- !is_vlan_dev(orig_dev))
- continue;
- if (vport_fid && vport_fid == fid)
- fdb->vid = 0;
- else if (!vport_fid &&
- !mlxsw_sp_fid_is_vfid(fid))
- fdb->vid = fid;
- else
- continue;
- ether_addr_copy(fdb->addr, mac);
- fdb->ndm_state = NUD_REACHABLE;
- err = cb(&fdb->obj);
- if (err)
- stored_err = err;
- }
- break;
- }
- }
- } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);
-
-out:
- kfree(sfd_pl);
- return stored_err ? stored_err : err;
-}
-
-static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_obj_port_vlan *vlan,
- switchdev_obj_dump_cb_t *cb)
-{
- u16 vid;
- int err = 0;
-
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- vlan->flags = 0;
- vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
- vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
- return cb(&vlan->obj);
- }
-
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
- vlan->flags = 0;
- if (vid == mlxsw_sp_port->pvid)
- vlan->flags |= BRIDGE_VLAN_INFO_PVID;
- if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
- vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
- vlan->vid_begin = vid;
- vlan->vid_end = vid;
- err = cb(&vlan->obj);
- if (err)
- break;
- }
- return err;
-}
-
-static int mlxsw_sp_port_obj_dump(struct net_device *dev,
- struct switchdev_obj *obj,
- switchdev_obj_dump_cb_t *cb)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- int err = 0;
-
- mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
- if (!mlxsw_sp_port)
- return -EINVAL;
-
- switch (obj->id) {
- case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
- SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
- break;
- case SWITCHDEV_OBJ_ID_PORT_FDB:
- err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
- SWITCHDEV_OBJ_PORT_FDB(obj), cb,
- obj->orig_dev);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
.switchdev_port_attr_get = mlxsw_sp_port_attr_get,
.switchdev_port_attr_set = mlxsw_sp_port_attr_set,
.switchdev_port_obj_add = mlxsw_sp_port_obj_add,
.switchdev_port_obj_del = mlxsw_sp_port_obj_del,
- .switchdev_port_obj_dump = mlxsw_sp_port_obj_dump,
};
-static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
- char *mac, u16 vid,
- struct net_device *dev)
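+/* VLAN-aware bridge: only release the port's default VLAN here and let
+ * switchdev VLAN objects drive the per-VLAN configuration.
+ */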
+static int
+mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ if (is_vlan_dev(bridge_port->dev))
+ return -EINVAL;
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return -EINVAL;
+
+ /* Let VLAN-aware bridge take care of its own VLANs */
+ mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
+
+ return 0;
+}
+
+static void
+mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
+ /* Make sure untagged frames are allowed to ingress */
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+
+ return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
+}
+
+static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
+ .port_join = mlxsw_sp_bridge_8021q_port_join,
+ .port_leave = mlxsw_sp_bridge_8021q_port_leave,
+ .fid_get = mlxsw_sp_bridge_8021q_fid_get,
+};
+
+static bool
+mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct net_device *br_dev)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
+ list) {
+ if (mlxsw_sp_port_vlan->bridge_port &&
+ mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
+ br_dev)
+ return true;
+ }
+
+ return false;
+}
+
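+/* VLAN-unaware bridge: the enslaved device is a VLAN upper of the port,
+ * so bind the corresponding {Port, VID} to the bridge directly.
+ */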
+static int
+mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ u16 vid;
+
+ if (!is_vlan_dev(bridge_port->dev))
+ return -EINVAL;
+ vid = vlan_dev_vlan_id(bridge_port->dev);
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return -EINVAL;
+
+ if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
+ netdev_err(mlxsw_sp_port->dev, "Can't bridge VLAN uppers of the same port\n");
+ return -EINVAL;
+ }
+
+ /* Port is no longer usable as a router interface */
+ if (mlxsw_sp_port_vlan->fid)
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+
+ return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
+}
+
+static void
+mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ u16 vid = vlan_dev_vlan_id(bridge_port->dev);
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return;
+
+ mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+
+ return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
+}
+
+static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
+ .port_join = mlxsw_sp_bridge_8021d_port_join,
+ .port_leave = mlxsw_sp_bridge_8021d_port_leave,
+ .fid_get = mlxsw_sp_bridge_8021d_fid_get,
+};
+
+int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *brport_dev,
+ struct net_device *br_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ int err;
+
+ bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
+ if (IS_ERR(bridge_port))
+ return PTR_ERR(bridge_port);
+ bridge_device = bridge_port->bridge_device;
+
+ err = bridge_device->ops->port_join(bridge_device, bridge_port,
+ mlxsw_sp_port);
+ if (err)
+ goto err_port_join;
+
+ return 0;
+
+err_port_join:
+ mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
+ return err;
+}
+
+void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *brport_dev,
+ struct net_device *br_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return;
+ bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
+ if (!bridge_port)
+ return;
+
+ bridge_device->ops->port_leave(bridge_device, bridge_port,
+ mlxsw_sp_port);
+ mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
+}
+
+static void
+mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
+ const char *mac, u16 vid,
+ struct net_device *dev)
{
struct switchdev_notifier_fdb_info info;
- unsigned long notifier_type;
- if (learning_sync) {
- info.addr = mac;
- info.vid = vid;
- notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
- call_switchdev_notifiers(notifier_type, dev, &info.info);
- }
+ info.addr = mac;
+ info.vid = vid;
+ call_switchdev_notifiers(type, dev, &info.info);
}
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
char *sfn_pl, int rec_index,
bool adding)
{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_port *mlxsw_sp_port;
+ enum switchdev_notifier_type type;
char mac[ETH_ALEN];
u8 local_port;
u16 vid, fid;
@@ -1477,22 +1690,21 @@
goto just_remove;
}
- if (mlxsw_sp_fid_is_vfid(fid)) {
- struct mlxsw_sp_port *mlxsw_sp_vport;
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
- fid);
- if (!mlxsw_sp_vport) {
- netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
- goto just_remove;
- }
- vid = 0;
- /* Override the physical port with the vPort. */
- mlxsw_sp_port = mlxsw_sp_vport;
- } else {
- vid = fid;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
+ if (!mlxsw_sp_port_vlan) {
+ netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
+ goto just_remove;
}
+ bridge_port = mlxsw_sp_port_vlan->bridge_port;
+ if (!bridge_port) {
+ netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
+ goto just_remove;
+ }
+
+ bridge_device = bridge_port->bridge_device;
+ vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
+
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
adding, true);
@@ -1503,8 +1715,9 @@
if (!do_notification)
return;
- mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
- adding, mac, vid, mlxsw_sp_port->dev);
+ type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
+ mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);
+
return;
just_remove:
@@ -1517,8 +1730,11 @@
char *sfn_pl, int rec_index,
bool adding)
{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_port *mlxsw_sp_port;
- struct net_device *dev;
+ enum switchdev_notifier_type type;
char mac[ETH_ALEN];
u16 lag_vid = 0;
u16 lag_id;
@@ -1533,26 +1749,22 @@
goto just_remove;
}
- if (mlxsw_sp_fid_is_vfid(fid)) {
- struct mlxsw_sp_port *mlxsw_sp_vport;
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
- fid);
- if (!mlxsw_sp_vport) {
- netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
- goto just_remove;
- }
-
- lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
- dev = mlxsw_sp_vport->dev;
- vid = 0;
- /* Override the physical port with the vPort. */
- mlxsw_sp_port = mlxsw_sp_vport;
- } else {
- dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
- vid = fid;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
+ if (!mlxsw_sp_port_vlan) {
+ netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
+ goto just_remove;
}
+ bridge_port = mlxsw_sp_port_vlan->bridge_port;
+ if (!bridge_port) {
+ netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
+ goto just_remove;
+ }
+
+ bridge_device = bridge_port->bridge_device;
+ vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
+ lag_vid = mlxsw_sp_port_vlan->vid;
+
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
adding, true);
@@ -1563,8 +1775,9 @@
if (!do_notification)
return;
- mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
- vid, dev);
+ type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
+ mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);
+
return;
just_remove:
@@ -1598,12 +1811,15 @@
static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
- mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
- msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
+ struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
+
+ mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
+ msecs_to_jiffies(bridge->fdb_notify.interval));
}
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
+ struct mlxsw_sp_bridge *bridge;
struct mlxsw_sp *mlxsw_sp;
char *sfn_pl;
u8 num_rec;
@@ -1614,7 +1830,8 @@
if (!sfn_pl)
return;
- mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
+ bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
+ mlxsw_sp = bridge->mlxsw_sp;
rtnl_lock();
mlxsw_reg_sfn_pack(sfn_pl);
@@ -1633,8 +1850,100 @@
mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}
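+/* FDB events are delivered in atomic context, so the device is
+ * programmed from process context via this deferred work item.
+ */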
+struct mlxsw_sp_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ unsigned long event;
+};
+
+static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
+{
+ struct mlxsw_sp_switchdev_event_work *switchdev_work =
+ container_of(work, struct mlxsw_sp_switchdev_event_work, work);
+ struct net_device *dev = switchdev_work->dev;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ int err;
+
+ rtnl_lock();
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
+ if (!mlxsw_sp_port)
+ goto out;
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb_info = &switchdev_work->fdb_info;
+ err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
+ if (err)
+ break;
+ mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ fdb_info->addr,
+ fdb_info->vid, dev);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb_info = &switchdev_work->fdb_info;
+ mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
+ break;
+ }
+
+out:
+ rtnl_unlock();
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(dev);
+}
+
+/* Called under rcu_read_lock() */
+static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct mlxsw_sp_switchdev_event_work *switchdev_work;
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+
+ if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
+ return NOTIFY_DONE;
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (!switchdev_work)
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_event_work);
+ switchdev_work->dev = dev;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+		/* Take a reference on the device. This can be either
+		 * an upper device containing the mlxsw_sp_port or the
+		 * mlxsw_sp_port itself.
+		 */
+ dev_hold(dev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ mlxsw_core_schedule_work(&switchdev_work->work);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mlxsw_sp_switchdev_notifier = {
+ .notifier_call = mlxsw_sp_switchdev_event,
+};
+
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
int err;
err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
@@ -1642,25 +1951,51 @@
dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
return err;
}
- INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
- mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
+
+ err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
+ return err;
+ }
+
+ INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
+ bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
return 0;
}
static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
- cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
+ cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
+ unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
+
}
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_bridge *bridge;
+
+ bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+ mlxsw_sp->bridge = bridge;
+ bridge->mlxsw_sp = mlxsw_sp;
+
+ INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
+ INIT_LIST_HEAD(&mlxsw_sp->bridge->mids_list);
+
+ bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
+ bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
+
return mlxsw_sp_fdb_init(mlxsw_sp);
}
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp_fdb_fini(mlxsw_sp);
+ WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list));
+ WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
+ kfree(mlxsw_sp->bridge);
}
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index e008fdb..12b5ed58 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -66,6 +66,7 @@
MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
MLXSW_TRAP_ID_BGP_IPV4 = 0x88,
MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90,
+ MLXSW_TRAP_ID_ACL0 = 0x1C0,
MLXSW_TRAP_ID_MAX = 0x1FF
};
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index cb0102d..e3d7c74 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -669,7 +669,7 @@
ks8842_update_rx_counters(netdev, status, len);
if (adapter->conf_flags & KS884X_16BIT) {
- u16 *data16 = (u16 *)skb_put(skb, len);
+ u16 *data16 = skb_put(skb, len);
ks8842_select_bank(adapter, 17);
while (len > 0) {
*data16++ = ioread16(adapter->hw_addr +
@@ -679,7 +679,7 @@
len -= sizeof(u32);
}
} else {
- u32 *data = (u32 *)skb_put(skb, len);
+ u32 *data = skb_put(skb, len);
ks8842_select_bank(adapter, 17);
while (len > 0) {
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 20358f8..2fe96f1 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -1071,7 +1071,10 @@
struct ethtool_link_ksettings *cmd)
{
struct ks8851_net *ks = netdev_priv(dev);
- return mii_ethtool_get_link_ksettings(&ks->mii, cmd);
+
+ mii_ethtool_get_link_ksettings(&ks->mii, cmd);
+
+ return 0;
}
static int ks8851_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 7647f7b..f3e9dd4 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1315,7 +1315,10 @@
struct ethtool_link_ksettings *cmd)
{
struct ks_net *ks = netdev_priv(netdev);
- return mii_ethtool_get_link_ksettings(&ks->mii, cmd);
+
+ mii_ethtool_get_link_ksettings(&ks->mii, cmd);
+
+ return 0;
}
static int ks_set_link_ksettings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index ee1c78a..e798fbe 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5020,8 +5020,7 @@
*/
skb_reserve(skb, 2);
- memcpy(skb_put(skb, packet_len),
- dma_buf->skb->data, packet_len);
+ skb_put_data(skb, dma_buf->skb->data, packet_len);
} while (0);
skb->protocol = eth_type_trans(skb, dev);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 118723e..fd2ec36 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2459,7 +2459,6 @@
struct buffAdd *ba;
struct RxD_t *first_rxdp = NULL;
u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
- int rxd_index = 0;
struct RxD1 *rxdp1;
struct RxD3 *rxdp3;
struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
@@ -2474,10 +2473,6 @@
rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
- rxd_index = off + 1;
- if (block_no)
- rxd_index += (block_no * ring->rxd_count);
-
if ((block_no == block_no1) &&
(off == ring->rx_curr_get_info.offset) &&
(rxdp->Host_Control)) {
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 6a4310a..50ea69d 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3218,6 +3218,7 @@
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
return -EFAULT;
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index 967d7ca..0d5a7b9 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -19,6 +19,7 @@
tristate "Netronome(R) NFP4000/NFP6000 NIC driver"
depends on PCI && PCI_MSI
depends on VXLAN || VXLAN=n
+ depends on MAY_USE_DEVLINK
---help---
This driver supports the Netronome(R) NFP4000/NFP6000 based
cards working as an advanced Ethernet NIC. It works with both
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 4b15f0f..5ad9a55 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -14,17 +14,24 @@
nfpcore/nfp_resource.o \
nfpcore/nfp_rtsym.o \
nfpcore/nfp_target.o \
+ nfp_app.o \
+ nfp_app_nic.o \
+ nfp_devlink.o \
+ nfp_hwmon.o \
nfp_main.o \
nfp_net_common.o \
nfp_net_ethtool.o \
- nfp_net_offload.o \
nfp_net_main.o \
- nfp_netvf_main.o
+ nfp_netvf_main.o \
+ nfp_port.o \
+ bpf/main.o \
+ bpf/offload.o \
+ nic/main.o
ifeq ($(CONFIG_BPF_SYSCALL),y)
nfp-objs += \
- nfp_bpf_verifier.o \
- nfp_bpf_jit.o
+ bpf/verifier.o \
+ bpf/jit.o
endif
nfp-$(CONFIG_NFP_DEBUG) += nfp_net_debugfs.o
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
similarity index 99%
rename from drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
rename to drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 97a8f00..8e57fda 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -39,8 +39,8 @@
#include <linux/pkt_cls.h>
#include <linux/unistd.h>
-#include "nfp_asm.h"
-#include "nfp_bpf.h"
+#include "main.h"
+#include "../nfp_asm.h"
/* --- NFP prog --- */
/* Foreach "multiple" entries macros provide pos and next<n> pointers.
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
new file mode 100644
index 0000000..afbdf5f
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License
+ * Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <net/pkt_cls.h>
+
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfp_app.h"
+#include "../nfp_main.h"
+#include "../nfp_net.h"
+#include "../nfp_port.h"
+#include "main.h"
+
+static bool nfp_net_ebpf_capable(struct nfp_net *nn)
+{
+ if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
+ nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
+ return true;
+ return false;
+}
+
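+/* Offload an XDP program through the TC cls_bpf backend, replacing or
+ * destroying an already offloaded program as needed.
+ */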
+static int
+nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog)
+{
+ struct tc_cls_bpf_offload cmd = {
+ .prog = prog,
+ };
+ int ret;
+
+ if (!nfp_net_ebpf_capable(nn))
+ return -EINVAL;
+
+ if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
+ if (!nn->dp.bpf_offload_xdp)
+ return prog ? -EBUSY : 0;
+ cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY;
+ } else {
+ if (!prog)
+ return 0;
+ cmd.command = TC_CLSBPF_ADD;
+ }
+
+ ret = nfp_net_bpf_offload(nn, &cmd);
+ /* Stop offload if replace not possible */
+ if (ret && cmd.command == TC_CLSBPF_REPLACE)
+ nfp_bpf_xdp_offload(app, nn, NULL);
+ nn->dp.bpf_offload_xdp = prog && !ret;
+ return ret;
+}
+
+static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
+{
+ return nfp_net_ebpf_capable(nn) ? "BPF" : "";
+}
+
+static int
+nfp_bpf_vnic_init(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
+{
+ struct nfp_net_bpf_priv *priv;
+ int ret;
+
+ /* Limit to single port, otherwise it's just a NIC */
+ if (id > 0) {
+ nfp_warn(app->cpp,
+ "BPF NIC doesn't support more than one port right now\n");
+ nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
+ return PTR_ERR_OR_ZERO(nn->port);
+ }
+
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ nn->app_priv = priv;
+ spin_lock_init(&priv->rx_filter_lock);
+ setup_timer(&priv->rx_filter_stats_timer,
+ nfp_net_filter_stats_timer, (unsigned long)nn);
+
+ ret = nfp_app_nic_vnic_init(app, nn, id);
+ if (ret)
+ kfree(priv);
+
+ return ret;
+}
+
+static void nfp_bpf_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
+{
+ if (nn->dp.bpf_offload_xdp)
+ nfp_bpf_xdp_offload(app, nn, NULL);
+ kfree(nn->app_priv);
+}
+
+static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
+ u32 handle, __be16 proto, struct tc_to_netdev *tc)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
+ return -EOPNOTSUPP;
+ if (proto != htons(ETH_P_ALL))
+ return -EOPNOTSUPP;
+
+ if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) {
+ if (!nn->dp.bpf_offload_xdp)
+ return nfp_net_bpf_offload(nn, tc->cls_bpf);
+ else
+ return -EBUSY;
+ }
+
+ return -EINVAL;
+}
+
+static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn)
+{
+ return nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
+}
+
+const struct nfp_app_type app_bpf = {
+ .id = NFP_APP_BPF_NIC,
+ .name = "ebpf",
+
+ .extra_cap = nfp_bpf_extra_cap,
+
+ .vnic_init = nfp_bpf_vnic_init,
+ .vnic_clean = nfp_bpf_vnic_clean,
+
+ .setup_tc = nfp_bpf_setup_tc,
+ .tc_busy = nfp_bpf_tc_busy,
+ .xdp_offload = nfp_bpf_xdp_offload,
+};
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
similarity index 88%
rename from drivers/net/ethernet/netronome/nfp/nfp_bpf.h
rename to drivers/net/ethernet/netronome/nfp/bpf/main.h
index 9513c80..4051e94 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_bpf.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -39,6 +39,8 @@
#include <linux/list.h>
#include <linux/types.h>
+#include "../nfp_net.h"
+
/* For branch fixup logic use up-most byte of branch instruction as scratch
* area. Remember to clear this before sending instructions to HW!
*/
@@ -198,4 +200,25 @@
int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog);
+struct nfp_net;
+struct tc_cls_bpf_offload;
+
+/**
+ * struct nfp_net_bpf_priv - per-vNIC BPF private data
+ * @rx_filter: Filter offload statistics - dropped packets/bytes
+ * @rx_filter_prev: Filter offload statistics - values from previous update
+ * @rx_filter_change: Jiffies when statistics last changed
+ * @rx_filter_stats_timer: Timer for polling filter offload statistics
+ * @rx_filter_lock: Lock protecting timer state changes (teardown)
+ */
+struct nfp_net_bpf_priv {
+ struct nfp_stat_pair rx_filter, rx_filter_prev;
+ unsigned long rx_filter_change;
+ struct timer_list rx_filter_stats_timer;
+ spinlock_t rx_filter_lock;
+};
+
+int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf);
+void nfp_net_filter_stats_timer(unsigned long data);
+
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
similarity index 84%
rename from drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
rename to drivers/net/ethernet/netronome/nfp/bpf/offload.c
index cc823df..78d80a3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -47,60 +47,59 @@
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
-#include "nfp_bpf.h"
-#include "nfp_net_ctrl.h"
-#include "nfp_net.h"
+#include "main.h"
+#include "../nfp_net_ctrl.h"
+#include "../nfp_net.h"
void nfp_net_filter_stats_timer(unsigned long data)
{
struct nfp_net *nn = (void *)data;
+ struct nfp_net_bpf_priv *priv;
struct nfp_stat_pair latest;
- spin_lock_bh(&nn->rx_filter_lock);
+ priv = nn->app_priv;
+
+ spin_lock_bh(&priv->rx_filter_lock);
if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
- mod_timer(&nn->rx_filter_stats_timer,
+ mod_timer(&priv->rx_filter_stats_timer,
jiffies + NFP_NET_STAT_POLL_IVL);
- spin_unlock_bh(&nn->rx_filter_lock);
+ spin_unlock_bh(&priv->rx_filter_lock);
latest.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
latest.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
- if (latest.pkts != nn->rx_filter.pkts)
- nn->rx_filter_change = jiffies;
+ if (latest.pkts != priv->rx_filter.pkts)
+ priv->rx_filter_change = jiffies;
- nn->rx_filter = latest;
+ priv->rx_filter = latest;
}
static void nfp_net_bpf_stats_reset(struct nfp_net *nn)
{
- nn->rx_filter.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
- nn->rx_filter.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
- nn->rx_filter_prev = nn->rx_filter;
- nn->rx_filter_change = jiffies;
+ struct nfp_net_bpf_priv *priv = nn->app_priv;
+
+ priv->rx_filter.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
+ priv->rx_filter.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
+ priv->rx_filter_prev = priv->rx_filter;
+ priv->rx_filter_change = jiffies;
}
static int
nfp_net_bpf_stats_update(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
- struct tc_action *a;
- LIST_HEAD(actions);
+ struct nfp_net_bpf_priv *priv = nn->app_priv;
u64 bytes, pkts;
- pkts = nn->rx_filter.pkts - nn->rx_filter_prev.pkts;
- bytes = nn->rx_filter.bytes - nn->rx_filter_prev.bytes;
+ pkts = priv->rx_filter.pkts - priv->rx_filter_prev.pkts;
+ bytes = priv->rx_filter.bytes - priv->rx_filter_prev.bytes;
bytes -= pkts * ETH_HLEN;
- nn->rx_filter_prev = nn->rx_filter;
+ priv->rx_filter_prev = priv->rx_filter;
- preempt_disable();
-
- tcf_exts_to_list(cls_bpf->exts, &actions);
- list_for_each_entry(a, &actions, list)
- tcf_action_stats_update(a, bytes, pkts, nn->rx_filter_change);
-
- preempt_enable();
+ tcf_exts_stats_update(cls_bpf->exts,
+ bytes, pkts, priv->rx_filter_change);
return 0;
}
@@ -190,6 +189,7 @@
unsigned int code_sz, unsigned int n_instr,
bool dense_mode)
{
+ struct nfp_net_bpf_priv *priv = nn->app_priv;
u64 bpf_addr = dma_addr;
int err;
@@ -216,20 +216,23 @@
dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr);
nfp_net_bpf_stats_reset(nn);
- mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL);
+ mod_timer(&priv->rx_filter_stats_timer,
+ jiffies + NFP_NET_STAT_POLL_IVL);
}
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
+ struct nfp_net_bpf_priv *priv = nn->app_priv;
+
if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
return 0;
- spin_lock_bh(&nn->rx_filter_lock);
+ spin_lock_bh(&priv->rx_filter_lock);
nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
- spin_unlock_bh(&nn->rx_filter_lock);
+ spin_unlock_bh(&priv->rx_filter_lock);
nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
- del_timer_sync(&nn->rx_filter_stats_timer);
+ del_timer_sync(&priv->rx_filter_stats_timer);
nn->dp.bpf_offload_skip_sw = 0;
return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
similarity index 99%
rename from drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c
rename to drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index b3361f9..d696ba4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -38,7 +38,7 @@
#include <linux/kernel.h>
#include <linux/pkt_cls.h>
-#include "nfp_bpf.h"
+#include "main.h"
/* Analyzer/verifier definitions */
struct nfp_bpf_analyzer_priv {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
new file mode 100644
index 0000000..396b93f
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+
+#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_nffw.h"
+#include "nfp_app.h"
+#include "nfp_main.h"
+
+static const struct nfp_app_type *apps[] = {
+ &app_nic,
+ &app_bpf,
+};
+
+const char *nfp_app_mip_name(struct nfp_app *app)
+{
+ if (!app || !app->pf->mip)
+ return "";
+ return nfp_mip_name(app->pf->mip);
+}
+
+struct sk_buff *nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size)
+{
+ struct sk_buff *skb;
+
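+ /* Leave room for the 8 byte control message metadata prepend */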
+ if (nfp_app_ctrl_has_meta(app))
+ size += 8;
+
+ skb = alloc_skb(size, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ if (nfp_app_ctrl_has_meta(app))
+ skb_reserve(skb, 8);
+
+ return skb;
+}
+
+struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
+{
+ struct nfp_app *app;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(apps); i++)
+ if (apps[i]->id == id)
+ break;
+ if (i == ARRAY_SIZE(apps)) {
+ nfp_err(pf->cpp, "failed to find app with ID 0x%02hhx\n", id);
+ return ERR_PTR(-EINVAL);
+ }
+
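+ /* Every app type must provide at least a name and a vNIC init callback */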
+ if (WARN_ON(!apps[i]->name || !apps[i]->vnic_init))
+ return ERR_PTR(-EINVAL);
+
+ app = kzalloc(sizeof(*app), GFP_KERNEL);
+ if (!app)
+ return ERR_PTR(-ENOMEM);
+
+ app->pf = pf;
+ app->cpp = pf->cpp;
+ app->pdev = pf->pdev;
+ app->type = apps[i];
+
+ return app;
+}
+
+void nfp_app_free(struct nfp_app *app)
+{
+ kfree(app);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
new file mode 100644
index 0000000..f5e373f
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _NFP_APP_H
+#define _NFP_APP_H 1
+
+struct bpf_prog;
+struct net_device;
+struct pci_dev;
+struct sk_buff;
+struct tc_to_netdev;

+struct nfp_app;
+struct nfp_cpp;
+struct nfp_pf;
+struct nfp_net;
+
+enum nfp_app_id {
+ NFP_APP_CORE_NIC = 0x1,
+ NFP_APP_BPF_NIC = 0x2,
+};
+
+extern const struct nfp_app_type app_nic;
+extern const struct nfp_app_type app_bpf;
+
+/**
+ * struct nfp_app_type - application definition
+ * @id: application ID
+ * @name: application name
+ * @ctrl_has_meta: control messages have prepend of type:5/port:CTRL
+ *
+ * Callbacks
+ * @init: perform basic app checks
+ * @extra_cap: extra capabilities string
+ * @vnic_init: init vNICs (assign port types, etc.)
+ * @vnic_clean: clean up app's vNIC state
+ * @start: start application logic
+ * @stop: stop application logic
+ * @ctrl_msg_rx: control message handler
+ * @setup_tc: setup TC ndo
+ * @tc_busy: TC HW offload busy (rules loaded)
+ * @xdp_offload: offload an XDP program
+ */
+struct nfp_app_type {
+ enum nfp_app_id id;
+ const char *name;
+
+ bool ctrl_has_meta;
+
+ int (*init)(struct nfp_app *app);
+
+ const char *(*extra_cap)(struct nfp_app *app, struct nfp_net *nn);
+
+ int (*vnic_init)(struct nfp_app *app, struct nfp_net *nn,
+ unsigned int id);
+ void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn);
+
+ int (*start)(struct nfp_app *app);
+ void (*stop)(struct nfp_app *app);
+
+ void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb);
+
+ int (*setup_tc)(struct nfp_app *app, struct net_device *netdev,
+ u32 handle, __be16 proto, struct tc_to_netdev *tc);
+ bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
+ int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog);
+};
+
+/**
+ * struct nfp_app - NFP application container
+ * @pdev: backpointer to PCI device
+ * @pf: backpointer to NFP PF structure
+ * @cpp: pointer to the CPP handle
+ * @ctrl: pointer to ctrl vNIC struct
+ * @type: pointer to const application ops and info
+ */
+struct nfp_app {
+ struct pci_dev *pdev;
+ struct nfp_pf *pf;
+ struct nfp_cpp *cpp;
+
+ struct nfp_net *ctrl;
+
+ const struct nfp_app_type *type;
+};
+
+bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
+
+static inline int nfp_app_init(struct nfp_app *app)
+{
+ if (!app->type->init)
+ return 0;
+ return app->type->init(app);
+}
+
+static inline int nfp_app_vnic_init(struct nfp_app *app, struct nfp_net *nn,
+ unsigned int id)
+{
+ return app->type->vnic_init(app, nn, id);
+}
+
+static inline void nfp_app_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
+{
+ if (app->type->vnic_clean)
+ app->type->vnic_clean(app, nn);
+}
+
+static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
+{
+ app->ctrl = ctrl;
+ if (!app->type->start)
+ return 0;
+ return app->type->start(app);
+}
+
+static inline void nfp_app_stop(struct nfp_app *app)
+{
+ if (!app->type->stop)
+ return;
+ app->type->stop(app);
+}
+
+static inline const char *nfp_app_name(struct nfp_app *app)
+{
+ if (!app)
+ return "";
+ return app->type->name;
+}
+
+static inline bool nfp_app_needs_ctrl_vnic(struct nfp_app *app)
+{
+ return app && app->type->ctrl_msg_rx;
+}
+
+static inline bool nfp_app_ctrl_has_meta(struct nfp_app *app)
+{
+ return app->type->ctrl_has_meta;
+}
+
+static inline const char *nfp_app_extra_cap(struct nfp_app *app,
+ struct nfp_net *nn)
+{
+ if (!app || !app->type->extra_cap)
+ return "";
+ return app->type->extra_cap(app, nn);
+}
+
+static inline bool nfp_app_has_tc(struct nfp_app *app)
+{
+ return app && app->type->setup_tc;
+}
+
+static inline bool nfp_app_tc_busy(struct nfp_app *app, struct nfp_net *nn)
+{
+ if (!app || !app->type->tc_busy)
+ return false;
+ return app->type->tc_busy(app, nn);
+}
+
+static inline int nfp_app_setup_tc(struct nfp_app *app,
+ struct net_device *netdev,
+ u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
+{
+ if (!app || !app->type->setup_tc)
+ return -EOPNOTSUPP;
+ return app->type->setup_tc(app, netdev, handle, proto, tc);
+}
+
+static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog)
+{
+ if (!app || !app->type->xdp_offload)
+ return -EOPNOTSUPP;
+ return app->type->xdp_offload(app, nn, prog);
+}
+
+static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
+{
+ return nfp_ctrl_tx(app->ctrl, skb);
+}
+
+static inline void nfp_app_ctrl_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+ app->type->ctrl_msg_rx(app, skb);
+}
+
+const char *nfp_app_mip_name(struct nfp_app *app);
+struct sk_buff *nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size);
+
+struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id);
+void nfp_app_free(struct nfp_app *app);
+
+/* Callbacks shared between apps */
+
+int nfp_app_nic_vnic_init(struct nfp_app *app, struct nfp_net *nn,
+ unsigned int id);
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
new file mode 100644
index 0000000..83c65e6
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_nsp.h"
+#include "nfp_app.h"
+#include "nfp_main.h"
+#include "nfp_net.h"
+#include "nfp_port.h"
+
+static int
+nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
+ struct nfp_net *nn, unsigned int id)
+{
+ if (!pf->eth_tbl)
+ return 0;
+
+ nn->port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, nn->dp.netdev);
+ if (IS_ERR(nn->port))
+ return PTR_ERR(nn->port);
+
+ nn->port->eth_id = id;
+ nn->port->eth_port = nfp_net_find_port(pf->eth_tbl, id);
+
+ /* Check if vNIC has external port associated and cfg is OK */
+ if (!nn->port->eth_port) {
+ nfp_err(app->cpp,
+ "NSP port entries don't match vNICs (no entry for port #%d)\n",
+ id);
+ nfp_port_free(nn->port);
+ return -EINVAL;
+ }
+ if (nn->port->eth_port->override_changed) {
+ nfp_warn(app->cpp,
+ "Config changed for port #%d, reboot required before port will be operational\n",
+ id);
+ nn->port->type = NFP_PORT_INVALID;
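+ /* Positive return tells the caller to skip this vNIC without failing */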
+ return 1;
+ }
+
+ return 0;
+}
+
+int nfp_app_nic_vnic_init(struct nfp_app *app, struct nfp_net *nn,
+ unsigned int id)
+{
+ int err;
+
+ err = nfp_app_nic_vnic_init_phy_port(app->pf, app, nn, id);
+ if (err)
+ return err < 0 ? err : 0;
+
+ nfp_net_get_mac_addr(app->pf, nn, id);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index 22484b6..d2b5357 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -34,7 +34,7 @@
#ifndef __NFP_ASM_H__
#define __NFP_ASM_H__ 1
-#include "nfp_bpf.h"
+#include <linux/types.h>
#define REG_NONE 0
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
new file mode 100644
index 0000000..2609a0f
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/rtnetlink.h>
+#include <net/devlink.h>
+
+#include "nfpcore/nfp_nsp.h"
+#include "nfp_app.h"
+#include "nfp_main.h"
+#include "nfp_port.h"
+
+static int
+nfp_devlink_fill_eth_port(struct nfp_port *port,
+ struct nfp_eth_table_port *copy)
+{
+ struct nfp_eth_table_port *eth_port;
+
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EINVAL;
+
+ memcpy(copy, eth_port, sizeof(*eth_port));
+
+ return 0;
+}
+
+static int
+nfp_devlink_fill_eth_port_from_id(struct nfp_pf *pf, unsigned int port_index,
+ struct nfp_eth_table_port *copy)
+{
+ struct nfp_port *port;
+
+ port = nfp_port_from_id(pf, NFP_PORT_PHYS_PORT, port_index);
+
+ return nfp_devlink_fill_eth_port(port, copy);
+}
+
+static int
+nfp_devlink_set_lanes(struct nfp_pf *pf, unsigned int idx, unsigned int lanes)
+{
+ struct nfp_nsp *nsp;
+ int ret;
+
+ nsp = nfp_eth_config_start(pf->cpp, idx);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ ret = __nfp_eth_set_split(nsp, lanes);
+ if (ret) {
+ nfp_eth_config_cleanup_end(nsp);
+ return ret;
+ }
+
+ ret = nfp_eth_config_commit_end(nsp);
+ if (ret < 0)
+ return ret;
+ if (ret) /* no change */
+ return 0;
+
+ return nfp_net_refresh_port_table_sync(pf);
+}
+
+static int
+nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
+ unsigned int count)
+{
+ struct nfp_pf *pf = devlink_priv(devlink);
+ struct nfp_eth_table_port eth_port;
+ int ret;
+
+ if (count < 2)
+ return -EINVAL;
+
+ mutex_lock(&pf->lock);
+
+ rtnl_lock();
+ ret = nfp_devlink_fill_eth_port_from_id(pf, port_index, &eth_port);
+ rtnl_unlock();
+ if (ret)
+ goto out;
+
+ if (eth_port.is_split || eth_port.port_lanes % count) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = nfp_devlink_set_lanes(pf, eth_port.index,
+ eth_port.port_lanes / count);
+out:
+ mutex_unlock(&pf->lock);
+
+ return ret;
+}
+
+static int
+nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index)
+{
+ struct nfp_pf *pf = devlink_priv(devlink);
+ struct nfp_eth_table_port eth_port;
+ int ret;
+
+ mutex_lock(&pf->lock);
+
+ rtnl_lock();
+ ret = nfp_devlink_fill_eth_port_from_id(pf, port_index, &eth_port);
+ rtnl_unlock();
+ if (ret)
+ goto out;
+
+ if (!eth_port.is_split) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = nfp_devlink_set_lanes(pf, eth_port.index, eth_port.port_lanes);
+out:
+ mutex_unlock(&pf->lock);
+
+ return ret;
+}
+
+const struct devlink_ops nfp_devlink_ops = {
+ .port_split = nfp_devlink_port_split,
+ .port_unsplit = nfp_devlink_port_unsplit,
+};
+
+int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
+{
+ struct nfp_eth_table_port eth_port;
+ struct devlink *devlink;
+ int ret;
+
+ rtnl_lock();
+ ret = nfp_devlink_fill_eth_port(port, &eth_port);
+ rtnl_unlock();
+ if (ret)
+ return ret;
+
+ devlink_port_type_eth_set(&port->dl_port, port->netdev);
+ if (eth_port.is_split)
+ devlink_port_split_set(&port->dl_port, eth_port.label_port);
+
+ devlink = priv_to_devlink(app->pf);
+
+ return devlink_port_register(devlink, &port->dl_port, port->eth_id);
+}
+
+void nfp_devlink_port_unregister(struct nfp_port *port)
+{
+ devlink_port_unregister(&port->dl_port);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
new file mode 100644
index 0000000..f0dcf45
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/hwmon.h>
+
+#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_nsp.h"
+#include "nfp_main.h"
+
+#define NFP_TEMP_MAX (95 * 1000)
+#define NFP_TEMP_CRIT (105 * 1000)
+
+#define NFP_POWER_MAX (25 * 1000 * 1000)
+
+static int nfp_hwmon_sensor_id(enum hwmon_sensor_types type, int channel)
+{
+ if (type == hwmon_temp)
+ return NFP_SENSOR_CHIP_TEMPERATURE;
+ if (type == hwmon_power)
+ return NFP_SENSOR_ASSEMBLY_POWER + channel;
+ return -EINVAL;
+}
+
+static int
+nfp_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ static const struct {
+ enum hwmon_sensor_types type;
+ u32 attr;
+ long val;
+ } const_vals[] = {
+ { hwmon_temp, hwmon_temp_max, NFP_TEMP_MAX },
+ { hwmon_temp, hwmon_temp_crit, NFP_TEMP_CRIT },
+ { hwmon_power, hwmon_power_max, NFP_POWER_MAX },
+ };
+ struct nfp_pf *pf = dev_get_drvdata(dev);
+ enum nfp_nsp_sensor_id id;
+ int err, i;
+
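+ /* Fixed limits come from the table above without querying the NSP */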
+ for (i = 0; i < ARRAY_SIZE(const_vals); i++)
+ if (const_vals[i].type == type && const_vals[i].attr == attr) {
+ *val = const_vals[i].val;
+ return 0;
+ }
+
+ err = nfp_hwmon_sensor_id(type, channel);
+ if (err < 0)
+ return err;
+ id = err;
+
+ if (!(pf->nspi->sensor_mask & BIT(id)))
+ return -EOPNOTSUPP;
+
+ if (type == hwmon_temp && attr == hwmon_temp_input)
+ return nfp_hwmon_read_sensor(pf->cpp, id, val);
+ if (type == hwmon_power && attr == hwmon_power_input)
+ return nfp_hwmon_read_sensor(pf->cpp, id, val);
+
+ return -EINVAL;
+}
+
+static umode_t
+nfp_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ if (type == hwmon_temp) {
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_crit:
+ case hwmon_temp_max:
+ return 0444;
+ }
+ } else if (type == hwmon_power) {
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_max:
+ return 0444;
+ }
+ }
+ return 0;
+}
+
+static u32 nfp_chip_config[] = {
+ HWMON_C_REGISTER_TZ,
+ 0
+};
+
+static const struct hwmon_channel_info nfp_chip = {
+ .type = hwmon_chip,
+ .config = nfp_chip_config,
+};
+
+static u32 nfp_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT,
+ 0
+};
+
+static const struct hwmon_channel_info nfp_temp = {
+ .type = hwmon_temp,
+ .config = nfp_temp_config,
+};
+
+static u32 nfp_power_config[] = {
+ HWMON_P_INPUT | HWMON_P_MAX,
+ HWMON_P_INPUT,
+ HWMON_P_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info nfp_power = {
+ .type = hwmon_power,
+ .config = nfp_power_config,
+};
+
+static const struct hwmon_channel_info *nfp_hwmon_info[] = {
+ &nfp_chip,
+ &nfp_temp,
+ &nfp_power,
+ NULL
+};
+
+static const struct hwmon_ops nfp_hwmon_ops = {
+ .is_visible = nfp_hwmon_is_visible,
+ .read = nfp_hwmon_read,
+};
+
+static const struct hwmon_chip_info nfp_chip_info = {
+ .ops = &nfp_hwmon_ops,
+ .info = nfp_hwmon_info,
+};
+
+int nfp_hwmon_register(struct nfp_pf *pf)
+{
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ return 0;
+
+ if (!pf->nspi) {
+ nfp_warn(pf->cpp, "not registering HWMON (no NSP info)\n");
+ return 0;
+ }
+ if (!pf->nspi->sensor_mask) {
+ nfp_info(pf->cpp,
+ "not registering HWMON (NSP doesn't report sensors)\n");
+ return 0;
+ }
+
+ pf->hwmon_dev = hwmon_device_register_with_info(&pf->pdev->dev, "nfp",
+ pf, &nfp_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(pf->hwmon_dev);
+}
+
+void nfp_hwmon_unregister(struct nfp_pf *pf)
+{
+ if (!IS_REACHABLE(CONFIG_HWMON) || !pf->hwmon_dev)
+ return;
+
+ hwmon_device_unregister(pf->hwmon_dev);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index dde35da..4e59dcb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -41,9 +41,11 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/vermagic.h>
+#include <net/devlink.h>
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
@@ -71,20 +73,22 @@
};
MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids);
-static void nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf)
+static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf)
{
-#ifdef CONFIG_PCI_IOV
int err;
- pf->limit_vfs = nfp_rtsym_read_le(pf->cpp, "nfd_vf_cfg_max_vfs", &err);
+ pf->limit_vfs = nfp_rtsym_read_le(pf->rtbl, "nfd_vf_cfg_max_vfs", &err);
if (!err)
- return;
+ return pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs);
pf->limit_vfs = ~0;
+ pci_sriov_set_totalvfs(pf->pdev, 0); /* 0 is unset */
/* Allow any setting for backwards compatibility if symbol not found */
- if (err != -ENOENT)
- nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err);
-#endif
+ if (err == -ENOENT)
+ return 0;
+
+ nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err);
+ return err;
}
static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
@@ -166,7 +170,7 @@
return NULL;
}
- fw_model = nfp_hwinfo_lookup(pf->cpp, "assembly.partno");
+ fw_model = nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno");
if (!fw_model) {
dev_err(&pdev->dev, "Error: can't read part number\n");
return NULL;
@@ -253,7 +257,6 @@
static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf)
{
- struct nfp_nsp_identify *nspi;
struct nfp_nsp *nsp;
int err;
@@ -270,14 +273,13 @@
pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
- nspi = __nfp_nsp_identify(nsp);
- if (nspi) {
- dev_info(&pdev->dev, "BSP: %s\n", nspi->version);
- kfree(nspi);
- }
+ pf->nspi = __nfp_nsp_identify(nsp);
+ if (pf->nspi)
+ dev_info(&pdev->dev, "BSP: %s\n", pf->nspi->version);
err = nfp_fw_load(pdev, pf, nsp);
if (err < 0) {
+ kfree(pf->nspi);
kfree(pf->eth_tbl);
dev_err(&pdev->dev, "Failed to load FW\n");
goto exit_close_nsp;
@@ -315,6 +317,7 @@
static int nfp_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
+ struct devlink *devlink;
struct nfp_pf *pf;
int err;
@@ -335,12 +338,15 @@
goto err_pci_disable;
}
- pf = kzalloc(sizeof(*pf), GFP_KERNEL);
- if (!pf) {
+ devlink = devlink_alloc(&nfp_devlink_ops, sizeof(*pf));
+ if (!devlink) {
err = -ENOMEM;
goto err_rel_regions;
}
+ pf = devlink_priv(devlink);
+ INIT_LIST_HEAD(&pf->vnics);
INIT_LIST_HEAD(&pf->ports);
+ mutex_init(&pf->lock);
pci_set_drvdata(pdev, pf);
pf->pdev = pdev;
@@ -352,34 +358,62 @@
goto err_disable_msix;
}
+ pf->hwinfo = nfp_hwinfo_read(pf->cpp);
+
dev_info(&pdev->dev, "Assembly: %s%s%s-%s CPLD: %s\n",
- nfp_hwinfo_lookup(pf->cpp, "assembly.vendor"),
- nfp_hwinfo_lookup(pf->cpp, "assembly.partno"),
- nfp_hwinfo_lookup(pf->cpp, "assembly.serial"),
- nfp_hwinfo_lookup(pf->cpp, "assembly.revision"),
- nfp_hwinfo_lookup(pf->cpp, "cpld.version"));
+ nfp_hwinfo_lookup(pf->hwinfo, "assembly.vendor"),
+ nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno"),
+ nfp_hwinfo_lookup(pf->hwinfo, "assembly.serial"),
+ nfp_hwinfo_lookup(pf->hwinfo, "assembly.revision"),
+ nfp_hwinfo_lookup(pf->hwinfo, "cpld.version"));
+
+ err = devlink_register(devlink, &pdev->dev);
+ if (err)
+ goto err_hwinfo_free;
err = nfp_nsp_init(pdev, pf);
if (err)
- goto err_cpp_free;
+ goto err_devlink_unreg;
- nfp_pcie_sriov_read_nfd_limit(pf);
+ pf->mip = nfp_mip_open(pf->cpp);
+ pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip);
- err = nfp_net_pci_probe(pf);
+ err = nfp_pcie_sriov_read_nfd_limit(pf);
if (err)
goto err_fw_unload;
+ err = nfp_net_pci_probe(pf);
+ if (err)
+ goto err_sriov_unlimit;
+
+ err = nfp_hwmon_register(pf);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register hwmon info\n");
+ goto err_net_remove;
+ }
+
return 0;
+err_net_remove:
+ nfp_net_pci_remove(pf);
+err_sriov_unlimit:
+ pci_sriov_set_totalvfs(pf->pdev, 0);
err_fw_unload:
+ kfree(pf->rtbl);
+ nfp_mip_close(pf->mip);
if (pf->fw_loaded)
nfp_fw_unload(pf);
kfree(pf->eth_tbl);
-err_cpp_free:
+ kfree(pf->nspi);
+err_devlink_unreg:
+ devlink_unregister(devlink);
+err_hwinfo_free:
+ kfree(pf->hwinfo);
nfp_cpp_free(pf->cpp);
err_disable_msix:
pci_set_drvdata(pdev, NULL);
- kfree(pf);
+ mutex_destroy(&pf->lock);
+ devlink_free(devlink);
err_rel_regions:
pci_release_regions(pdev);
err_pci_disable:
@@ -391,19 +425,32 @@
static void nfp_pci_remove(struct pci_dev *pdev)
{
struct nfp_pf *pf = pci_get_drvdata(pdev);
+ struct devlink *devlink;
+
+ nfp_hwmon_unregister(pf);
+
+ devlink = priv_to_devlink(pf);
nfp_net_pci_remove(pf);
nfp_pcie_sriov_disable(pdev);
+ pci_sriov_set_totalvfs(pf->pdev, 0);
+ devlink_unregister(devlink);
+
+ kfree(pf->rtbl);
+ nfp_mip_close(pf->mip);
if (pf->fw_loaded)
nfp_fw_unload(pf);
pci_set_drvdata(pdev, NULL);
+ kfree(pf->hwinfo);
nfp_cpp_free(pf->cpp);
kfree(pf->eth_tbl);
- kfree(pf);
+ kfree(pf->nspi);
+ mutex_destroy(&pf->lock);
+ devlink_free(devlink);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index b57de04..88724f8 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -47,39 +47,56 @@
#include <linux/workqueue.h>
struct dentry;
+struct device;
+struct devlink_ops;
struct pci_dev;
struct nfp_cpp;
struct nfp_cpp_area;
struct nfp_eth_table;
+struct nfp_hwinfo;
+struct nfp_mip;
+struct nfp_net;
+struct nfp_nsp_identify;
+struct nfp_rtsym_table;
/**
* struct nfp_pf - NFP PF-specific device structure
* @pdev: Backpointer to PCI device
* @cpp: Pointer to the CPP handle
- * @ctrl_area: Pointer to the CPP area for the control BAR
- * @tx_area: Pointer to the CPP area for the TX queues
- * @rx_area: Pointer to the CPP area for the FL/RX queues
- * @irq_entries: Array of MSI-X entries for all ports
+ * @app: Pointer to the APP handle
+ * @data_vnic_bar: Pointer to the CPP area for the data vNICs' BARs
+ * @ctrl_vnic_bar: Pointer to the CPP area for the ctrl vNIC's BAR
+ * @qc_area: Pointer to the CPP area for the queues
+ * @irq_entries: Array of MSI-X entries for all vNICs
* @limit_vfs: Number of VFs supported by firmware (~0 for PCI limit)
* @num_vfs: Number of SR-IOV VFs enabled
* @fw_loaded: Is the firmware loaded?
+ * @ctrl_vnic: Pointer to the control vNIC if available
+ * @mip: MIP handle
+ * @rtbl: RTsym table
+ * @hwinfo: HWInfo table
* @eth_tbl: NSP ETH table
+ * @nspi: NSP identification info
+ * @hwmon_dev: pointer to hwmon device
* @ddir: Per-device debugfs directory
- * @num_ports: Number of adapter ports app firmware supports
- * @num_netdevs: Number of netdevs spawned
- * @ports: Linked list of port structures (struct nfp_net)
- * @port_lock: Protects @ports, @num_ports, @num_netdevs
+ * @max_data_vnics: Number of data vNICs app firmware supports
+ * @num_vnics: Number of vNICs spawned
+ * @vnics: Linked list of vNIC structures (struct nfp_net)
+ * @ports: Linked list of port structures (struct nfp_port)
* @port_refresh_work: Work entry for taking netdevs out
+ * @lock: Protects all fields which may change after probe
*/
struct nfp_pf {
struct pci_dev *pdev;
struct nfp_cpp *cpp;
- struct nfp_cpp_area *ctrl_area;
- struct nfp_cpp_area *tx_area;
- struct nfp_cpp_area *rx_area;
+ struct nfp_app *app;
+
+ struct nfp_cpp_area *data_vnic_bar;
+ struct nfp_cpp_area *ctrl_vnic_bar;
+ struct nfp_cpp_area *qc_area;
struct msix_entry *irq_entries;
@@ -88,21 +105,42 @@
bool fw_loaded;
+ struct nfp_net *ctrl_vnic;
+
+ const struct nfp_mip *mip;
+ struct nfp_rtsym_table *rtbl;
+ struct nfp_hwinfo *hwinfo;
struct nfp_eth_table *eth_tbl;
+ struct nfp_nsp_identify *nspi;
+
+ struct device *hwmon_dev;
struct dentry *ddir;
- unsigned int num_ports;
- unsigned int num_netdevs;
+ unsigned int max_data_vnics;
+ unsigned int num_vnics;
+ struct list_head vnics;
struct list_head ports;
struct work_struct port_refresh_work;
- struct mutex port_lock;
+ struct mutex lock;
};
extern struct pci_driver nfp_netvf_pci_driver;
+extern const struct devlink_ops nfp_devlink_ops;
+
int nfp_net_pci_probe(struct nfp_pf *pf);
void nfp_net_pci_remove(struct nfp_pf *pf);
+int nfp_hwmon_register(struct nfp_pf *pf);
+void nfp_hwmon_unregister(struct nfp_pf *pf);
+
+struct nfp_eth_table_port *
+nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int id);
+void
+nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id);
+
+bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
+
#endif /* NFP_MAIN_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index fcf81b3..02fd8d4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -50,15 +50,32 @@
#include "nfp_net_ctrl.h"
-#define nn_err(nn, fmt, args...) netdev_err((nn)->dp.netdev, fmt, ## args)
-#define nn_warn(nn, fmt, args...) netdev_warn((nn)->dp.netdev, fmt, ## args)
-#define nn_info(nn, fmt, args...) netdev_info((nn)->dp.netdev, fmt, ## args)
-#define nn_dbg(nn, fmt, args...) netdev_dbg((nn)->dp.netdev, fmt, ## args)
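+/* Ctrl vNICs have no netdev, print via dev_printk() with a "ctrl: " prefix */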
+#define nn_pr(nn, lvl, fmt, args...) \
+ ({ \
+ struct nfp_net *__nn = (nn); \
+ \
+ if (__nn->dp.netdev) \
+ netdev_printk(lvl, __nn->dp.netdev, fmt, ## args); \
+ else \
+ dev_printk(lvl, __nn->dp.dev, "ctrl: " fmt, ## args); \
+ })
+
+#define nn_err(nn, fmt, args...) nn_pr(nn, KERN_ERR, fmt, ## args)
+#define nn_warn(nn, fmt, args...) nn_pr(nn, KERN_WARNING, fmt, ## args)
+#define nn_info(nn, fmt, args...) nn_pr(nn, KERN_INFO, fmt, ## args)
+#define nn_dbg(nn, fmt, args...) nn_pr(nn, KERN_DEBUG, fmt, ## args)
+
#define nn_dp_warn(dp, fmt, args...) \
- do { \
- if (unlikely(net_ratelimit())) \
- netdev_warn((dp)->netdev, fmt, ## args); \
- } while (0)
+ ({ \
+ struct nfp_net_dp *__dp = (dp); \
+ \
+ if (unlikely(net_ratelimit())) { \
+ if (__dp->netdev) \
+ netdev_warn(__dp->netdev, fmt, ## args); \
+ else \
+ dev_warn(__dp->dev, fmt, ## args); \
+ } \
+ })
/* Max time to wait for NFP to respond on updates (in seconds) */
#define NFP_NET_POLL_TIMEOUT 5
@@ -84,7 +101,7 @@
#define NFP_NET_NON_Q_VECTORS 2
#define NFP_NET_IRQ_LSC_IDX 0
#define NFP_NET_IRQ_EXN_IDX 1
-#define NFP_NET_MIN_PORT_IRQS (NFP_NET_NON_Q_VECTORS + 1)
+#define NFP_NET_MIN_VNIC_IRQS (NFP_NET_NON_Q_VECTORS + 1)
/* Queue/Ring definitions */
#define NFP_NET_MAX_TX_RINGS 64 /* Max. # of Tx rings per device */
@@ -102,6 +119,7 @@
#define NFP_NET_RX_DESCS_DEFAULT 4096 /* Default # of Rx descs per ring */
#define NFP_NET_FL_BATCH 16 /* Add freelist in this Batch size */
+#define NFP_NET_XDP_MAX_COMPLETE 2048 /* XDP bufs to reclaim in NAPI poll */
/* Offload definitions */
#define NFP_NET_N_VXLAN_PORTS (NFP_NET_CFG_VXLAN_SZ / sizeof(__be16))
@@ -115,6 +133,10 @@
struct nfp_eth_table_port;
struct nfp_net;
struct nfp_net_r_vector;
+struct nfp_port;
+
+/* Convenience macro for wrapping descriptor index on ring size */
+#define D_IDX(ring, idx) ((idx) & ((ring)->cnt - 1))
/* Convenience macro for writing dma address into RX/TX descriptors */
#define nfp_desc_set_dma_addr(desc, dma_addr) \
@@ -153,10 +175,15 @@
__le32 dma_addr_lo; /* Low 32bit of host buf addr */
__le16 mss; /* MSS to be used for LSO */
- u8 l4_offset; /* LSO, where the L4 data starts */
+ u8 lso_hdrlen; /* LSO, TCP payload offset */
u8 flags; /* TX Flags, see @PCIE_DESC_TX_* */
-
- __le16 vlan; /* VLAN tag to add if indicated */
+ union {
+ struct {
+ u8 l3_offset; /* L3 header offset */
+ u8 l4_offset; /* L4 header offset */
+ };
+ __le16 vlan; /* VLAN tag to add if indicated */
+ };
__le16 data_len; /* Length of frame + meta data */
} __packed;
__le32 vals[4];
@@ -287,9 +314,11 @@
#define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)
struct nfp_meta_parsed {
- u32 hash_type;
+ u8 hash_type;
+ u8 csum_type;
u32 hash;
u32 mark;
+ __wsum csum;
};
struct nfp_net_rx_hash {
@@ -316,8 +345,6 @@
* @idx: Ring index from Linux's perspective
* @fl_qcidx: Queue Controller Peripheral (QCP) queue index for the freelist
* @qcp_fl: Pointer to base of the QCP freelist queue
- * @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer
- * (used for free list batching)
* @rxbufs: Array of transmitted FL/RX buffers
* @rxds: Virtual address of FL/RX ring in host memory
* @dma: DMA address of the FL/RX ring
@@ -331,7 +358,6 @@
u32 rd_p;
u32 idx;
- u32 wr_ptr_add;
int fl_qcidx;
u8 __iomem *qcp_fl;
@@ -379,7 +405,14 @@
*/
struct nfp_net_r_vector {
struct nfp_net *nfp_net;
- struct napi_struct napi;
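+ /* Data vNICs are NAPI driven; the ctrl vNIC uses a tasklet and skb queue */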
+ union {
+ struct napi_struct napi;
+ struct {
+ struct tasklet_struct tasklet;
+ struct sk_buff_head queue;
+ struct spinlock lock;
+ };
+ };
struct nfp_net_tx_ring *tx_ring;
struct nfp_net_rx_ring *rx_ring;
@@ -508,11 +541,6 @@
* @rss_cfg: RSS configuration
* @rss_key: RSS secret key
* @rss_itbl: RSS indirection table
- * @rx_filter: Filter offload statistics - dropped packets/bytes
- * @rx_filter_prev: Filter offload statistics - values from previous update
- * @rx_filter_change: Jiffies when statistics last changed
- * @rx_filter_stats_timer: Timer for polling filter offload statistics
- * @rx_filter_lock: Lock protecting timer state changes (teardown)
* @max_r_vecs: Number of allocated interrupt vectors for RX/TX
* @max_tx_rings: Maximum number of TX rings supported by the Firmware
* @max_rx_rings: Maximum number of RX rings supported by the Firmware
@@ -531,7 +559,6 @@
* @reconfig_sync_present: Some thread is performing synchronous reconfig
* @reconfig_timer: Timer for async reading of reconfig results
* @link_up: Is the link up?
- * @link_changed: Has link state changes since last port refresh?
* @link_status_lock: Protects @link_* and ensures atomicity with BAR reading
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
* @rx_coalesce_max_frames: RX interrupt moderation frame count parameter
@@ -544,10 +571,11 @@
* @rx_bar: Pointer to mapped FL/RX queues
* @debugfs_dir: Device directory in debugfs
* @ethtool_dump_flag: Ethtool dump flag
- * @port_list: Entry on device port list
+ * @vnic_list: Entry on device vNIC list
* @pdev: Backpointer to PCI device
- * @cpp: CPP device handle if available
- * @eth_port: Translated ETH Table port entry
+ * @app: APP handle if available
+ * @port: Pointer to nfp_port structure if vNIC is a port
+ * @app_priv: APP private data for this vNIC
*/
struct nfp_net {
struct nfp_net_dp dp;
@@ -562,11 +590,6 @@
u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];
- struct nfp_stat_pair rx_filter, rx_filter_prev;
- unsigned long rx_filter_change;
- struct timer_list rx_filter_stats_timer;
- spinlock_t rx_filter_lock;
-
unsigned int max_tx_rings;
unsigned int max_rx_rings;
@@ -589,7 +612,6 @@
u32 me_freq_mhz;
bool link_up;
- bool link_changed;
spinlock_t link_status_lock;
spinlock_t reconfig_lock;
@@ -614,12 +636,14 @@
struct dentry *debugfs_dir;
u32 ethtool_dump_flag;
- struct list_head port_list;
+ struct list_head vnic_list;
struct pci_dev *pdev;
- struct nfp_cpp *cpp;
+ struct nfp_app *app;
- struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+
+ void *app_priv;
};
/* Functions to read/write from/to a BAR
@@ -681,6 +705,7 @@
* either add to a pointer or to read the pointer value.
*/
#define NFP_QCP_QUEUE_ADDR_SZ 0x800
+#define NFP_QCP_QUEUE_AREA_SZ 0x80000
#define NFP_QCP_QUEUE_OFF(_x) ((_x) * NFP_QCP_QUEUE_ADDR_SZ)
#define NFP_QCP_QUEUE_ADD_RPTR 0x0000
#define NFP_QCP_QUEUE_ADD_WPTR 0x0004
@@ -788,19 +813,47 @@
return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR);
}
+static inline bool nfp_net_is_data_vnic(struct nfp_net *nn)
+{
+ WARN_ON_ONCE(!nn->dp.netdev && nn->port);
+ return !!nn->dp.netdev;
+}
+
+static inline bool nfp_net_running(struct nfp_net *nn)
+{
+ return nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE;
+}
+
+static inline const char *nfp_net_name(struct nfp_net *nn)
+{
+ return nn->dp.netdev ? nn->dp.netdev->name : "ctrl";
+}
+
/* Globals */
extern const char nfp_driver_version[];
+extern const struct net_device_ops nfp_net_netdev_ops;
+
+static inline bool nfp_netdev_is_nfp_net(struct net_device *netdev)
+{
+ return netdev->netdev_ops == &nfp_net_netdev_ops;
+}
+
/* Prototypes */
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
void __iomem *ctrl_bar);
struct nfp_net *
-nfp_net_netdev_alloc(struct pci_dev *pdev,
- unsigned int max_tx_rings, unsigned int max_rx_rings);
-void nfp_net_netdev_free(struct nfp_net *nn);
-int nfp_net_netdev_init(struct net_device *netdev);
-void nfp_net_netdev_clean(struct net_device *netdev);
+nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
+ unsigned int max_tx_rings, unsigned int max_rx_rings);
+void nfp_net_free(struct nfp_net *nn);
+
+int nfp_net_init(struct nfp_net *nn);
+void nfp_net_clean(struct nfp_net *nn);
+
+int nfp_ctrl_open(struct nfp_net *nn);
+void nfp_ctrl_close(struct nfp_net *nn);
+
void nfp_net_set_ethtool_ops(struct net_device *netdev);
void nfp_net_info(struct nfp_net *nn);
int nfp_net_reconfig(struct nfp_net *nn, u32 update);
@@ -821,15 +874,11 @@
int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
struct netlink_ext_ack *extack);
-bool nfp_net_link_changed_read_clear(struct nfp_net *nn);
-int nfp_net_refresh_eth_port(struct nfp_net *nn);
-void nfp_net_refresh_port_table(struct nfp_net *nn);
-
#ifdef CONFIG_NFP_DEBUG
void nfp_net_debugfs_create(void);
void nfp_net_debugfs_destroy(void);
struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev);
-void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id);
+void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id);
void nfp_net_debugfs_dir_clean(struct dentry **dir);
#else
static inline void nfp_net_debugfs_create(void)
@@ -846,7 +895,7 @@
}
static inline void
-nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id)
+nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id)
{
}
@@ -855,7 +904,4 @@
}
#endif /* CONFIG_NFP_DEBUG */
-void nfp_net_filter_stats_timer(unsigned long data);
-int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf);
-
#endif /* _NFP_NET_H_ */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 82bd6b0..2bdddd1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -61,15 +61,16 @@
#include <linux/log2.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
-
+#include <linux/vmalloc.h>
#include <linux/ktime.h>
-#include <net/pkt_cls.h>
#include <net/vxlan.h>
#include "nfpcore/nfp_nsp.h"
+#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
+#include "nfp_port.h"
/**
* nfp_net_get_fw_version() - Read and parse the FW version
@@ -279,6 +280,30 @@
return ret;
}
+/**
+ * nfp_net_reconfig_mbox() - Reconfigure the firmware via the mailbox
+ * @nn: NFP Net device to reconfigure
+ * @mbox_cmd: The value for the mailbox command
+ *
+ * Helper function for mailbox updates
+ *
+ * Return: Negative errno on error, 0 on success
+ */
+static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
+{
+ int ret;
+
+ nn_writeq(nn, NFP_NET_CFG_MBOX_CMD, mbox_cmd);
+
+ ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
+ if (ret) {
+ nn_err(nn, "Mailbox update error\n");
+ return ret;
+ }
+
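+ /* The mailbox return register holds a positive error code, negate it */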
+ return -nn_readl(nn, NFP_NET_CFG_MBOX_RET);
+}
+
/* Interrupt configuration and handling
*/
@@ -391,17 +416,13 @@
return IRQ_HANDLED;
}
-bool nfp_net_link_changed_read_clear(struct nfp_net *nn)
+static irqreturn_t nfp_ctrl_irq_rxtx(int irq, void *data)
{
- unsigned long flags;
- bool ret;
+ struct nfp_net_r_vector *r_vec = data;
- spin_lock_irqsave(&nn->link_status_lock, flags);
- ret = nn->link_changed;
- nn->link_changed = false;
- spin_unlock_irqrestore(&nn->link_status_lock, flags);
+ tasklet_schedule(&r_vec->tasklet);
- return ret;
+ return IRQ_HANDLED;
}
/**
@@ -423,7 +444,8 @@
goto out;
nn->link_up = link_up;
- nn->link_changed = true;
+ if (nn->port)
+ set_bit(NFP_PORT_CHANGED, &nn->port->flags);
if (nn->link_up) {
netif_carrier_on(nn->dp.netdev);
@@ -515,34 +537,6 @@
}
/**
- * nfp_net_vecs_init() - Assign IRQs and setup rvecs.
- * @netdev: netdev structure
- */
-static void nfp_net_vecs_init(struct net_device *netdev)
-{
- struct nfp_net *nn = netdev_priv(netdev);
- struct nfp_net_r_vector *r_vec;
- int r;
-
- nn->lsc_handler = nfp_net_irq_lsc;
- nn->exn_handler = nfp_net_irq_exn;
-
- for (r = 0; r < nn->max_r_vecs; r++) {
- struct msix_entry *entry;
-
- entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];
-
- r_vec = &nn->r_vecs[r];
- r_vec->nfp_net = nn;
- r_vec->handler = nfp_net_irq_rxtx;
- r_vec->irq_entry = entry->entry;
- r_vec->irq_vector = entry->vector;
-
- cpumask_set_cpu(r, &r_vec->affinity_mask);
- }
-}
-
-/**
* nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
* @nn: NFP Network structure
* @ctrl_offset: Control BAR offset where IRQ configuration should be written
@@ -562,7 +556,7 @@
entry = &nn->irq_entries[vector_idx];
- snprintf(name, name_sz, format, netdev_name(nn->dp.netdev));
+ snprintf(name, name_sz, format, nfp_net_name(nn));
err = request_irq(entry->vector, handler, 0, name, nn);
if (err) {
nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
@@ -667,17 +661,22 @@
if (!skb_is_gso(skb))
return;
- if (!skb->encapsulation)
+ if (!skb->encapsulation) {
+ txd->l3_offset = skb_network_offset(skb);
+ txd->l4_offset = skb_transport_offset(skb);
hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
- else
+ } else {
+ txd->l3_offset = skb_inner_network_offset(skb);
+ txd->l4_offset = skb_inner_transport_offset(skb);
hdrlen = skb_inner_transport_header(skb) - skb->data +
inner_tcp_hdrlen(skb);
+ }
txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
- txd->l4_offset = hdrlen;
+ txd->lso_hdrlen = hdrlen;
txd->mss = cpu_to_le16(mss);
txd->flags |= PCIE_DESC_TX_LSO;
@@ -804,7 +803,7 @@
if (dma_mapping_error(dp->dev, dma_addr))
goto err_free;
- wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
/* Stash the soft descriptor of the head then initialize it */
txbuf = &tx_ring->txbufs[wr_idx];
@@ -823,12 +822,11 @@
txd->flags = 0;
txd->mss = 0;
- txd->l4_offset = 0;
+ txd->lso_hdrlen = 0;
+ /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
nfp_net_tx_tso(r_vec, txbuf, txd, skb);
-
nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
-
if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
txd->flags |= PCIE_DESC_TX_VLAN;
txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
@@ -848,7 +846,7 @@
if (dma_mapping_error(dp->dev, dma_addr))
goto err_unmap;
- wr_idx = (wr_idx + 1) & (tx_ring->cnt - 1);
+ wr_idx = D_IDX(tx_ring, wr_idx + 1);
tx_ring->txbufs[wr_idx].skb = skb;
tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
tx_ring->txbufs[wr_idx].fidx = f;
@@ -936,14 +934,10 @@
if (qcp_rd_p == tx_ring->qcp_rd_p)
return;
- if (qcp_rd_p > tx_ring->qcp_rd_p)
- todo = qcp_rd_p - tx_ring->qcp_rd_p;
- else
- todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;
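+ /* D_IDX masks the difference, which also handles pointer wrap-around */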
+ todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
while (todo--) {
- idx = tx_ring->rd_p & (tx_ring->cnt - 1);
- tx_ring->rd_p++;
+ idx = D_IDX(tx_ring, tx_ring->rd_p++);
skb = tx_ring->txbufs[idx].skb;
if (!skb)
@@ -982,6 +976,9 @@
r_vec->tx_pkts += done_pkts;
u64_stats_update_end(&r_vec->tx_sync);
+ if (!dp->netdev)
+ return;
+
nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
if (nfp_net_tx_ring_should_wake(tx_ring)) {
@@ -997,45 +994,45 @@
tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
}
-static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
+static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
u32 done_pkts = 0, done_bytes = 0;
+ bool done_all;
int idx, todo;
u32 qcp_rd_p;
- if (tx_ring->wr_p == tx_ring->rd_p)
- return;
-
/* Work out how many descriptors have been transmitted */
qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
if (qcp_rd_p == tx_ring->qcp_rd_p)
- return;
+ return true;
- if (qcp_rd_p > tx_ring->qcp_rd_p)
- todo = qcp_rd_p - tx_ring->qcp_rd_p;
- else
- todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;
+ todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+
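+ /* Bound the work done per poll and report whether the ring was drained */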
+ done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
+ todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
+
+ tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
done_pkts = todo;
while (todo--) {
- idx = tx_ring->rd_p & (tx_ring->cnt - 1);
+ idx = D_IDX(tx_ring, tx_ring->rd_p);
tx_ring->rd_p++;
done_bytes += tx_ring->txbufs[idx].real_len;
}
- tx_ring->qcp_rd_p = qcp_rd_p;
-
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_bytes += done_bytes;
r_vec->tx_pkts += done_pkts;
u64_stats_update_end(&r_vec->tx_sync);
WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
- "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+ "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+
+ return done_all;
}
/**
@@ -1056,7 +1053,7 @@
struct sk_buff *skb;
int idx, nr_frags;
- idx = tx_ring->rd_p & (tx_ring->cnt - 1);
+ idx = D_IDX(tx_ring, tx_ring->rd_p);
tx_buf = &tx_ring->txbufs[idx];
skb = tx_ring->txbufs[idx].skb;
@@ -1091,7 +1088,7 @@
tx_ring->qcp_rd_p = 0;
tx_ring->wr_ptr_add = 0;
- if (tx_ring->is_xdp)
+ if (tx_ring->is_xdp || !dp->netdev)
return;
nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
@@ -1209,7 +1206,7 @@
{
unsigned int wr_idx;
- wr_idx = rx_ring->wr_p & (rx_ring->cnt - 1);
+ wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
nfp_net_dma_sync_dev_rx(dp, dma_addr);
@@ -1224,14 +1221,12 @@
dma_addr + dp->rx_dma_off);
rx_ring->wr_p++;
- rx_ring->wr_ptr_add++;
- if (rx_ring->wr_ptr_add >= NFP_NET_FL_BATCH) {
+ if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
/* Update write pointer of the freelist queue. Make
* sure all writes are flushed before telling the hardware.
*/
wmb();
- nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, rx_ring->wr_ptr_add);
- rx_ring->wr_ptr_add = 0;
+ nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
}
}
@@ -1247,7 +1242,7 @@
unsigned int wr_idx, last_idx;
/* Move the empty entry to the end of the list */
- wr_idx = rx_ring->wr_p & (rx_ring->cnt - 1);
+ wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
last_idx = rx_ring->cnt - 1;
rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag;
@@ -1257,7 +1252,6 @@
memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
rx_ring->wr_p = 0;
rx_ring->rd_p = 0;
- rx_ring->wr_ptr_add = 0;
}
/**
@@ -1350,17 +1344,28 @@
* @dp: NFP Net data path struct
* @r_vec: per-ring structure
* @rxd: Pointer to RX descriptor
+ * @meta: Parsed metadata prepend
* @skb: Pointer to SKB
*/
static void nfp_net_rx_csum(struct nfp_net_dp *dp,
struct nfp_net_r_vector *r_vec,
- struct nfp_net_rx_desc *rxd, struct sk_buff *skb)
+ struct nfp_net_rx_desc *rxd,
+ struct nfp_meta_parsed *meta, struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
if (!(dp->netdev->features & NETIF_F_RXCSUM))
return;
+ if (meta->csum_type) {
+ skb->ip_summed = meta->csum_type;
+ skb->csum = meta->csum;
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_ok++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ return;
+ }
+
if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
u64_stats_update_begin(&r_vec->rx_sync);
r_vec->hw_csum_rx_error++;
@@ -1445,6 +1450,12 @@
meta->mark = get_unaligned_be32(data);
data += 4;
break;
+ case NFP_NET_META_CSUM:
+ meta->csum_type = CHECKSUM_COMPLETE;
+ meta->csum =
+ (__force __wsum)__get_unaligned_cpu32(data);
+ data += 4;
+ break;
default:
return NULL;
}
@@ -1479,18 +1490,26 @@
nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
struct nfp_net_tx_ring *tx_ring,
struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
- unsigned int pkt_len)
+ unsigned int pkt_len, bool *completed)
{
struct nfp_net_tx_buf *txbuf;
struct nfp_net_tx_desc *txd;
int wr_idx;
if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
- nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf, NULL);
- return false;
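+ /* Reclaim completed XDP TX entries once before dropping the packet */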
+ if (!*completed) {
+ nfp_net_xdp_complete(tx_ring);
+ *completed = true;
+ }
+
+ if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+ nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
+ NULL);
+ return false;
+ }
}
- wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
/* Stash the soft descriptor of the head then initialize it */
txbuf = &tx_ring->txbufs[wr_idx];
@@ -1515,7 +1534,7 @@
txd->flags = 0;
txd->mss = 0;
- txd->l4_offset = 0;
+ txd->lso_hdrlen = 0;
tx_ring->wr_p++;
tx_ring->wr_ptr_add++;
@@ -1559,6 +1578,7 @@
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
struct nfp_net_tx_ring *tx_ring;
struct bpf_prog *xdp_prog;
+ bool xdp_tx_cmpl = false;
unsigned int true_bufsz;
struct sk_buff *skb;
int pkts_polled = 0;
@@ -1577,7 +1597,7 @@
dma_addr_t new_dma_addr;
void *new_frag;
- idx = rx_ring->rd_p & (rx_ring->cnt - 1);
+ idx = D_IDX(rx_ring, rx_ring->rd_p);
rxd = &rx_ring->rxds[idx];
if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
@@ -1669,14 +1689,17 @@
if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
tx_ring, rxbuf,
dma_off,
- pkt_len)))
+ pkt_len,
+ &xdp_tx_cmpl)))
trace_xdp_exception(dp->netdev,
xdp_prog, act);
continue;
default:
bpf_warn_invalid_xdp_action(act);
+ /* fall through */
case XDP_ABORTED:
trace_xdp_exception(dp->netdev, xdp_prog, act);
+ /* fall through */
case XDP_DROP:
nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
rxbuf->dma_addr);
@@ -1708,7 +1731,7 @@
skb_record_rx_queue(skb, rx_ring->idx);
skb->protocol = eth_type_trans(skb, dp->netdev);
- nfp_net_rx_csum(dp, r_vec, rxd, skb);
+ nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
@@ -1717,8 +1740,14 @@
napi_gro_receive(&rx_ring->r_vec->napi, skb);
}
- if (xdp_prog && tx_ring->wr_ptr_add)
- nfp_net_tx_xmit_more_flush(tx_ring);
+ if (xdp_prog) {
+ if (tx_ring->wr_ptr_add)
+ nfp_net_tx_xmit_more_flush(tx_ring);
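+ /* Claim the full budget to stay in NAPI while completions are pending */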
+ else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
+ !xdp_tx_cmpl)
+ if (!nfp_net_xdp_complete(tx_ring))
+ pkts_polled = budget;
+ }
rcu_read_unlock();
return pkts_polled;
@@ -1739,11 +1768,8 @@
if (r_vec->tx_ring)
nfp_net_tx_complete(r_vec->tx_ring);
- if (r_vec->rx_ring) {
+ if (r_vec->rx_ring)
pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
- if (r_vec->xdp_ring)
- nfp_net_xdp_complete(r_vec->xdp_ring);
- }
if (pkts_polled < budget)
if (napi_complete_done(napi, pkts_polled))
@@ -1752,10 +1778,273 @@
return pkts_polled;
}
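/*
 * Editor's note: D_IDX(), which replaces the open-coded
 * "ptr & (ring->cnt - 1)" masking in the hunks above, is defined in
 * nfp_net.h (outside this diff). A plausible definition, assuming ring
 * sizes remain powers of two so the free-running read/write pointers
 * wrap cleanly onto descriptor indexes:
 *
 *	#define D_IDX(ring, idx)	((idx) & ((ring)->cnt - 1))
 */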
+/* Control device data path
+ */
+
+static bool
+nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, bool old)
+{
+ unsigned int real_len = skb->len, meta_len = 0;
+ struct nfp_net_tx_ring *tx_ring;
+ struct nfp_net_tx_buf *txbuf;
+ struct nfp_net_tx_desc *txd;
+ struct nfp_net_dp *dp;
+ dma_addr_t dma_addr;
+ int wr_idx;
+
+ dp = &r_vec->nfp_net->dp;
+ tx_ring = r_vec->tx_ring;
+
+ if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
+ nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
+ goto err_free;
+ }
+
+ if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_busy++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ if (!old)
+ __skb_queue_tail(&r_vec->queue, skb);
+ else
+ __skb_queue_head(&r_vec->queue, skb);
+ return true;
+ }
+
+ if (nfp_app_ctrl_has_meta(nn->app)) {
+ if (unlikely(skb_headroom(skb) < 8)) {
+ nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
+ goto err_free;
+ }
+ meta_len = 8;
+ put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
+ put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
+ }
+
+ /* Start with the head skbuf */
+ dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, dma_addr))
+ goto err_dma_warn;
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+ /* Stash the soft descriptor of the head then initialize it */
+ txbuf = &tx_ring->txbufs[wr_idx];
+ txbuf->skb = skb;
+ txbuf->dma_addr = dma_addr;
+ txbuf->fidx = -1;
+ txbuf->pkt_cnt = 1;
+ txbuf->real_len = real_len;
+
+ /* Build TX descriptor */
+ txd = &tx_ring->txds[wr_idx];
+ txd->offset_eop = meta_len | PCIE_DESC_TX_EOP;
+ txd->dma_len = cpu_to_le16(skb_headlen(skb));
+ nfp_desc_set_dma_addr(txd, dma_addr);
+ txd->data_len = cpu_to_le16(skb->len);
+
+ txd->flags = 0;
+ txd->mss = 0;
+ txd->lso_hdrlen = 0;
+
+ tx_ring->wr_p++;
+ tx_ring->wr_ptr_add++;
+ nfp_net_tx_xmit_more_flush(tx_ring);
+
+ return false;
+
+err_dma_warn:
+ nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
+err_free:
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_errors++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
+{
+ struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
+ bool ret;
+
+ spin_lock_bh(&r_vec->lock);
+ ret = nfp_ctrl_tx_one(nn, r_vec, skb, false);
+ spin_unlock_bh(&r_vec->lock);
+
+ return ret;
+}
+
+static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&r_vec->queue)))
+ if (nfp_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
+ return;
+}
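/*
 * Editor's note: nfp_ctrl_tx_one() queues a *new* skb at the tail of
 * the backlog but a *requeued* one (old == true) back at the head, so
 * if the TX ring fills while __nfp_ctrl_tx_queued() drains the queue,
 * pending control messages keep their original order.
 */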
+
+static bool
+nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
+{
+ u32 meta_type, meta_tag;
+
+ if (!nfp_app_ctrl_has_meta(nn->app))
+ return !meta_len;
+
+ if (meta_len != 8)
+ return false;
+
+ meta_type = get_unaligned_be32(data);
+ meta_tag = get_unaligned_be32(data + 4);
+
+ return (meta_type == NFP_NET_META_PORTID &&
+ meta_tag == NFP_META_PORT_ID_CTRL);
+}
+
+static bool
+nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
+ struct nfp_net_rx_buf *rxbuf;
+ struct nfp_net_rx_desc *rxd;
+ dma_addr_t new_dma_addr;
+ struct sk_buff *skb;
+ void *new_frag;
+ int idx;
+
+ idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+ rxd = &rx_ring->rxds[idx];
+ if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+ return false;
+
+ /* Memory barrier to ensure that we won't do other reads
+ * before the DD bit.
+ */
+ dma_rmb();
+
+ rx_ring->rd_p++;
+
+ rxbuf = &rx_ring->rxbufs[idx];
+ meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+ data_len = le16_to_cpu(rxd->rxd.data_len);
+ pkt_len = data_len - meta_len;
+
+ pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ pkt_off += meta_len;
+ else
+ pkt_off += dp->rx_offset;
+ meta_off = pkt_off - meta_len;
+
+ /* Stats update */
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_pkts++;
+ r_vec->rx_bytes += pkt_len;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
+
+ if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
+ nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
+ meta_len);
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ return true;
+ }
+
+ skb = build_skb(rxbuf->frag, dp->fl_bufsz);
+ if (unlikely(!skb)) {
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ return true;
+ }
+ new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
+ if (unlikely(!new_frag)) {
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+ return true;
+ }
+
+ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+ nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
+ skb_reserve(skb, pkt_off);
+ skb_put(skb, pkt_len);
+
+ nfp_app_ctrl_rx(nn->app, skb);
+
+ return true;
+}
+
+static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+{
+ struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
+ struct nfp_net *nn = r_vec->nfp_net;
+ struct nfp_net_dp *dp = &nn->dp;
+
+ while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))
+ continue;
+}
+
+static void nfp_ctrl_poll(unsigned long arg)
+{
+ struct nfp_net_r_vector *r_vec = (void *)arg;
+
+ spin_lock_bh(&r_vec->lock);
+ nfp_net_tx_complete(r_vec->tx_ring);
+ __nfp_ctrl_tx_queued(r_vec);
+ spin_unlock_bh(&r_vec->lock);
+
+ nfp_ctrl_rx(r_vec);
+
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+}
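/*
 * Editor's sketch: the control vNIC has no netdev and hence no NAPI
 * context, so its IRQ handler (nfp_ctrl_irq_rxtx, assigned below but
 * defined outside this hunk) presumably just defers to the tasklet
 * polled above - something along these lines:
 */
static irqreturn_t sketch_ctrl_irq_rxtx(int irq, void *data)
{
	struct nfp_net_r_vector *r_vec = data;

	tasklet_schedule(&r_vec->tasklet);	/* runs nfp_ctrl_poll() */

	return IRQ_HANDLED;
}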
+
/* Setup and Configuration
*/
/**
+ * nfp_net_vecs_init() - Assign IRQs and setup rvecs.
+ * @nn: NFP Network structure
+ */
+static void nfp_net_vecs_init(struct nfp_net *nn)
+{
+ struct nfp_net_r_vector *r_vec;
+ int r;
+
+ nn->lsc_handler = nfp_net_irq_lsc;
+ nn->exn_handler = nfp_net_irq_exn;
+
+ for (r = 0; r < nn->max_r_vecs; r++) {
+ struct msix_entry *entry;
+
+ entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];
+
+ r_vec = &nn->r_vecs[r];
+ r_vec->nfp_net = nn;
+ r_vec->irq_entry = entry->entry;
+ r_vec->irq_vector = entry->vector;
+
+ if (nn->dp.netdev) {
+ r_vec->handler = nfp_net_irq_rxtx;
+ } else {
+ r_vec->handler = nfp_ctrl_irq_rxtx;
+
+ __skb_queue_head_init(&r_vec->queue);
+ spin_lock_init(&r_vec->lock);
+ tasklet_init(&r_vec->tasklet, nfp_ctrl_poll,
+ (unsigned long)r_vec);
+ tasklet_disable(&r_vec->tasklet);
+ }
+
+ cpumask_set_cpu(r, &r_vec->affinity_mask);
+ }
+}
+
+/**
* nfp_net_tx_ring_free() - Free resources allocated to a TX ring
* @tx_ring: TX ring to free
*/
@@ -1803,7 +2092,7 @@
if (!tx_ring->txbufs)
goto err_alloc;
- if (!tx_ring->is_xdp)
+ if (!tx_ring->is_xdp && dp->netdev)
netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
tx_ring->idx);
@@ -2017,15 +2306,22 @@
int err;
/* Setup NAPI */
- netif_napi_add(nn->dp.netdev, &r_vec->napi,
- nfp_net_poll, NAPI_POLL_WEIGHT);
+ if (nn->dp.netdev)
+ netif_napi_add(nn->dp.netdev, &r_vec->napi,
+ nfp_net_poll, NAPI_POLL_WEIGHT);
+ else
+ tasklet_enable(&r_vec->tasklet);
snprintf(r_vec->name, sizeof(r_vec->name),
- "%s-rxtx-%d", nn->dp.netdev->name, idx);
+ "%s-rxtx-%d", nfp_net_name(nn), idx);
err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
r_vec);
if (err) {
- netif_napi_del(&r_vec->napi);
+ if (nn->dp.netdev)
+ netif_napi_del(&r_vec->napi);
+ else
+ tasklet_disable(&r_vec->tasklet);
+
nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
return err;
}
@@ -2043,7 +2339,11 @@
nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
{
irq_set_affinity_hint(r_vec->irq_vector, NULL);
- netif_napi_del(&r_vec->napi);
+ if (nn->dp.netdev)
+ netif_napi_del(&r_vec->napi);
+ else
+ tasklet_disable(&r_vec->tasklet);
+
free_irq(r_vec->irq_vector, r_vec);
}
@@ -2105,17 +2405,16 @@
/**
* nfp_net_write_mac_addr() - Write mac address to the device control BAR
* @nn: NFP Net device to reconfigure
+ * @addr: MAC address to write
*
* Writes the MAC address from the netdev to the device control BAR. Does not
* perform the required reconfig. We do a bit of byte swapping dance because
* firmware is LE.
*/
-static void nfp_net_write_mac_addr(struct nfp_net *nn)
+static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr)
{
- nn_writel(nn, NFP_NET_CFG_MACADDR + 0,
- get_unaligned_be32(nn->dp.netdev->dev_addr));
- nn_writew(nn, NFP_NET_CFG_MACADDR + 6,
- get_unaligned_be16(nn->dp.netdev->dev_addr + 4));
+ nn_writel(nn, NFP_NET_CFG_MACADDR + 0, get_unaligned_be32(addr));
+ nn_writew(nn, NFP_NET_CFG_MACADDR + 6, get_unaligned_be16(addr + 4));
}
static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
@@ -2197,17 +2496,15 @@
new_ctrl = nn->dp.ctrl;
- if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
+ if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) {
nfp_net_rss_write_key(nn);
nfp_net_rss_write_itbl(nn);
nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
update |= NFP_NET_CFG_UPDATE_RSS;
}
- if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
+ if (nn->dp.ctrl & NFP_NET_CFG_CTRL_IRQMOD) {
nfp_net_coalesce_write_cfg(nn);
-
- new_ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
update |= NFP_NET_CFG_UPDATE_IRQMOD;
}
@@ -2222,9 +2519,10 @@
nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ?
0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1);
- nfp_net_write_mac_addr(nn);
+ if (nn->dp.netdev)
+ nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
- nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.netdev->mtu);
+ nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.mtu);
bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA;
nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz);
@@ -2262,6 +2560,86 @@
}
/**
+ * nfp_net_close_stack() - Quiesce the stack (part of close)
+ * @nn: NFP Net device to reconfigure
+ */
+static void nfp_net_close_stack(struct nfp_net *nn)
+{
+ unsigned int r;
+
+ disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
+ netif_carrier_off(nn->dp.netdev);
+ nn->link_up = false;
+
+ for (r = 0; r < nn->dp.num_r_vecs; r++) {
+ disable_irq(nn->r_vecs[r].irq_vector);
+ napi_disable(&nn->r_vecs[r].napi);
+ }
+
+ netif_tx_disable(nn->dp.netdev);
+}
+
+/**
+ * nfp_net_close_free_all() - Free all runtime resources
+ * @nn: NFP Net device to reconfigure
+ */
+static void nfp_net_close_free_all(struct nfp_net *nn)
+{
+ unsigned int r;
+
+ nfp_net_tx_rings_free(&nn->dp);
+ nfp_net_rx_rings_free(&nn->dp);
+
+ for (r = 0; r < nn->dp.num_r_vecs; r++)
+ nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+
+ nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
+ nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
+}
+
+/**
+ * nfp_net_netdev_close() - Called when the device is downed
+ * @netdev: netdev structure
+ */
+static int nfp_net_netdev_close(struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ /* Step 1: Disable RX and TX rings from the Linux kernel perspective
+ */
+ nfp_net_close_stack(nn);
+
+ /* Step 2: Tell NFP
+ */
+ nfp_net_clear_config_and_disable(nn);
+
+ /* Step 3: Free resources
+ */
+ nfp_net_close_free_all(nn);
+
+ nn_dbg(nn, "%s down", netdev->name);
+ return 0;
+}
+
+void nfp_ctrl_close(struct nfp_net *nn)
+{
+ int r;
+
+ rtnl_lock();
+
+ for (r = 0; r < nn->dp.num_r_vecs; r++) {
+ disable_irq(nn->r_vecs[r].irq_vector);
+ tasklet_disable(&nn->r_vecs[r].tasklet);
+ }
+
+ nfp_net_clear_config_and_disable(nn);
+
+ nfp_net_close_free_all(nn);
+
+ rtnl_unlock();
+}
+
+/**
* nfp_net_open_stack() - Start the device from stack's perspective
* @nn: NFP Net device to reconfigure
*/
@@ -2280,16 +2658,10 @@
nfp_net_read_link_status(nn);
}
-static int nfp_net_netdev_open(struct net_device *netdev)
+static int nfp_net_open_alloc_all(struct nfp_net *nn)
{
- struct nfp_net *nn = netdev_priv(netdev);
int err, r;
- /* Step 1: Allocate resources for rings and the like
- * - Request interrupts
- * - Allocate RX and TX ring resources
- * - Setup initial RSS table
- */
err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
nn->exn_name, sizeof(nn->exn_name),
NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
@@ -2319,37 +2691,8 @@
for (r = 0; r < nn->max_r_vecs; r++)
nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
- err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
- if (err)
- goto err_free_rings;
-
- err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
- if (err)
- goto err_free_rings;
-
- /* Step 2: Configure the NFP
- * - Enable rings from 0 to tx_rings/rx_rings - 1.
- * - Write MAC address (in case it changed)
- * - Set the MTU
- * - Set the Freelist buffer size
- * - Enable the FW
- */
- err = nfp_net_set_config_and_enable(nn);
- if (err)
- goto err_free_rings;
-
- /* Step 3: Enable for kernel
- * - put some freelist descriptors on each RX ring
- * - enable NAPI on each ring
- * - enable all TX queues
- * - set link state
- */
- nfp_net_open_stack(nn);
-
return 0;
-err_free_rings:
- nfp_net_tx_rings_free(&nn->dp);
err_free_rx_rings:
nfp_net_rx_rings_free(&nn->dp);
err_cleanup_vec:
@@ -2363,74 +2706,81 @@
return err;
}
-/**
- * nfp_net_close_stack() - Quiescent the stack (part of close)
- * @nn: NFP Net device to reconfigure
- */
-static void nfp_net_close_stack(struct nfp_net *nn)
-{
- unsigned int r;
-
- disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
- netif_carrier_off(nn->dp.netdev);
- nn->link_up = false;
-
- for (r = 0; r < nn->dp.num_r_vecs; r++) {
- disable_irq(nn->r_vecs[r].irq_vector);
- napi_disable(&nn->r_vecs[r].napi);
- }
-
- netif_tx_disable(nn->dp.netdev);
-}
-
-/**
- * nfp_net_close_free_all() - Free all runtime resources
- * @nn: NFP Net device to reconfigure
- */
-static void nfp_net_close_free_all(struct nfp_net *nn)
-{
- unsigned int r;
-
- for (r = 0; r < nn->dp.num_rx_rings; r++) {
- nfp_net_rx_ring_bufs_free(&nn->dp, &nn->dp.rx_rings[r]);
- nfp_net_rx_ring_free(&nn->dp.rx_rings[r]);
- }
- for (r = 0; r < nn->dp.num_tx_rings; r++) {
- nfp_net_tx_ring_bufs_free(&nn->dp, &nn->dp.tx_rings[r]);
- nfp_net_tx_ring_free(&nn->dp.tx_rings[r]);
- }
- for (r = 0; r < nn->dp.num_r_vecs; r++)
- nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
-
- kfree(nn->dp.rx_rings);
- kfree(nn->dp.tx_rings);
-
- nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
- nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
-}
-
-/**
- * nfp_net_netdev_close() - Called when the device is downed
- * @netdev: netdev structure
- */
-static int nfp_net_netdev_close(struct net_device *netdev)
+static int nfp_net_netdev_open(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
+ int err;
- /* Step 1: Disable RX and TX rings from the Linux kernel perspective
+ /* Step 1: Allocate resources for rings and the like
+ * - Request interrupts
+ * - Allocate RX and TX ring resources
+ * - Setup initial RSS table
*/
- nfp_net_close_stack(nn);
+ err = nfp_net_open_alloc_all(nn);
+ if (err)
+ return err;
- /* Step 2: Tell NFP
+ err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
+ if (err)
+ goto err_free_all;
+
+ err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
+ if (err)
+ goto err_free_all;
+
+ /* Step 2: Configure the NFP
+ * - Enable rings from 0 to tx_rings/rx_rings - 1.
+ * - Write MAC address (in case it changed)
+ * - Set the MTU
+ * - Set the Freelist buffer size
+ * - Enable the FW
*/
- nfp_net_clear_config_and_disable(nn);
+ err = nfp_net_set_config_and_enable(nn);
+ if (err)
+ goto err_free_all;
- /* Step 3: Free resources
+ /* Step 3: Enable for kernel
+ * - put some freelist descriptors on each RX ring
+ * - enable NAPI on each ring
+ * - enable all TX queues
+ * - set link state
*/
- nfp_net_close_free_all(nn);
+ nfp_net_open_stack(nn);
- nn_dbg(nn, "%s down", netdev->name);
return 0;
+
+err_free_all:
+ nfp_net_close_free_all(nn);
+ return err;
+}
+
+int nfp_ctrl_open(struct nfp_net *nn)
+{
+ int err, r;
+
+ /* ring dumping depends on vNICs being opened/closed under rtnl */
+ rtnl_lock();
+
+ err = nfp_net_open_alloc_all(nn);
+ if (err)
+ goto err_unlock;
+
+ err = nfp_net_set_config_and_enable(nn);
+ if (err)
+ goto err_free_all;
+
+ for (r = 0; r < nn->dp.num_r_vecs; r++)
+ enable_irq(nn->r_vecs[r].irq_vector);
+
+ rtnl_unlock();
+
+ return 0;
+
+err_free_all:
+ nfp_net_close_free_all(nn);
+err_unlock:
+ rtnl_unlock();
+ return err;
}
static void nfp_net_set_rx_mode(struct net_device *netdev)
@@ -2634,6 +2984,40 @@
return nfp_net_ring_reconfig(nn, dp, NULL);
}
+static int
+nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ /* Priority tagged packets with vlan id 0 are processed by the
+ * NFP as untagged packets
+ */
+ if (!vid)
+ return 0;
+
+ nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
+ nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);
+
+ return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD);
+}
+
+static int
+nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ /* Priority tagged packets with vlan id 0 are processed by the
+ * NFP as untagged packets
+ */
+ if (!vid)
+ return 0;
+
+ nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
+ nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);
+
+ return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
+}
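/*
 * Editor's sketch of the mailbox round trip behind the two callbacks
 * above (the real nfp_net_reconfig_mbox() is outside this hunk): the
 * caller stages its arguments in the mailbox value area, then the
 * command word is written, a reconfig with UPDATE_MBOX set is kicked,
 * and the return code is read back. Offsets are the NFP_NET_CFG_MBOX_*
 * defines added to nfp_net_ctrl.h further down in this diff.
 */
static int sketch_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
{
	int ret;

	nn_writel(nn, NFP_NET_CFG_MBOX_CMD, mbox_cmd);

	ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
	if (ret)
		return ret;

	/* firmware reports 0 on success, a positive error otherwise */
	return -nn_readl(nn, NFP_NET_CFG_MBOX_RET);
}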
+
static void nfp_net_stat64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -2667,33 +3051,16 @@
}
}
-static bool nfp_net_ebpf_capable(struct nfp_net *nn)
-{
- if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
- nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
- return true;
- return false;
-}
-
static int
-nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc)
+nfp_net_setup_tc(struct net_device *netdev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *tc)
{
struct nfp_net *nn = netdev_priv(netdev);
- if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
- return -EOPNOTSUPP;
- if (proto != htons(ETH_P_ALL))
+ if (chain_index)
return -EOPNOTSUPP;
- if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) {
- if (!nn->dp.bpf_offload_xdp)
- return nfp_net_bpf_offload(nn, tc->cls_bpf);
- else
- return -EBUSY;
- }
-
- return -EINVAL;
+ return nfp_app_setup_tc(nn->app, netdev, handle, proto, tc);
}
static int nfp_net_set_features(struct net_device *netdev,
@@ -2710,9 +3077,9 @@
if (changed & NETIF_F_RXCSUM) {
if (features & NETIF_F_RXCSUM)
- new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
+ new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
else
- new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM;
+ new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM_ANY;
}
if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
@@ -2724,9 +3091,10 @@
if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
if (features & (NETIF_F_TSO | NETIF_F_TSO6))
- new_ctrl |= NFP_NET_CFG_CTRL_LSO;
+ new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
+ NFP_NET_CFG_CTRL_LSO;
else
- new_ctrl &= ~NFP_NET_CFG_CTRL_LSO;
+ new_ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
}
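/*
 * Editor's note: "a ?: b" above is the GNU C conditional with the
 * middle operand omitted - it picks the LSO2 capability bit when set
 * and falls back to the version-1 LSO bit otherwise, i.e. shorthand
 * for:
 *
 *	new_ctrl |= (nn->cap & NFP_NET_CFG_CTRL_LSO2) ?
 *		    (nn->cap & NFP_NET_CFG_CTRL_LSO2) :
 *		    NFP_NET_CFG_CTRL_LSO;
 */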
if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
@@ -2743,6 +3111,13 @@
new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN;
}
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ new_ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
+ else
+ new_ctrl &= ~NFP_NET_CFG_CTRL_CTAG_FILTER;
+ }
+
if (changed & NETIF_F_SG) {
if (features & NETIF_F_SG)
new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
@@ -2750,7 +3125,7 @@
new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
}
- if (changed & NETIF_F_HW_TC && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
+ if (changed & NETIF_F_HW_TC && nfp_app_tc_busy(nn->app, nn)) {
nn_err(nn, "Cannot disable HW TC offload while in use\n");
return -EBUSY;
}
@@ -2818,26 +3193,6 @@
return features;
}
-static int
-nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
-{
- struct nfp_net *nn = netdev_priv(netdev);
- int err;
-
- if (!nn->eth_port)
- return -EOPNOTSUPP;
-
- if (!nn->eth_port->is_split)
- err = snprintf(name, len, "p%d", nn->eth_port->label_port);
- else
- err = snprintf(name, len, "p%ds%d", nn->eth_port->label_port,
- nn->eth_port->label_subport);
- if (err >= len)
- return -EINVAL;
-
- return 0;
-}
-
/**
* nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
* @nn: NFP Net device to reconfigure
@@ -2919,34 +3274,6 @@
nfp_net_set_vxlan_port(nn, idx, 0);
}
-static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog)
-{
- struct tc_cls_bpf_offload cmd = {
- .prog = prog,
- };
- int ret;
-
- if (!nfp_net_ebpf_capable(nn))
- return -EINVAL;
-
- if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
- if (!nn->dp.bpf_offload_xdp)
- return prog ? -EBUSY : 0;
- cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY;
- } else {
- if (!prog)
- return 0;
- cmd.command = TC_CLSBPF_ADD;
- }
-
- ret = nfp_net_bpf_offload(nn, &cmd);
- /* Stop offload if replace not possible */
- if (ret && cmd.command == TC_CLSBPF_REPLACE)
- nfp_net_xdp_offload(nn, NULL);
- nn->dp.bpf_offload_xdp = prog && !ret;
- return ret;
-}
-
static int nfp_net_xdp_setup(struct nfp_net *nn, struct netdev_xdp *xdp)
{
struct bpf_prog *old_prog = nn->dp.xdp_prog;
@@ -2959,7 +3286,7 @@
if (prog && nn->dp.xdp_prog) {
prog = xchg(&nn->dp.xdp_prog, prog);
bpf_prog_put(prog);
- nfp_net_xdp_offload(nn, nn->dp.xdp_prog);
+ nfp_app_xdp_offload(nn->app, nn, nn->dp.xdp_prog);
return 0;
}
@@ -2980,7 +3307,7 @@
if (old_prog)
bpf_prog_put(old_prog);
- nfp_net_xdp_offload(nn, nn->dp.xdp_prog);
+ nfp_app_xdp_offload(nn->app, nn, nn->dp.xdp_prog);
return 0;
}
@@ -2994,25 +3321,49 @@
return nfp_net_xdp_setup(nn, xdp);
case XDP_QUERY_PROG:
xdp->prog_attached = !!nn->dp.xdp_prog;
+ xdp->prog_id = nn->dp.xdp_prog ? nn->dp.xdp_prog->aux->id : 0;
return 0;
default:
return -EINVAL;
}
}
-static const struct net_device_ops nfp_net_netdev_ops = {
+static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct sockaddr *saddr = addr;
+ int err;
+
+ err = eth_prepare_mac_addr_change(netdev, addr);
+ if (err)
+ return err;
+
+ nfp_net_write_mac_addr(nn, saddr->sa_data);
+
+ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MACADDR);
+ if (err)
+ return err;
+
+ eth_commit_mac_addr_change(netdev, addr);
+
+ return 0;
+}
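/*
 * Editor's note: the ordering here matters - the new address is
 * written to the device and the MACADDR reconfig must succeed before
 * eth_commit_mac_addr_change() updates dev_addr, so a firmware failure
 * leaves the netdev's and the hardware's view of the address in sync.
 */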
+
+const struct net_device_ops nfp_net_netdev_ops = {
.ndo_open = nfp_net_netdev_open,
.ndo_stop = nfp_net_netdev_close,
.ndo_start_xmit = nfp_net_tx,
.ndo_get_stats64 = nfp_net_stat64,
+ .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
.ndo_setup_tc = nfp_net_setup_tc,
.ndo_tx_timeout = nfp_net_tx_timeout,
.ndo_set_rx_mode = nfp_net_set_rx_mode,
.ndo_change_mtu = nfp_net_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = nfp_net_set_mac_address,
.ndo_set_features = nfp_net_set_features,
.ndo_features_check = nfp_net_features_check,
- .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
+ .ndo_get_phys_port_name = nfp_port_get_phys_port_name,
.ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
.ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
.ndo_xdp = nfp_net_xdp,
@@ -3032,7 +3383,7 @@
nn->fw_ver.resv, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -3043,43 +3394,58 @@
nn->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
nn->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
nn->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
- nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
- nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "",
+ nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO1 " : "",
+ nn->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSO2 " : "",
+ nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS1 " : "",
+ nn->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSS2 " : "",
+ nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
- nfp_net_ebpf_capable(nn) ? "BPF " : "");
+ nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
+ "RXCSUM_COMPLETE " : "",
+ nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
+ nfp_app_extra_cap(nn->app, nn));
}
/**
- * nfp_net_netdev_alloc() - Allocate netdev and related structure
+ * nfp_net_alloc() - Allocate netdev and related structure
* @pdev: PCI device
+ * @needs_netdev: Whether to allocate a netdev for this vNIC
* @max_tx_rings: Maximum number of TX rings supported by device
* @max_rx_rings: Maximum number of RX rings supported by device
*
* This function allocates a netdev device and fills in the initial
- * part of the @struct nfp_net structure.
+ * part of the @struct nfp_net structure. For a control device the
+ * nfp_net structure is allocated without a netdev.
*
* Return: NFP Net device structure, or ERR_PTR on error.
*/
-struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
- unsigned int max_tx_rings,
- unsigned int max_rx_rings)
+struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
+ unsigned int max_tx_rings,
+ unsigned int max_rx_rings)
{
- struct net_device *netdev;
struct nfp_net *nn;
- netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
- max_tx_rings, max_rx_rings);
- if (!netdev)
- return ERR_PTR(-ENOMEM);
+ if (needs_netdev) {
+ struct net_device *netdev;
- SET_NETDEV_DEV(netdev, &pdev->dev);
- nn = netdev_priv(netdev);
+ netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
+ max_tx_rings, max_rx_rings);
+ if (!netdev)
+ return ERR_PTR(-ENOMEM);
- nn->dp.netdev = netdev;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ nn = netdev_priv(netdev);
+ nn->dp.netdev = netdev;
+ } else {
+ nn = vzalloc(sizeof(*nn));
+ if (!nn)
+ return ERR_PTR(-ENOMEM);
+ }
+
nn->dp.dev = &pdev->dev;
nn->pdev = pdev;
@@ -3099,24 +3465,24 @@
nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
spin_lock_init(&nn->reconfig_lock);
- spin_lock_init(&nn->rx_filter_lock);
spin_lock_init(&nn->link_status_lock);
setup_timer(&nn->reconfig_timer,
nfp_net_reconfig_timer, (unsigned long)nn);
- setup_timer(&nn->rx_filter_stats_timer,
- nfp_net_filter_stats_timer, (unsigned long)nn);
return nn;
}
/**
- * nfp_net_netdev_free() - Undo what @nfp_net_netdev_alloc() did
+ * nfp_net_free() - Undo what @nfp_net_alloc() did
* @nn: NFP Net device to reconfigure
*/
-void nfp_net_netdev_free(struct nfp_net *nn)
+void nfp_net_free(struct nfp_net *nn)
{
- free_netdev(nn->dp.netdev);
+ if (nn->dp.netdev)
+ free_netdev(nn->dp.netdev);
+ else
+ vfree(nn);
}
/**
@@ -3187,48 +3553,13 @@
nn->tx_coalesce_max_frames = 64;
}
-/**
- * nfp_net_netdev_init() - Initialise/finalise the netdev structure
- * @netdev: netdev structure
- *
- * Return: 0 on success or negative errno on error.
- */
-int nfp_net_netdev_init(struct net_device *netdev)
+static void nfp_net_netdev_init(struct nfp_net *nn)
{
- struct nfp_net *nn = netdev_priv(netdev);
- int err;
+ struct net_device *netdev = nn->dp.netdev;
- nn->dp.chained_metadata_format = nn->fw_ver.major > 3;
+ nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
- nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
-
- /* Get some of the read-only fields from the BAR */
- nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
- nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
-
- nfp_net_write_mac_addr(nn);
-
- /* Determine RX packet/metadata boundary offset */
- if (nn->fw_ver.major >= 2) {
- u32 reg;
-
- reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
- if (reg > NFP_NET_MAX_PREPEND) {
- nn_err(nn, "Invalid rx offset: %d\n", reg);
- return -EINVAL;
- }
- nn->dp.rx_offset = reg;
- } else {
- nn->dp.rx_offset = NFP_NET_RX_OFFSET;
- }
-
- /* Set default MTU and Freelist buffer size */
- if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
- netdev->mtu = nn->max_mtu;
- else
- netdev->mtu = NFP_NET_DEFAULT_MTU;
- nn->dp.mtu = netdev->mtu;
- nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
+ netdev->mtu = nn->dp.mtu;
/* Advertise/enable offloads based on capabilities
*
@@ -3236,10 +3567,13 @@
* and netdev->hw_features advertises which features are
* supported. By default we enable most features.
*/
+ if (nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
+ netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
netdev->hw_features = NETIF_F_HIGHDMA;
- if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM) {
+ if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY) {
netdev->hw_features |= NETIF_F_RXCSUM;
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
+ nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
}
if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -3249,15 +3583,14 @@
netdev->hw_features |= NETIF_F_SG;
nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
}
- if ((nn->cap & NFP_NET_CFG_CTRL_LSO) && nn->fw_ver.major > 2) {
+ if ((nn->cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
+ nn->cap & NFP_NET_CFG_CTRL_LSO2) {
netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_LSO;
+ nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
+ NFP_NET_CFG_CTRL_LSO;
}
- if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
+ if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
netdev->hw_features |= NETIF_F_RXHASH;
- nfp_net_rss_init(nn);
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_RSS;
- }
if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
if (nn->cap & NFP_NET_CFG_CTRL_LSO)
@@ -3275,17 +3608,89 @@
nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
}
if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
- netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+ if (nn->cap & NFP_NET_CFG_CTRL_LSO2) {
+ nn_warn(nn, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
+ } else {
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+ }
+ }
+ if (nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER) {
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
}
netdev->features = netdev->hw_features;
- if (nfp_net_ebpf_capable(nn))
+ if (nfp_app_has_tc(nn->app))
netdev->hw_features |= NETIF_F_HW_TC;
/* Advertise but disable TSO by default. */
netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
+
+ /* Finalise the netdev setup */
+ netdev->netdev_ops = &nfp_net_netdev_ops;
+ netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
+
+ /* MTU range: 68 - hw-specific max */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = nn->max_mtu;
+
+ netif_carrier_off(netdev);
+
+ nfp_net_set_ethtool_ops(netdev);
+}
+
+/**
+ * nfp_net_init() - Initialise/finalise the nfp_net structure
+ * @nn: NFP Net device structure
+ *
+ * Return: 0 on success or negative errno on error.
+ */
+int nfp_net_init(struct nfp_net *nn)
+{
+ int err;
+
+ nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
+
+ /* Get some of the read-only fields from the BAR */
+ nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
+ nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
+
+ /* Chained metadata is signalled by capabilities except in version 4 */
+ nn->dp.chained_metadata_format = nn->fw_ver.major == 4 ||
+ !nn->dp.netdev ||
+ nn->cap & NFP_NET_CFG_CTRL_CHAIN_META;
+ if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4)
+ nn->cap &= ~NFP_NET_CFG_CTRL_RSS;
+
+ /* Determine RX packet/metadata boundary offset */
+ if (nn->fw_ver.major >= 2) {
+ u32 reg;
+
+ reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
+ if (reg > NFP_NET_MAX_PREPEND) {
+ nn_err(nn, "Invalid rx offset: %d\n", reg);
+ return -EINVAL;
+ }
+ nn->dp.rx_offset = reg;
+ } else {
+ nn->dp.rx_offset = NFP_NET_RX_OFFSET;
+ }
+
+ /* Set default MTU and Freelist buffer size */
+ if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
+ nn->dp.mtu = nn->max_mtu;
+ else
+ nn->dp.mtu = NFP_NET_DEFAULT_MTU;
+ nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
+
+ if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
+ nfp_net_rss_init(nn);
+ nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
+ NFP_NET_CFG_CTRL_RSS;
+ }
/* Allow L2 Broadcast and Multicast through by default, if supported */
if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
@@ -3299,6 +3704,9 @@
nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
}
+ if (nn->dp.netdev)
+ nfp_net_netdev_init(nn);
+
/* Stash the re-configuration queue away. First odd queue in TX Bar */
nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
@@ -3311,34 +3719,24 @@
if (err)
return err;
- /* Finalise the netdev setup */
- netdev->netdev_ops = &nfp_net_netdev_ops;
- netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
+ nfp_net_vecs_init(nn);
- /* MTU range: 68 - hw-specific max */
- netdev->min_mtu = ETH_MIN_MTU;
- netdev->max_mtu = nn->max_mtu;
-
- netif_carrier_off(netdev);
-
- nfp_net_set_ethtool_ops(netdev);
- nfp_net_vecs_init(netdev);
-
- return register_netdev(netdev);
+ if (!nn->dp.netdev)
+ return 0;
+ return register_netdev(nn->dp.netdev);
}
/**
- * nfp_net_netdev_clean() - Undo what nfp_net_netdev_init() did.
- * @netdev: netdev structure
+ * nfp_net_clean() - Undo what nfp_net_init() did.
+ * @nn: NFP Net device structure
*/
-void nfp_net_netdev_clean(struct net_device *netdev)
+void nfp_net_clean(struct nfp_net *nn)
{
- struct nfp_net *nn = netdev_priv(netdev);
+ if (!nn->dp.netdev)
+ return;
unregister_netdev(nn->dp.netdev);
if (nn->dp.xdp_prog)
bpf_prog_put(nn->dp.xdp_prog);
- if (nn->dp.bpf_offload_xdp)
- nfp_net_xdp_offload(nn, NULL);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index d04ccc9..e5e94e0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -71,6 +71,10 @@
#define NFP_NET_META_FIELD_SIZE 4
#define NFP_NET_META_HASH 1 /* next field carries hash type */
#define NFP_NET_META_MARK 2
+#define NFP_NET_META_PORTID 5
+#define NFP_NET_META_CSUM 6 /* checksum complete type */
+
+#define NFP_META_PORT_ID_CTRL ~0U
/**
* Hash type pre-pended when a RSS hash was computed
@@ -119,9 +123,10 @@
#define NFP_NET_CFG_CTRL_TXVLAN (0x1 << 7) /* Enable VLAN insert */
#define NFP_NET_CFG_CTRL_SCATTER (0x1 << 8) /* Scatter DMA */
#define NFP_NET_CFG_CTRL_GATHER (0x1 << 9) /* Gather DMA */
-#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO */
+#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO (version 1) */
+#define NFP_NET_CFG_CTRL_CTAG_FILTER (0x1 << 11) /* VLAN CTAG filtering */
#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */
-#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS */
+#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS (version 1) */
#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */
#define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */
#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */
@@ -131,6 +136,20 @@
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */
#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* NVGRE tunnel support */
#define NFP_NET_CFG_CTRL_BPF (0x1 << 27) /* BPF offload capable */
+#define NFP_NET_CFG_CTRL_LSO2 (0x1 << 28) /* LSO/TSO (version 2) */
+#define NFP_NET_CFG_CTRL_RSS2 (0x1 << 29) /* RSS (version 2) */
+#define NFP_NET_CFG_CTRL_CSUM_COMPLETE (0x1 << 30) /* Checksum complete */
+#define NFP_NET_CFG_CTRL_LIVE_ADDR (0x1 << 31) /* live MAC addr change */
+
+#define NFP_NET_CFG_CTRL_LSO_ANY (NFP_NET_CFG_CTRL_LSO | \
+ NFP_NET_CFG_CTRL_LSO2)
+#define NFP_NET_CFG_CTRL_RSS_ANY (NFP_NET_CFG_CTRL_RSS | \
+ NFP_NET_CFG_CTRL_RSS2)
+#define NFP_NET_CFG_CTRL_RXCSUM_ANY (NFP_NET_CFG_CTRL_RXCSUM | \
+ NFP_NET_CFG_CTRL_CSUM_COMPLETE)
+#define NFP_NET_CFG_CTRL_CHAIN_META (NFP_NET_CFG_CTRL_RSS2 | \
+ NFP_NET_CFG_CTRL_CSUM_COMPLETE)
+
#define NFP_NET_CFG_UPDATE 0x0004
#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */
#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */
@@ -143,6 +162,8 @@
#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */
#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */
#define NFP_NET_CFG_UPDATE_BPF (0x1 << 10) /* BPF program load */
+#define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */
+#define NFP_NET_CFG_UPDATE_MBOX (0x1 << 12) /* Mailbox update */
#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* An error occurred */
#define NFP_NET_CFG_TXRS_ENABLE 0x0008
#define NFP_NET_CFG_RXRS_ENABLE 0x0010
@@ -381,4 +402,29 @@
#define NFP_NET_CFG_RXR_STATS(_x) (NFP_NET_CFG_RXR_STATS_BASE + \
((_x) * 0x10))
+/**
+ * General use mailbox area (0x1800 - 0x19ff)
+ * 4B for the update command and 4B for the return code,
+ * followed by up to 504B of variable-length value
+ */
+#define NFP_NET_CFG_MBOX_CMD 0x1800
+#define NFP_NET_CFG_MBOX_RET 0x1804
+#define NFP_NET_CFG_MBOX_VAL 0x1808
+#define NFP_NET_CFG_MBOX_VAL_MAX_SZ 0x1F8
+
+#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
+#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
+
+/**
+ * VLAN filtering using general use mailbox
+ * @NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
+ * @NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter
+ * @NFP_NET_CFG_VLAN_FILTER_PROTO: VLAN proto to filter
+ * @NFP_NET_CFG_VLAN_FILTER_SZ: Size of the VLAN filter mailbox in bytes
+ */
+#define NFP_NET_CFG_VLAN_FILTER NFP_NET_CFG_MBOX_VAL
+#define NFP_NET_CFG_VLAN_FILTER_VID NFP_NET_CFG_VLAN_FILTER
+#define NFP_NET_CFG_VLAN_FILTER_PROTO (NFP_NET_CFG_VLAN_FILTER + 2)
+#define NFP_NET_CFG_VLAN_FILTER_SZ 0x0004
+
#endif /* _NFP_NET_CTRL_H_ */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index 4077c59..40217ece 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -54,7 +54,7 @@
goto out;
nn = r_vec->nfp_net;
rx_ring = r_vec->rx_ring;
- if (!netif_running(nn->dp.netdev))
+ if (!nfp_net_running(nn))
goto out;
rxd_cnt = rx_ring->cnt;
@@ -62,7 +62,7 @@
fl_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_fl);
fl_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_fl);
- seq_printf(file, "RX[%02d,%02d]: cnt=%d dma=%pad host=%p H_RD=%d H_WR=%d FL_RD=%d FL_WR=%d\n",
+ seq_printf(file, "RX[%02d,%02d]: cnt=%u dma=%pad host=%p H_RD=%u H_WR=%u FL_RD=%u FL_WR=%u\n",
rx_ring->idx, rx_ring->fl_qcidx,
rx_ring->cnt, &rx_ring->dma, rx_ring->rxds,
rx_ring->rd_p, rx_ring->wr_p, fl_rd_p, fl_wr_p);
@@ -138,7 +138,7 @@
if (!r_vec->nfp_net || !tx_ring)
goto out;
nn = r_vec->nfp_net;
- if (!netif_running(nn->dp.netdev))
+ if (!nfp_net_running(nn))
goto out;
txd_cnt = tx_ring->cnt;
@@ -146,7 +146,7 @@
d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q);
- seq_printf(file, "TX[%02d,%02d%s]: cnt=%d dma=%pad host=%p H_RD=%d H_WR=%d D_RD=%d D_WR=%d\n",
+ seq_printf(file, "TX[%02d,%02d%s]: cnt=%u dma=%pad host=%p H_RD=%u H_WR=%u D_RD=%u D_WR=%u\n",
tx_ring->idx, tx_ring->qcidx,
tx_ring == r_vec->tx_ring ? "" : "xdp",
tx_ring->cnt, &tx_ring->dma, tx_ring->txds,
@@ -200,7 +200,7 @@
.llseek = seq_lseek
};
-void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id)
+void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id)
{
struct dentry *queues, *tx, *rx, *xdp;
char name[20];
@@ -209,7 +209,10 @@
if (IS_ERR_OR_NULL(nfp_dir))
return;
- sprintf(name, "port%d", id);
+ if (nfp_net_is_data_vnic(nn))
+ sprintf(name, "vnic%d", id);
+ else
+ strcpy(name, "ctrl-vnic");
nn->debugfs_dir = debugfs_create_dir(name, ddir);
if (IS_ERR_OR_NULL(nn->debugfs_dir))
return;
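/*
 * Editor's sketch: nfp_net_running() and nfp_net_is_data_vnic(), used
 * in the debugfs hunks above, live in nfp_net.h (outside this diff).
 * Plausible definitions, given that control vNICs have no netdev for
 * netif_running() to inspect:
 */
static inline bool sketch_nfp_net_running(struct nfp_net *nn)
{
	return nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE;
}

static inline bool sketch_nfp_net_is_data_vnic(struct nfp_net *nn)
{
	return !!nn->dp.netdev;		/* ctrl vNICs are netdev-less */
}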
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index abbb47e..6e31355 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -50,8 +50,10 @@
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_nsp.h"
+#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
+#include "nfp_port.h"
enum nfp_dump_diag {
NFP_DUMP_NSP_DIAG = 0,
@@ -134,14 +136,14 @@
#define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \
NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN)
-static void nfp_net_get_nspinfo(struct nfp_net *nn, char *version)
+static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
{
struct nfp_nsp *nsp;
- if (!nn->cpp)
+ if (!app)
return;
- nsp = nfp_nsp_open(nn->cpp);
+ nsp = nfp_nsp_open(app->cpp);
if (IS_ERR(nsp))
return;
@@ -162,11 +164,12 @@
sizeof(drvinfo->driver));
strlcpy(drvinfo->version, nfp_driver_version, sizeof(drvinfo->version));
- nfp_net_get_nspinfo(nn, nsp_version);
+ nfp_net_get_nspinfo(nn->app, nsp_version);
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d.%d.%d %s",
+ "%d.%d.%d.%d %s %s %s",
nn->fw_ver.resv, nn->fw_ver.class,
- nn->fw_ver.major, nn->fw_ver.minor, nsp_version);
+ nn->fw_ver.major, nn->fw_ver.minor, nsp_version,
+ nfp_app_mip_name(nn->app), nfp_app_name(nn->app));
strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
sizeof(drvinfo->bus_info));
@@ -195,37 +198,38 @@
[NFP_NET_CFG_STS_LINK_RATE_50G] = SPEED_50000,
[NFP_NET_CFG_STS_LINK_RATE_100G] = SPEED_100000,
};
- struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ struct nfp_net *nn;
u32 sts, ls;
+ /* Init to unknowns */
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
cmd->base.port = PORT_OTHER;
cmd->base.speed = SPEED_UNKNOWN;
cmd->base.duplex = DUPLEX_UNKNOWN;
- if (nn->eth_port)
- cmd->base.autoneg = nn->eth_port->aneg != NFP_ANEG_DISABLED ?
+ port = nfp_port_from_netdev(netdev);
+ eth_port = nfp_port_get_eth_port(port);
+ if (eth_port)
+ cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
AUTONEG_ENABLE : AUTONEG_DISABLE;
if (!netif_carrier_ok(netdev))
return 0;
/* Use link speed from ETH table if available, otherwise try the BAR */
- if (nn->eth_port) {
- int err;
-
- if (nfp_net_link_changed_read_clear(nn)) {
- err = nfp_net_refresh_eth_port(nn);
- if (err)
- return err;
- }
-
- cmd->base.port = nn->eth_port->port_type;
- cmd->base.speed = nn->eth_port->speed;
+ if (eth_port) {
+ cmd->base.port = eth_port->port_type;
+ cmd->base.speed = eth_port->speed;
cmd->base.duplex = DUPLEX_FULL;
return 0;
}
+ if (!nfp_netdev_is_nfp_net(netdev))
+ return -EOPNOTSUPP;
+ nn = netdev_priv(netdev);
+
sts = nn_readl(nn, NFP_NET_CFG_STS);
ls = FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts);
@@ -246,19 +250,22 @@
nfp_net_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
- struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
struct nfp_nsp *nsp;
int err;
- if (!nn->eth_port)
+ port = nfp_port_from_netdev(netdev);
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
return -EOPNOTSUPP;
if (netif_running(netdev)) {
- nn_warn(nn, "Changing settings not allowed on an active interface. It may cause the port to be disabled until reboot.\n");
+ netdev_warn(netdev, "Changing settings not allowed on an active interface. It may cause the port to be disabled until reboot.\n");
return -EBUSY;
}
- nsp = nfp_eth_config_start(nn->cpp, nn->eth_port->index);
+ nsp = nfp_eth_config_start(port->app->cpp, eth_port->index);
if (IS_ERR(nsp))
return PTR_ERR(nsp);
@@ -267,7 +274,7 @@
if (err)
goto err_bad_set;
if (cmd->base.speed != SPEED_UNKNOWN) {
- u32 speed = cmd->base.speed / nn->eth_port->lanes;
+ u32 speed = cmd->base.speed / eth_port->lanes;
err = __nfp_eth_set_speed(nsp, speed);
if (err)
@@ -278,7 +285,7 @@
if (err > 0)
return 0; /* no change */
- nfp_net_refresh_port_table(nn);
+ nfp_net_refresh_port_table(port);
return err;
@@ -496,7 +503,7 @@
cmd->data = 0;
- if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
+ if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
return -EOPNOTSUPP;
nfp_rss_flag = ethtool_flow_to_nfp_flag(cmd->flow_type);
@@ -533,7 +540,7 @@
u32 nfp_rss_flag;
int err;
- if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
+ if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
return -EOPNOTSUPP;
/* RSS only supports IP SA/DA and L4 src/dst ports */
@@ -595,7 +602,7 @@
{
struct nfp_net *nn = netdev_priv(netdev);
- if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
+ if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
return 0;
return ARRAY_SIZE(nn->rss_itbl);
@@ -605,7 +612,7 @@
{
struct nfp_net *nn = netdev_priv(netdev);
- if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
+ if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
return -EOPNOTSUPP;
return nfp_net_rss_key_sz(nn);
@@ -617,7 +624,7 @@
struct nfp_net *nn = netdev_priv(netdev);
int i;
- if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
+ if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
return -EOPNOTSUPP;
if (indir)
@@ -641,7 +648,7 @@
struct nfp_net *nn = netdev_priv(netdev);
int i;
- if (!(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
+ if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) ||
!(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == nn->rss_hfunc))
return -EOPNOTSUPP;
@@ -706,13 +713,13 @@
struct nfp_resource *res;
int ret;
- if (!nn->cpp)
+ if (!nn->app)
return -EOPNOTSUPP;
dump->version = 1;
dump->flag = NFP_DUMP_NSP_DIAG;
- res = nfp_resource_acquire(nn->cpp, NFP_RESOURCE_NSP_DIAG);
+ res = nfp_resource_acquire(nn->app->cpp, NFP_RESOURCE_NSP_DIAG);
if (IS_ERR(res))
return PTR_ERR(res);
@@ -722,7 +729,7 @@
goto exit_release;
}
- ret = nfp_cpp_read(nn->cpp, nfp_resource_cpp_id(res),
+ ret = nfp_cpp_read(nn->app->cpp, nfp_resource_cpp_id(res),
nfp_resource_address(res),
buffer, dump->len);
if (ret != dump->len)
@@ -743,7 +750,7 @@
{
struct nfp_net *nn = netdev_priv(netdev);
- if (!nn->cpp)
+ if (!nn->app)
return -EOPNOTSUPP;
if (val->flag != NFP_DUMP_NSP_DIAG)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 8cb87cb..bc2bc08 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -43,6 +43,7 @@
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
@@ -54,20 +55,21 @@
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"
-
+#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_main.h"
+#include "nfp_port.h"
#define NFP_PF_CSR_SLICE_SIZE (32 * 1024)
-static int nfp_is_ready(struct nfp_cpp *cpp)
+static int nfp_is_ready(struct nfp_pf *pf)
{
const char *cp;
long state;
int err;
- cp = nfp_hwinfo_lookup(cpp, "board.state");
+ cp = nfp_hwinfo_lookup(pf->hwinfo, "board.state");
if (!cp)
return 0;
@@ -132,30 +134,32 @@
/**
* nfp_net_get_mac_addr() - Get the MAC address.
+ * @pf: NFP PF handle
* @nn: NFP Network structure
- * @cpp: NFP CPP handle
* @id: NFP port id
*
* First try to get the MAC address from NSP ETH table. If that
* fails try HWInfo. As a last resort generate a random address.
*/
-static void
-nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_cpp *cpp, unsigned int id)
+void
+nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
+ struct nfp_eth_table_port *eth_port;
struct nfp_net_dp *dp = &nn->dp;
u8 mac_addr[ETH_ALEN];
const char *mac_str;
char name[32];
- if (nn->eth_port) {
- ether_addr_copy(dp->netdev->dev_addr, nn->eth_port->mac_addr);
- ether_addr_copy(dp->netdev->perm_addr, nn->eth_port->mac_addr);
+ eth_port = __nfp_port_get_eth_port(nn->port);
+ if (eth_port) {
+ ether_addr_copy(dp->netdev->dev_addr, eth_port->mac_addr);
+ ether_addr_copy(dp->netdev->perm_addr, eth_port->mac_addr);
return;
}
snprintf(name, sizeof(name), "eth%d.mac", id);
- mac_str = nfp_hwinfo_lookup(cpp, name);
+ mac_str = nfp_hwinfo_lookup(pf->hwinfo, name);
if (!mac_str) {
dev_warn(dp->dev, "Can't lookup MAC address. Generate\n");
eth_hw_addr_random(dp->netdev);
@@ -175,7 +179,7 @@
ether_addr_copy(dp->netdev->perm_addr, mac_addr);
}
-static struct nfp_eth_table_port *
+struct nfp_eth_table_port *
nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int id)
{
int i;
@@ -187,267 +191,265 @@
return NULL;
}
-static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
+static int
+nfp_net_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format,
+ unsigned int default_val)
{
char name[256];
- u16 interface;
- int pcie_pf;
int err = 0;
u64 val;
- interface = nfp_cpp_interface(pf->cpp);
- pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface);
+ snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp));
- snprintf(name, sizeof(name), "nfd_cfg_pf%d_num_ports", pcie_pf);
-
- val = nfp_rtsym_read_le(pf->cpp, name, &err);
- /* Default to one port */
+ val = nfp_rtsym_read_le(pf->rtbl, name, &err);
if (err) {
- if (err != -ENOENT)
- nfp_err(pf->cpp, "Unable to read adapter port count\n");
- val = 1;
+ if (err == -ENOENT)
+ return default_val;
+ nfp_err(pf->cpp, "Unable to read symbol %s\n", name);
+ return err;
}
return val;
}
-static unsigned int
-nfp_net_pf_total_qcs(struct nfp_pf *pf, void __iomem *ctrl_bar,
- unsigned int stride, u32 start_off, u32 num_off)
+static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
- unsigned int i, min_qc, max_qc;
-
- min_qc = readl(ctrl_bar + start_off);
- max_qc = min_qc;
-
- for (i = 0; i < pf->num_ports; i++) {
- /* To make our lives simpler only accept configuration where
- * queues are allocated to PFs in order (queues of PFn all have
- * indexes lower than PFn+1).
- */
- if (max_qc > readl(ctrl_bar + start_off))
- return 0;
-
- max_qc = readl(ctrl_bar + start_off);
- max_qc += readl(ctrl_bar + num_off) * stride;
- ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
- }
-
- return max_qc - min_qc;
+ return nfp_net_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
}
-static u8 __iomem *nfp_net_pf_map_ctrl_bar(struct nfp_pf *pf)
+static int nfp_net_pf_get_app_id(struct nfp_pf *pf)
{
- const struct nfp_rtsym *ctrl_sym;
- u8 __iomem *ctrl_bar;
+ return nfp_net_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
+ NFP_APP_CORE_NIC);
+}
+
+static u8 __iomem *
+nfp_net_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
+ unsigned int min_size, struct nfp_cpp_area **area)
+{
+ const struct nfp_rtsym *sym;
char pf_symbol[256];
- u16 interface;
- int pcie_pf;
+ u8 __iomem *mem;
- interface = nfp_cpp_interface(pf->cpp);
- pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface);
+ snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt,
+ nfp_cppcore_pcie_unit(pf->cpp));
- snprintf(pf_symbol, sizeof(pf_symbol), "_pf%d_net_bar0", pcie_pf);
-
- ctrl_sym = nfp_rtsym_lookup(pf->cpp, pf_symbol);
- if (!ctrl_sym) {
- dev_err(&pf->pdev->dev,
- "Failed to find PF BAR0 symbol %s\n", pf_symbol);
- return NULL;
+ sym = nfp_rtsym_lookup(pf->rtbl, pf_symbol);
+ if (!sym) {
+ nfp_err(pf->cpp, "Failed to find PF symbol %s\n", pf_symbol);
+ return (u8 __iomem *)ERR_PTR(-ENOENT);
}
- if (ctrl_sym->size < pf->num_ports * NFP_PF_CSR_SLICE_SIZE) {
- dev_err(&pf->pdev->dev,
- "PF BAR0 too small to contain %d ports\n",
- pf->num_ports);
- return NULL;
+ if (sym->size < min_size) {
+ nfp_err(pf->cpp, "PF symbol %s too small\n", pf_symbol);
+ return (u8 __iomem *)ERR_PTR(-EINVAL);
}
- ctrl_bar = nfp_net_map_area(pf->cpp, "net.ctrl",
- ctrl_sym->domain, ctrl_sym->target,
- ctrl_sym->addr, ctrl_sym->size,
- &pf->ctrl_area);
- if (IS_ERR(ctrl_bar)) {
- dev_err(&pf->pdev->dev, "Failed to map PF BAR0: %ld\n",
- PTR_ERR(ctrl_bar));
- return NULL;
+ mem = nfp_net_map_area(pf->cpp, name, sym->domain, sym->target,
+ sym->addr, sym->size, area);
+ if (IS_ERR(mem)) {
+ nfp_err(pf->cpp, "Failed to map PF symbol %s: %ld\n",
+ pf_symbol, PTR_ERR(mem));
+ return mem;
}
- return ctrl_bar;
+ return mem;
}
-static void nfp_net_pf_free_netdevs(struct nfp_pf *pf)
+static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
- struct nfp_net *nn;
+ nfp_port_free(nn->port);
+ list_del(&nn->vnic_list);
+ pf->num_vnics--;
+ nfp_net_free(nn);
+}
- while (!list_empty(&pf->ports)) {
- nn = list_first_entry(&pf->ports, struct nfp_net, port_list);
- list_del(&nn->port_list);
- pf->num_netdevs--;
+static void nfp_net_pf_free_vnics(struct nfp_pf *pf)
+{
+ struct nfp_net *nn, *next;
- nfp_net_netdev_free(nn);
- }
+ list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list)
+ if (nfp_net_is_data_vnic(nn))
+ nfp_net_pf_free_vnic(pf, nn);
}
static struct nfp_net *
-nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
- void __iomem *tx_bar, void __iomem *rx_bar,
- int stride, struct nfp_net_fw_version *fw_ver,
- struct nfp_eth_table_port *eth_port)
+nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
+ void __iomem *ctrl_bar, void __iomem *qc_bar,
+ int stride, unsigned int eth_id)
{
- u32 n_tx_rings, n_rx_rings;
+ u32 tx_base, rx_base, n_tx_rings, n_rx_rings;
struct nfp_net *nn;
+ int err;
+ tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
+ rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
- /* Allocate and initialise the netdev */
- nn = nfp_net_netdev_alloc(pf->pdev, n_tx_rings, n_rx_rings);
+ /* Allocate and initialise the vNIC */
+ nn = nfp_net_alloc(pf->pdev, needs_netdev, n_tx_rings, n_rx_rings);
if (IS_ERR(nn))
return nn;
- nn->cpp = pf->cpp;
- nn->fw_ver = *fw_ver;
+ nn->app = pf->app;
+ nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
nn->dp.ctrl_bar = ctrl_bar;
- nn->tx_bar = tx_bar;
- nn->rx_bar = rx_bar;
+ nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
+ nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->dp.is_vf = 0;
nn->stride_rx = stride;
nn->stride_tx = stride;
- nn->eth_port = eth_port;
+
+ if (needs_netdev) {
+ err = nfp_app_vnic_init(pf->app, nn, eth_id);
+ if (err) {
+ nfp_net_free(nn);
+ return ERR_PTR(err);
+ }
+ }
+
+ pf->num_vnics++;
+ list_add_tail(&nn->vnic_list, &pf->vnics);
return nn;
}
static int
-nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
- unsigned int id)
+nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
int err;
- /* Get MAC address */
- nfp_net_get_mac_addr(nn, pf->cpp, id);
-
/* Get ME clock frequency from ctrl BAR
* XXX for now frequency is hardcoded until we figure out how
* to get the value from nfp-hwinfo into ctrl bar
*/
nn->me_freq_mhz = 1200;
- err = nfp_net_netdev_init(nn->dp.netdev);
+ err = nfp_net_init(nn);
if (err)
return err;
- nfp_net_debugfs_port_add(nn, pf->ddir, id);
+ nfp_net_debugfs_vnic_add(nn, pf->ddir, id);
+
+ if (nn->port) {
+ err = nfp_devlink_port_register(pf->app, nn->port);
+ if (err)
+ goto err_dfs_clean;
+ }
nfp_net_info(nn);
return 0;
+
+err_dfs_clean:
+ nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
+ nfp_net_clean(nn);
+ return err;
}
static int
-nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
- void __iomem *tx_bar, void __iomem *rx_bar,
- int stride, struct nfp_net_fw_version *fw_ver)
+nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
+ void __iomem *qc_bar, int stride)
{
- u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base;
- struct nfp_eth_table_port *eth_port;
struct nfp_net *nn;
unsigned int i;
int err;
- prev_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
- prev_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
-
- for (i = 0; i < pf->num_ports; i++) {
- tgt_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
- tgt_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
- tx_bar += (tgt_tx_base - prev_tx_base) * NFP_QCP_QUEUE_ADDR_SZ;
- rx_bar += (tgt_rx_base - prev_rx_base) * NFP_QCP_QUEUE_ADDR_SZ;
- prev_tx_base = tgt_tx_base;
- prev_rx_base = tgt_rx_base;
-
- eth_port = nfp_net_find_port(pf->eth_tbl, i);
- if (eth_port && eth_port->override_changed) {
- nfp_warn(pf->cpp, "Config changed for port #%d, reboot required before port will be operational\n", i);
- } else {
- nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar,
- rx_bar, stride,
- fw_ver, eth_port);
- if (IS_ERR(nn)) {
- err = PTR_ERR(nn);
- goto err_free_prev;
- }
- list_add_tail(&nn->port_list, &pf->ports);
- pf->num_netdevs++;
+ for (i = 0; i < pf->max_data_vnics; i++) {
+ nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar,
+ stride, i);
+ if (IS_ERR(nn)) {
+ err = PTR_ERR(nn);
+ goto err_free_prev;
}
ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
+
+ /* Kill the vNIC if app init marked it as invalid */
+ if (nn->port && nn->port->type == NFP_PORT_INVALID) {
+ nfp_net_pf_free_vnic(pf, nn);
+ continue;
+ }
}
- if (list_empty(&pf->ports))
+ if (list_empty(&pf->vnics))
return -ENODEV;
return 0;
err_free_prev:
- nfp_net_pf_free_netdevs(pf);
+ nfp_net_pf_free_vnics(pf);
return err;
}
-static int
-nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
- void __iomem *ctrl_bar, void __iomem *tx_bar,
- void __iomem *rx_bar, int stride,
- struct nfp_net_fw_version *fw_ver)
+static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
- unsigned int id, wanted_irqs, num_irqs, ports_left, irqs_left;
- struct nfp_net *nn;
- int err;
+ if (nn->port)
+ nfp_devlink_port_unregister(nn->port);
+ nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
+ nfp_net_clean(nn);
+ nfp_app_vnic_clean(pf->app, nn);
+}
- /* Allocate the netdevs and do basic init */
- err = nfp_net_pf_alloc_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
- stride, fw_ver);
- if (err)
- return err;
+static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf)
+{
+ unsigned int wanted_irqs, num_irqs, vnics_left, irqs_left;
+ struct nfp_net *nn;
/* Get MSI-X vectors */
wanted_irqs = 0;
- list_for_each_entry(nn, &pf->ports, port_list)
+ list_for_each_entry(nn, &pf->vnics, vnic_list)
wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
GFP_KERNEL);
- if (!pf->irq_entries) {
- err = -ENOMEM;
- goto err_nn_free;
- }
+ if (!pf->irq_entries)
+ return -ENOMEM;
num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
- NFP_NET_MIN_PORT_IRQS * pf->num_netdevs,
+ NFP_NET_MIN_VNIC_IRQS * pf->num_vnics,
wanted_irqs);
if (!num_irqs) {
- nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
- err = -ENOMEM;
- goto err_vec_free;
+ nfp_warn(pf->cpp, "Unable to allocate MSI-X vectors\n");
+ kfree(pf->irq_entries);
+ return -ENOMEM;
}
- /* Distribute IRQs to ports */
+ /* Distribute IRQs to vNICs */
irqs_left = num_irqs;
- ports_left = pf->num_netdevs;
- list_for_each_entry(nn, &pf->ports, port_list) {
+ vnics_left = pf->num_vnics;
+ list_for_each_entry(nn, &pf->vnics, vnic_list) {
unsigned int n;
- n = DIV_ROUND_UP(irqs_left, ports_left);
+ n = min(NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs,
+ DIV_ROUND_UP(irqs_left, vnics_left));
nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
n);
irqs_left -= n;
- ports_left--;
+ vnics_left--;
}
- /* Finish netdev init and register */
+ return 0;
+}
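
The allocation loop above now caps each vNIC at the number of vectors it
actually asked for (NFP_NET_NON_Q_VECTORS plus its ring vectors), while
DIV_ROUND_UP() keeps the remaining pool split evenly; the old code could hand
an early vNIC more vectors than it had any use for. A standalone sketch of the
distribution policy with made-up numbers (three vNICs wanting 8 vectors each,
only 10 available):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MIN(a, b)		((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int wanted[] = { 8, 8, 8 };	/* per-vNIC request */
	unsigned int irqs_left = 10, vnics_left = 3, i;

	for (i = 0; i < 3; i++) {
		/* same policy as the driver loop above */
		unsigned int n = MIN(wanted[i],
				     DIV_ROUND_UP(irqs_left, vnics_left));

		printf("vNIC %u gets %u IRQs\n", i, n);
		irqs_left -= n;
		vnics_left--;
	}
	return 0;
}

This hands out 4, 3 and 3 vectors, exhausting the pool without overshooting
any vNIC's request.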
+
+static void nfp_net_pf_free_irqs(struct nfp_pf *pf)
+{
+ nfp_net_irqs_disable(pf->pdev);
+ kfree(pf->irq_entries);
+}
+
+static int nfp_net_pf_init_vnics(struct nfp_pf *pf)
+{
+ struct nfp_net *nn;
+ unsigned int id;
+ int err;
+
+ /* Finish vNIC init and register */
id = 0;
- list_for_each_entry(nn, &pf->ports, port_list) {
- err = nfp_net_pf_init_port_netdev(pf, nn, id);
+ list_for_each_entry(nn, &pf->vnics, vnic_list) {
+ if (!nfp_net_is_data_vnic(nn))
+ continue;
+ err = nfp_net_pf_init_vnic(pf, nn, id);
if (err)
goto err_prev_deinit;
@@ -457,118 +459,244 @@
return 0;
err_prev_deinit:
- list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) {
- nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
- nfp_net_netdev_clean(nn->dp.netdev);
- }
- nfp_net_irqs_disable(pf->pdev);
-err_vec_free:
- kfree(pf->irq_entries);
-err_nn_free:
- nfp_net_pf_free_netdevs(pf);
+ list_for_each_entry_continue_reverse(nn, &pf->vnics, vnic_list)
+ if (nfp_net_is_data_vnic(nn))
+ nfp_net_pf_clean_vnic(pf, nn);
return err;
}
+static int
+nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
+{
+ u8 __iomem *ctrl_bar;
+ int err;
+
+ pf->app = nfp_app_alloc(pf, nfp_net_pf_get_app_id(pf));
+ if (IS_ERR(pf->app))
+ return PTR_ERR(pf->app);
+
+ err = nfp_app_init(pf->app);
+ if (err)
+ goto err_free;
+
+ if (!nfp_app_needs_ctrl_vnic(pf->app))
+ return 0;
+
+ ctrl_bar = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar",
+ NFP_PF_CSR_SLICE_SIZE,
+ &pf->ctrl_vnic_bar);
+ if (IS_ERR(ctrl_bar)) {
+ err = PTR_ERR(ctrl_bar);
+ goto err_free;
+ }
+
+ pf->ctrl_vnic = nfp_net_pf_alloc_vnic(pf, false, ctrl_bar, qc_bar,
+ stride, 0);
+ if (IS_ERR(pf->ctrl_vnic)) {
+ err = PTR_ERR(pf->ctrl_vnic);
+ goto err_unmap;
+ }
+
+ return 0;
+
+err_unmap:
+ nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
+err_free:
+ nfp_app_free(pf->app);
+ return err;
+}
+
+static void nfp_net_pf_app_clean(struct nfp_pf *pf)
+{
+ if (pf->ctrl_vnic) {
+ nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
+ nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
+ }
+ nfp_app_free(pf->app);
+ pf->app = NULL;
+}
+
+static int nfp_net_pf_app_start_ctrl(struct nfp_pf *pf)
+{
+ int err;
+
+ if (!pf->ctrl_vnic)
+ return 0;
+ err = nfp_net_pf_init_vnic(pf, pf->ctrl_vnic, 0);
+ if (err)
+ return err;
+
+ err = nfp_ctrl_open(pf->ctrl_vnic);
+ if (err)
+ goto err_clean_ctrl;
+
+ return 0;
+
+err_clean_ctrl:
+ nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
+ return err;
+}
+
+static void nfp_net_pf_app_stop_ctrl(struct nfp_pf *pf)
+{
+ if (!pf->ctrl_vnic)
+ return;
+ nfp_ctrl_close(pf->ctrl_vnic);
+ nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
+}
+
+static int nfp_net_pf_app_start(struct nfp_pf *pf)
+{
+ int err;
+
+ err = nfp_net_pf_app_start_ctrl(pf);
+ if (err)
+ return err;
+
+ err = nfp_app_start(pf->app, pf->ctrl_vnic);
+ if (err)
+ goto err_ctrl_stop;
+
+ return 0;
+
+err_ctrl_stop:
+ nfp_net_pf_app_stop_ctrl(pf);
+ return err;
+}
+
+static void nfp_net_pf_app_stop(struct nfp_pf *pf)
+{
+ nfp_app_stop(pf->app);
+ nfp_net_pf_app_stop_ctrl(pf);
+}
+
static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
{
+ nfp_net_pf_app_stop(pf);
+ /* stop app first, to avoid double free of ctrl vNIC's ddir */
nfp_net_debugfs_dir_clean(&pf->ddir);
- nfp_net_irqs_disable(pf->pdev);
- kfree(pf->irq_entries);
+ nfp_net_pf_free_irqs(pf);
- nfp_cpp_area_release_free(pf->rx_area);
- nfp_cpp_area_release_free(pf->tx_area);
- nfp_cpp_area_release_free(pf->ctrl_area);
+ nfp_net_pf_app_clean(pf);
+
+ nfp_cpp_area_release_free(pf->qc_area);
+ nfp_cpp_area_release_free(pf->data_vnic_bar);
}
-static void nfp_net_refresh_netdevs(struct work_struct *work)
+static int
+nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
+ struct nfp_eth_table *eth_table)
{
- struct nfp_pf *pf = container_of(work, struct nfp_pf,
- port_refresh_work);
+ struct nfp_eth_table_port *eth_port;
+
+ ASSERT_RTNL();
+
+ eth_port = nfp_net_find_port(eth_table, port->eth_id);
+ if (!eth_port) {
+ set_bit(NFP_PORT_CHANGED, &port->flags);
+ nfp_warn(cpp, "Warning: port #%d not present after reconfig\n",
+ port->eth_id);
+ return -EIO;
+ }
+ if (eth_port->override_changed) {
+ nfp_warn(cpp, "Port #%d config changed, unregistering. Reboot required before port will be operational again.\n", port->eth_id);
+ port->type = NFP_PORT_INVALID;
+ }
+
+ memcpy(port->eth_port, eth_port, sizeof(*eth_port));
+
+ return 0;
+}
+
+int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
+{
struct nfp_eth_table *eth_table;
struct nfp_net *nn, *next;
+ struct nfp_port *port;
- mutex_lock(&pf->port_lock);
+ lockdep_assert_held(&pf->lock);
/* Check for nfp_net_pci_remove() racing against us */
- if (list_empty(&pf->ports))
- goto out;
+ if (list_empty(&pf->vnics))
+ return 0;
- list_for_each_entry(nn, &pf->ports, port_list)
- nfp_net_link_changed_read_clear(nn);
+ /* Update state of all ports */
+ rtnl_lock();
+ list_for_each_entry(port, &pf->ports, port_list)
+ clear_bit(NFP_PORT_CHANGED, &port->flags);
eth_table = nfp_eth_read_ports(pf->cpp);
if (!eth_table) {
+ list_for_each_entry(port, &pf->ports, port_list)
+ if (__nfp_port_get_eth_port(port))
+ set_bit(NFP_PORT_CHANGED, &port->flags);
+ rtnl_unlock();
nfp_err(pf->cpp, "Error refreshing port config!\n");
- goto out;
+ return -EIO;
}
- rtnl_lock();
- list_for_each_entry(nn, &pf->ports, port_list) {
- if (!nn->eth_port)
- continue;
- nn->eth_port = nfp_net_find_port(eth_table,
- nn->eth_port->eth_index);
- }
+ list_for_each_entry(port, &pf->ports, port_list)
+ if (__nfp_port_get_eth_port(port))
+ nfp_net_eth_port_update(pf->cpp, port, eth_table);
rtnl_unlock();
- kfree(pf->eth_tbl);
- pf->eth_tbl = eth_table;
+ kfree(eth_table);
- list_for_each_entry_safe(nn, next, &pf->ports, port_list) {
- if (!nn->eth_port) {
- nfp_warn(pf->cpp, "Warning: port not present after reconfig\n");
- continue;
- }
- if (!nn->eth_port->override_changed)
+ /* Shoot off the ports which became invalid */
+ list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
+ if (!nn->port || nn->port->type != NFP_PORT_INVALID)
continue;
- nn_warn(nn, "Port config changed, unregistering. Reboot required before port will be operational again.\n");
-
- nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
- nfp_net_netdev_clean(nn->dp.netdev);
-
- list_del(&nn->port_list);
- pf->num_netdevs--;
- nfp_net_netdev_free(nn);
+ nfp_net_pf_clean_vnic(pf, nn);
+ nfp_net_pf_free_vnic(pf, nn);
}
- if (list_empty(&pf->ports))
+ if (list_empty(&pf->vnics))
nfp_net_pci_remove_finish(pf);
-out:
- mutex_unlock(&pf->port_lock);
+
+ return 0;
}
-void nfp_net_refresh_port_table(struct nfp_net *nn)
+static void nfp_net_refresh_vnics(struct work_struct *work)
{
- struct nfp_pf *pf = pci_get_drvdata(nn->pdev);
+ struct nfp_pf *pf = container_of(work, struct nfp_pf,
+ port_refresh_work);
+
+ mutex_lock(&pf->lock);
+ nfp_net_refresh_port_table_sync(pf);
+ mutex_unlock(&pf->lock);
+}
+
+void nfp_net_refresh_port_table(struct nfp_port *port)
+{
+ struct nfp_pf *pf = port->app->pf;
+
+ set_bit(NFP_PORT_CHANGED, &port->flags);
schedule_work(&pf->port_refresh_work);
}
-int nfp_net_refresh_eth_port(struct nfp_net *nn)
+int nfp_net_refresh_eth_port(struct nfp_port *port)
{
- struct nfp_eth_table_port *eth_port;
+ struct nfp_cpp *cpp = port->app->cpp;
struct nfp_eth_table *eth_table;
+ int ret;
- eth_table = nfp_eth_read_ports(nn->cpp);
+ clear_bit(NFP_PORT_CHANGED, &port->flags);
+
+ eth_table = nfp_eth_read_ports(cpp);
if (!eth_table) {
- nn_err(nn, "Error refreshing port state table!\n");
+ set_bit(NFP_PORT_CHANGED, &port->flags);
+ nfp_err(cpp, "Error refreshing port state table!\n");
return -EIO;
}
- eth_port = nfp_net_find_port(eth_table, nn->eth_port->eth_index);
- if (!eth_port) {
- nn_err(nn, "Error finding state of the port!\n");
- kfree(eth_table);
- return -EIO;
- }
-
- memcpy(nn->eth_port, eth_port, sizeof(*eth_port));
+ ret = nfp_net_eth_port_update(cpp, port, eth_table);
kfree(eth_table);
- return 0;
+ return ret;
}
/*
@@ -576,29 +704,34 @@
*/
int nfp_net_pci_probe(struct nfp_pf *pf)
{
- u8 __iomem *ctrl_bar, *tx_bar, *rx_bar;
- u32 total_tx_qcs, total_rx_qcs;
struct nfp_net_fw_version fw_ver;
- u32 tx_area_sz, rx_area_sz;
- u32 start_q;
+ u8 __iomem *ctrl_bar, *qc_bar;
+ u32 ctrl_bar_sz;
int stride;
int err;
- INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_netdevs);
- mutex_init(&pf->port_lock);
+ INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics);
/* Verify that the board has completed initialization */
- if (!nfp_is_ready(pf->cpp)) {
+ if (!nfp_is_ready(pf)) {
nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n");
return -EINVAL;
}
- mutex_lock(&pf->port_lock);
- pf->num_ports = nfp_net_pf_get_num_ports(pf);
+ mutex_lock(&pf->lock);
+ pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
+ if ((int)pf->max_data_vnics < 0) {
+ err = pf->max_data_vnics;
+ goto err_unlock;
+ }
- ctrl_bar = nfp_net_pf_map_ctrl_bar(pf);
- if (!ctrl_bar) {
- err = pf->fw_loaded ? -EINVAL : -EPROBE_DEFER;
+ ctrl_bar_sz = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
+ ctrl_bar = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%d_net_bar0",
+ ctrl_bar_sz, &pf->data_vnic_bar);
+ if (IS_ERR(ctrl_bar)) {
+ err = PTR_ERR(ctrl_bar);
+ if (!pf->fw_loaded && err == -ENOENT)
+ err = -EPROBE_DEFER;
goto err_unlock;
}
@@ -616,7 +749,7 @@
nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
} else {
switch (fw_ver.major) {
- case 1 ... 4:
+ case 1 ... 5:
stride = 4;
break;
default:
@@ -628,65 +761,59 @@
}
}
- /* Find how many QC structs need to be mapped */
- total_tx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
- NFP_NET_CFG_START_TXQ,
- NFP_NET_CFG_MAX_TXRINGS);
- total_rx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
- NFP_NET_CFG_START_RXQ,
- NFP_NET_CFG_MAX_RXRINGS);
- if (!total_tx_qcs || !total_rx_qcs) {
- nfp_err(pf->cpp, "Invalid PF QC configuration [%d,%d]\n",
- total_tx_qcs, total_rx_qcs);
- err = -EINVAL;
+ /* Map queues */
+ qc_bar = nfp_net_map_area(pf->cpp, "net.qc", 0, 0,
+ NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ,
+ &pf->qc_area);
+ if (IS_ERR(qc_bar)) {
+ nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
+ err = PTR_ERR(qc_bar);
goto err_ctrl_unmap;
}
- tx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_tx_qcs;
- rx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_rx_qcs;
-
- /* Map TX queues */
- start_q = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
- tx_bar = nfp_net_map_area(pf->cpp, "net.tx", 0, 0,
- NFP_PCIE_QUEUE(start_q),
- tx_area_sz, &pf->tx_area);
- if (IS_ERR(tx_bar)) {
- nfp_err(pf->cpp, "Failed to map TX area.\n");
- err = PTR_ERR(tx_bar);
- goto err_ctrl_unmap;
- }
-
- /* Map RX queues */
- start_q = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
- rx_bar = nfp_net_map_area(pf->cpp, "net.rx", 0, 0,
- NFP_PCIE_QUEUE(start_q),
- rx_area_sz, &pf->rx_area);
- if (IS_ERR(rx_bar)) {
- nfp_err(pf->cpp, "Failed to map RX area.\n");
- err = PTR_ERR(rx_bar);
- goto err_unmap_tx;
- }
+ err = nfp_net_pf_app_init(pf, qc_bar, stride);
+ if (err)
+ goto err_unmap_qc;
pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
- err = nfp_net_pf_spawn_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
- stride, &fw_ver);
+ /* Allocate the vnics and do basic init */
+ err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, qc_bar, stride);
if (err)
goto err_clean_ddir;
- mutex_unlock(&pf->port_lock);
+ err = nfp_net_pf_alloc_irqs(pf);
+ if (err)
+ goto err_free_vnics;
+
+ err = nfp_net_pf_app_start(pf);
+ if (err)
+ goto err_free_irqs;
+
+ err = nfp_net_pf_init_vnics(pf);
+ if (err)
+ goto err_stop_app;
+
+ mutex_unlock(&pf->lock);
return 0;
+err_stop_app:
+ nfp_net_pf_app_stop(pf);
+err_free_irqs:
+ nfp_net_pf_free_irqs(pf);
+err_free_vnics:
+ nfp_net_pf_free_vnics(pf);
err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir);
- nfp_cpp_area_release_free(pf->rx_area);
-err_unmap_tx:
- nfp_cpp_area_release_free(pf->tx_area);
+ nfp_net_pf_app_clean(pf);
+err_unmap_qc:
+ nfp_cpp_area_release_free(pf->qc_area);
err_ctrl_unmap:
- nfp_cpp_area_release_free(pf->ctrl_area);
+ nfp_cpp_area_release_free(pf->data_vnic_bar);
err_unlock:
- mutex_unlock(&pf->port_lock);
+ mutex_unlock(&pf->lock);
+ cancel_work_sync(&pf->port_refresh_work);
return err;
}
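
The reworked probe path stacks its stages (map the ctrl BAR, map the queue
controller area, app init, vNIC allocation, IRQ allocation, app start, vNIC
init), and each err_* label unwinds exactly the stages that had already
succeeded, in reverse order. A minimal standalone sketch of that goto-unwind
pattern (stage names are illustrative, not the driver's):

#include <stdio.h>

static int stage(const char *name, int fail)
{
	printf("%s%s\n", name, fail ? " (fails)" : "");
	return fail ? -1 : 0;
}

int main(void)
{
	int err;

	err = stage("map resources", 0);
	if (err)
		return err;

	err = stage("start app", 0);
	if (err)
		goto err_unmap;

	err = stage("init vNICs", 1);	/* simulate a failure */
	if (err)
		goto err_stop_app;

	return 0;

err_stop_app:
	stage("stop app", 0);
err_unmap:
	stage("unmap resources", 0);
	return err;
}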
@@ -694,21 +821,19 @@
{
struct nfp_net *nn;
- mutex_lock(&pf->port_lock);
- if (list_empty(&pf->ports))
+ mutex_lock(&pf->lock);
+ if (list_empty(&pf->vnics))
goto out;
- list_for_each_entry(nn, &pf->ports, port_list) {
- nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
+ list_for_each_entry(nn, &pf->vnics, vnic_list)
+ if (nfp_net_is_data_vnic(nn))
+ nfp_net_pf_clean_vnic(pf, nn);
- nfp_net_netdev_clean(nn->dp.netdev);
- }
-
- nfp_net_pf_free_netdevs(pf);
+ nfp_net_pf_free_vnics(pf);
nfp_net_pci_remove_finish(pf);
out:
- mutex_unlock(&pf->port_lock);
+ mutex_unlock(&pf->lock);
cancel_work_sync(&pf->port_refresh_work);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 86e61be..c879626 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -161,7 +161,7 @@
dev_warn(&pdev->dev, "OBSOLETE Firmware detected - VF isolation not available\n");
} else {
switch (fw_ver.major) {
- case 1 ... 4:
+ case 1 ... 5:
stride = 4;
tx_bar_no = NFP_NET_Q0_BAR;
rx_bar_no = tx_bar_no;
@@ -202,7 +202,7 @@
rx_bar_off = NFP_PCIE_QUEUE(startq);
/* Allocate and initialise the netdev */
- nn = nfp_net_netdev_alloc(pdev, max_tx_rings, max_rx_rings);
+ nn = nfp_net_alloc(pdev, true, max_tx_rings, max_rx_rings);
if (IS_ERR(nn)) {
err = PTR_ERR(nn);
goto err_ctrl_unmap;
@@ -267,7 +267,7 @@
nfp_netvf_get_mac_addr(nn);
num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries,
- NFP_NET_MIN_PORT_IRQS,
+ NFP_NET_MIN_VNIC_IRQS,
NFP_NET_NON_Q_VECTORS +
nn->dp.num_r_vecs);
if (!num_irqs) {
@@ -283,13 +283,13 @@
*/
nn->me_freq_mhz = 1200;
- err = nfp_net_netdev_init(nn->dp.netdev);
+ err = nfp_net_init(nn);
if (err)
goto err_irqs_disable;
nfp_net_info(nn);
vf->ddir = nfp_net_debugfs_device_add(pdev);
- nfp_net_debugfs_port_add(nn, vf->ddir, 0);
+ nfp_net_debugfs_vnic_add(nn, vf->ddir, 0);
return 0;
@@ -304,7 +304,7 @@
else
iounmap(vf->q_bar);
err_netdev_free:
- nfp_net_netdev_free(nn);
+ nfp_net_free(nn);
err_ctrl_unmap:
iounmap(ctrl_bar);
err_pci_regions:
@@ -328,7 +328,7 @@
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_debugfs_dir_clean(&vf->ddir);
- nfp_net_netdev_clean(nn->dp.netdev);
+ nfp_net_clean(nn);
nfp_net_irqs_disable(pdev);
@@ -340,7 +340,7 @@
}
iounmap(nn->dp.ctrl_bar);
- nfp_net_netdev_free(nn);
+ nfp_net_free(nn);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c
new file mode 100644
index 0000000..a17410a
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/lockdep.h>
+
+#include "nfpcore/nfp_nsp.h"
+#include "nfp_app.h"
+#include "nfp_main.h"
+#include "nfp_net.h"
+#include "nfp_port.h"
+
+struct nfp_port *nfp_port_from_netdev(struct net_device *netdev)
+{
+ struct nfp_net *nn;
+
+ if (WARN_ON(!nfp_netdev_is_nfp_net(netdev)))
+ return NULL;
+ nn = netdev_priv(netdev);
+
+ return nn->port;
+}
+
+struct nfp_port *
+nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id)
+{
+ struct nfp_port *port;
+
+ lockdep_assert_held(&pf->lock);
+
+ if (type != NFP_PORT_PHYS_PORT)
+ return NULL;
+
+ list_for_each_entry(port, &pf->ports, port_list)
+ if (port->eth_id == id)
+ return port;
+
+ return NULL;
+}
+
+struct nfp_eth_table_port *__nfp_port_get_eth_port(struct nfp_port *port)
+{
+ if (!port)
+ return NULL;
+ if (port->type != NFP_PORT_PHYS_PORT)
+ return NULL;
+
+ return port->eth_port;
+}
+
+struct nfp_eth_table_port *nfp_port_get_eth_port(struct nfp_port *port)
+{
+ if (!__nfp_port_get_eth_port(port))
+ return NULL;
+
+ if (test_bit(NFP_PORT_CHANGED, &port->flags))
+ if (nfp_net_refresh_eth_port(port))
+ return NULL;
+
+ return __nfp_port_get_eth_port(port);
+}
+
+int
+nfp_port_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ int n;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ if (!eth_port->is_split)
+ n = snprintf(name, len, "p%d", eth_port->label_port);
+ else
+ n = snprintf(name, len, "p%ds%d", eth_port->label_port,
+ eth_port->label_subport);
+ if (n >= len)
+ return -EINVAL;
+
+ return 0;
+}
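
nfp_port_get_phys_port_name() above derives the name from the ETH table
entry: "p%d" for a whole physical port, "p%ds%d" for a subport of a split
port, and it refuses names that would be truncated. A trivial standalone
check of the two formats (labels hypothetical):

#include <stdio.h>

int main(void)
{
	char name[16];

	snprintf(name, sizeof(name), "p%d", 0);
	printf("whole port: %s\n", name);	/* "p0" */

	snprintf(name, sizeof(name), "p%ds%d", 0, 1);
	printf("split port: %s\n", name);	/* "p0s1" */
	return 0;
}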
+
+struct nfp_port *
+nfp_port_alloc(struct nfp_app *app, enum nfp_port_type type,
+ struct net_device *netdev)
+{
+ struct nfp_port *port;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ port->netdev = netdev;
+ port->type = type;
+ port->app = app;
+
+ list_add_tail(&port->port_list, &app->pf->ports);
+
+ return port;
+}
+
+void nfp_port_free(struct nfp_port *port)
+{
+ if (!port)
+ return;
+ list_del(&port->port_list);
+ kfree(port);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
new file mode 100644
index 0000000..4d1a9b3
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _NFP_PORT_H_
+#define _NFP_PORT_H_
+
+#include <net/devlink.h>
+
+struct net_device;
+struct nfp_app;
+struct nfp_pf;
+struct nfp_port;
+
+/**
+ * enum nfp_port_type - type of port NFP can switch traffic to
+ * @NFP_PORT_INVALID: port is invalid, %NFP_PORT_PHYS_PORT transitions to this
+ * state when port disappears because of FW fault or config
+ * change
+ * @NFP_PORT_PHYS_PORT: external NIC port
+ */
+enum nfp_port_type {
+ NFP_PORT_INVALID,
+ NFP_PORT_PHYS_PORT,
+};
+
+/**
+ * enum nfp_port_flags - port flags (can be type-specific)
+ * @NFP_PORT_CHANGED: port state has changed since last eth table refresh;
+ * for NFP_PORT_PHYS_PORT, never set otherwise; must hold
+ * rtnl_lock to clear
+ */
+enum nfp_port_flags {
+ NFP_PORT_CHANGED = 0,
+};
+
+/**
+ * struct nfp_port - structure representing NFP port
+ * @netdev: backpointer to associated netdev
+ * @type: what port type does the entity represent
+ * @flags: port flags
+ * @app: backpointer to the app structure
+ * @dl_port: devlink port structure
+ * @eth_id: for %NFP_PORT_PHYS_PORT port ID in NFP enumeration scheme
+ * @eth_port: for %NFP_PORT_PHYS_PORT translated ETH Table port entry
+ * @port_list: entry on pf's list of ports
+ */
+struct nfp_port {
+ struct net_device *netdev;
+ enum nfp_port_type type;
+
+ unsigned long flags;
+
+ struct nfp_app *app;
+
+ struct devlink_port dl_port;
+
+ unsigned int eth_id;
+ struct nfp_eth_table_port *eth_port;
+
+ struct list_head port_list;
+};
+
+struct nfp_port *nfp_port_from_netdev(struct net_device *netdev);
+struct nfp_port *
+nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id);
+struct nfp_eth_table_port *__nfp_port_get_eth_port(struct nfp_port *port);
+struct nfp_eth_table_port *nfp_port_get_eth_port(struct nfp_port *port);
+
+int
+nfp_port_get_phys_port_name(struct net_device *netdev, char *name, size_t len);
+
+struct nfp_port *
+nfp_port_alloc(struct nfp_app *app, enum nfp_port_type type,
+ struct net_device *netdev);
+void nfp_port_free(struct nfp_port *port);
+
+int nfp_net_refresh_eth_port(struct nfp_port *port);
+void nfp_net_refresh_port_table(struct nfp_port *port);
+int nfp_net_refresh_port_table_sync(struct nfp_pf *pf);
+
+int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port);
+void nfp_devlink_port_unregister(struct nfp_port *port);
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
index 4df2ce2..1a8d04a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
@@ -46,7 +46,9 @@
/* Implemented in nfp_hwinfo.c */
-const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup);
+struct nfp_hwinfo;
+struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp);
+const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup);
/* Implemented in nfp_nsp.c, low level functions */
@@ -64,6 +66,8 @@
int nfp_nsp_write_eth_table(struct nfp_nsp *state,
const void *buf, unsigned int size);
int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask,
+ void *buf, unsigned int size);
/* Implemented in nfp_resource.c */
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 43dc68e..cd67832 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -119,6 +119,11 @@
#define NFP_PCIE_EM 0x020000
#define NFP_PCIE_SRAM 0x000000
+/* Minimal size of the PCIe cfg memory we depend on being mapped,
+ * queue controller and DMA controller don't have to be covered.
+ */
+#define NFP_PCI_MIN_MAP_SIZE 0x080000
+
#define NFP_PCIE_P2C_FIXED_SIZE(bar) (1 << (bar)->bitsize)
#define NFP_PCIE_P2C_BULK_SIZE(bar) (1 << (bar)->bitsize)
#define NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(bar, x) ((x) << ((bar)->bitsize - 2))
@@ -583,9 +588,15 @@
NFP_PCIE_BAR_PCIE2CPP_MapType(
NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3),
};
+ char status_msg[196] = {};
struct nfp_bar *bar;
int i, bars_free;
int expl_groups;
+ char *msg, *end;
+
+ msg = status_msg +
+ snprintf(status_msg, sizeof(status_msg) - 1, "RESERVED BARs: ");
+ end = status_msg + sizeof(status_msg) - 1;
bar = &nfp->bar[0];
for (i = 0; i < ARRAY_SIZE(nfp->bar); i++, bar++) {
@@ -628,34 +639,38 @@
/* Configure, and lock, BAR0.0 for General Target use (MSI-X SRAM) */
bar = &nfp->bar[0];
- bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
- nfp_bar_resource_len(bar));
+ if (nfp_bar_resource_len(bar) >= NFP_PCI_MIN_MAP_SIZE)
+ bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
+ nfp_bar_resource_len(bar));
if (bar->iomem) {
- dev_info(nfp->dev,
- "BAR0.0 RESERVED: General Mapping/MSI-X SRAM\n");
+ msg += snprintf(msg, end - msg, "0.0: General/MSI-X SRAM, ");
atomic_inc(&bar->refcnt);
bars_free--;
nfp6000_bar_write(nfp, bar, barcfg_msix_general);
nfp->expl.data = bar->iomem + NFP_PCIE_SRAM + 0x1000;
+
+ if (nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP4000 ||
+ nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000) {
+ nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0);
+ } else {
+ int pf = nfp->pdev->devfn & 7;
+
+ nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf);
+ }
+ nfp->iomem.em = bar->iomem + NFP_PCIE_EM;
}
if (nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP4000 ||
- nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000) {
- nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0);
+ nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000)
expl_groups = 4;
- } else {
- int pf = nfp->pdev->devfn & 7;
-
- nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf);
+ else
expl_groups = 1;
- }
- nfp->iomem.em = bar->iomem + NFP_PCIE_EM;
/* Configure, and lock, BAR0.1 for PCIe XPB (MSI-X PBA) */
bar = &nfp->bar[1];
- dev_info(nfp->dev, "BAR0.1 RESERVED: PCIe XPB/MSI-X PBA\n");
+ msg += snprintf(msg, end - msg, "0.1: PCIe XPB/MSI-X PBA, ");
atomic_inc(&bar->refcnt);
bars_free--;
@@ -674,9 +689,8 @@
bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
nfp_bar_resource_len(bar));
if (bar->iomem) {
- dev_info(nfp->dev,
- "BAR0.%d RESERVED: Explicit%d Mapping\n",
- 4 + i, i);
+ msg += snprintf(msg, end - msg,
+ "0.%d: Explicit%d, ", 4 + i, i);
atomic_inc(&bar->refcnt);
bars_free--;
@@ -694,8 +708,7 @@
sort(&nfp->bar[0], nfp->bars, sizeof(nfp->bar[0]),
bar_cmp, NULL);
- dev_info(nfp->dev, "%d NFP PCI2CPP BARs, %d free\n",
- nfp->bars, bars_free);
+ dev_info(nfp->dev, "%sfree: %d/%d\n", status_msg, bars_free, nfp->bars);
return 0;
}
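
The BAR setup above replaces several per-BAR dev_info() lines with a single
summary assembled by repeated snprintf() calls that advance a write pointer
through a fixed buffer. A standalone sketch of that accumulation (entries and
counts hard-coded for illustration):

#include <stdio.h>

int main(void)
{
	char status_msg[196] = "";
	char *msg, *end;

	msg = status_msg +
	      snprintf(status_msg, sizeof(status_msg) - 1, "RESERVED BARs: ");
	end = status_msg + sizeof(status_msg) - 1;

	msg += snprintf(msg, end - msg, "0.0: General/MSI-X SRAM, ");
	msg += snprintf(msg, end - msg, "0.1: PCIe XPB/MSI-X PBA, ");

	printf("%sfree: %d/%d\n", status_msg, 18, 24);
	return 0;
}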
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index edecc0a..25a9671 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -42,6 +42,7 @@
#include <linux/ctype.h>
#include <linux/types.h>
+#include <linux/sizes.h>
#ifndef NFP_SUBSYS
#define NFP_SUBSYS "nfp"
@@ -59,6 +60,13 @@
#define PCI_64BIT_BAR_COUNT 3
#define NFP_CPP_NUM_TARGETS 16
+/* Max size of area it should be safe to request */
+#define NFP_CPP_SAFE_AREA_SIZE SZ_2M
+
+/* NFP_MUTEX_WAIT_* are timeouts in seconds when waiting for a mutex */
+#define NFP_MUTEX_WAIT_FIRST_WARN 15
+#define NFP_MUTEX_WAIT_NEXT_WARN 5
+#define NFP_MUTEX_WAIT_ERROR 60
struct device;
@@ -214,13 +222,6 @@
u16 nfp_cpp_interface(struct nfp_cpp *cpp);
int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial);
-void *nfp_hwinfo_cache(struct nfp_cpp *cpp);
-void nfp_hwinfo_cache_set(struct nfp_cpp *cpp, void *val);
-void *nfp_rtsym_cache(struct nfp_cpp *cpp);
-void nfp_rtsym_cache_set(struct nfp_cpp *cpp, void *val);
-
-void nfp_nffw_cache_flush(struct nfp_cpp *cpp);
-
struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp,
u32 cpp_id,
const char *name,
@@ -289,6 +290,17 @@
int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex);
int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex);
+/**
+ * nfp_cppcore_pcie_unit() - Get PCI Unit of a CPP handle
+ * @cpp: CPP handle
+ *
+ * Return: PCI unit for the NFP CPP handle
+ */
+static inline u8 nfp_cppcore_pcie_unit(struct nfp_cpp *cpp)
+{
+ return NFP_CPP_INTERFACE_UNIT_of(nfp_cpp_interface(cpp));
+}
+
struct nfp_cpp_explicit;
struct nfp_cpp_explicit_command {
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index e2abba4..9b69dcf 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -76,10 +76,6 @@
* @serial: chip serial number
* @imb_cat_table: CPP Mapping Table
*
- * Following fields can be used only in probe() or with rtnl held:
- * @hwinfo: HWInfo database fetched from the device
- * @rtsym: firmware run time symbols
- *
* Following fields use explicit locking:
* @resource_list: NFP CPP resource list
* @resource_lock: protects @resource_list
@@ -107,9 +103,6 @@
struct mutex area_cache_mutex;
struct list_head area_cache_list;
-
- void *hwinfo;
- void *rtsym;
};
/* Element of the area_cache_list */
@@ -233,9 +226,6 @@
if (cpp->op->free)
cpp->op->free(cpp);
- kfree(cpp->hwinfo);
- kfree(cpp->rtsym);
-
device_unregister(&cpp->dev);
kfree(cpp);
@@ -276,39 +266,6 @@
return sizeof(cpp->serial);
}
-void *nfp_hwinfo_cache(struct nfp_cpp *cpp)
-{
- return cpp->hwinfo;
-}
-
-void nfp_hwinfo_cache_set(struct nfp_cpp *cpp, void *val)
-{
- cpp->hwinfo = val;
-}
-
-void *nfp_rtsym_cache(struct nfp_cpp *cpp)
-{
- return cpp->rtsym;
-}
-
-void nfp_rtsym_cache_set(struct nfp_cpp *cpp, void *val)
-{
- cpp->rtsym = val;
-}
-
-/**
- * nfp_nffw_cache_flush() - Flush cached firmware information
- * @cpp: NFP CPP handle
- *
- * Flush cached firmware information. This function should be called
- * every time firmware is loaded on unloaded.
- */
-void nfp_nffw_cache_flush(struct nfp_cpp *cpp)
-{
- kfree(nfp_rtsym_cache(cpp));
- nfp_rtsym_cache_set(cpp, NULL);
-}
-
/**
* nfp_cpp_area_alloc_with_name() - allocate a new CPP area
* @cpp: CPP device handle
@@ -924,18 +881,9 @@
mutex_unlock(&cpp->area_cache_mutex);
}
-/**
- * nfp_cpp_read() - read from CPP target
- * @cpp: CPP handle
- * @destination: CPP id
- * @address: offset into CPP target
- * @kernel_vaddr: kernel buffer for result
- * @length: number of bytes to read
- *
- * Return: length of io, or -ERRNO
- */
-int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
- unsigned long long address, void *kernel_vaddr, size_t length)
+static int __nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
+ unsigned long long address, void *kernel_vaddr,
+ size_t length)
{
struct nfp_cpp_area_cache *cache;
struct nfp_cpp_area *area;
@@ -968,18 +916,43 @@
}
/**
- * nfp_cpp_write() - write to CPP target
+ * nfp_cpp_read() - read from CPP target
* @cpp: CPP handle
* @destination: CPP id
* @address: offset into CPP target
- * @kernel_vaddr: kernel buffer to read from
- * @length: number of bytes to write
+ * @kernel_vaddr: kernel buffer for result
+ * @length: number of bytes to read
*
* Return: length of io, or -ERRNO
*/
-int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
- unsigned long long address,
- const void *kernel_vaddr, size_t length)
+int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
+ unsigned long long address, void *kernel_vaddr,
+ size_t length)
+{
+ size_t n, offset;
+ int ret;
+
+ for (offset = 0; offset < length; offset += n) {
+ unsigned long long r_addr = address + offset;
+
+ /* make first read smaller to align to safe window */
+ n = min_t(size_t, length - offset,
+ ALIGN(r_addr + 1, NFP_CPP_SAFE_AREA_SIZE) - r_addr);
+
+ ret = __nfp_cpp_read(cpp, destination, address + offset,
+ kernel_vaddr + offset, n);
+ if (ret < 0)
+ return ret;
+ if (ret != n)
+ return offset + n;
+ }
+
+ return length;
+}
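
nfp_cpp_read() above (and nfp_cpp_write() below, which mirrors it) now splits
a large transfer into chunks that never straddle an NFP_CPP_SAFE_AREA_SIZE
boundary; only the first chunk is shortened, so every later one starts
window-aligned. A standalone sketch of the chunking arithmetic (window size
as in the patch, start address and length illustrative):

#include <stdio.h>
#include <stddef.h>

#define SAFE_SZ		(2UL * 1024 * 1024)	/* NFP_CPP_SAFE_AREA_SIZE */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long long)(a) - 1))
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long long address = SAFE_SZ - 4096;	/* crosses a window */
	size_t length = 3 * SAFE_SZ, offset, n;

	for (offset = 0; offset < length; offset += n) {
		unsigned long long r_addr = address + offset;

		/* make the first chunk smaller to align to the window */
		n = MIN(length - offset,
			ALIGN(r_addr + 1, SAFE_SZ) - r_addr);
		printf("chunk at 0x%llx, %zu bytes\n", r_addr, n);
	}
	return 0;
}

This prints a 4 KB head chunk, two full 2 MB windows, and the remaining tail.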
+
+static int __nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
+ unsigned long long address,
+ const void *kernel_vaddr, size_t length)
{
struct nfp_cpp_area_cache *cache;
struct nfp_cpp_area *area;
@@ -1011,6 +984,41 @@
return err;
}
+/**
+ * nfp_cpp_write() - write to CPP target
+ * @cpp: CPP handle
+ * @destination: CPP id
+ * @address: offset into CPP target
+ * @kernel_vaddr: kernel buffer to read from
+ * @length: number of bytes to write
+ *
+ * Return: length of io, or -ERRNO
+ */
+int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
+ unsigned long long address,
+ const void *kernel_vaddr, size_t length)
+{
+ size_t n, offset;
+ int ret;
+
+ for (offset = 0; offset < length; offset += n) {
+ unsigned long long w_addr = address + offset;
+
+ /* make first write smaller to align to safe window */
+ n = min_t(size_t, length - offset,
+ ALIGN(w_addr + 1, NFP_CPP_SAFE_AREA_SIZE) - w_addr);
+
+ ret = __nfp_cpp_write(cpp, destination, address + offset,
+ kernel_vaddr + offset, n);
+ if (ret < 0)
+ return ret;
+ if (ret != n)
+ return offset + n;
+ }
+
+ return length;
+}
+
/* Return the correct CPP address, and fixup xpb_addr as needed. */
static u32 nfp_xpb_to_cpp(struct nfp_cpp *cpp, u32 *xpb_addr)
{
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
index 8d8f311..4f24aff 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
@@ -178,7 +178,8 @@
return hwinfo_db_walk(cpp, db, size);
}
-static int hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size)
+static struct nfp_hwinfo *
+hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size)
{
struct nfp_hwinfo *header;
struct nfp_resource *res;
@@ -196,7 +197,7 @@
nfp_resource_release(res);
if (*cpp_size < HWINFO_SIZE_MIN)
- return -ENOENT;
+ return NULL;
} else if (PTR_ERR(res) == -ENOENT) {
/* Try getting the HWInfo table from the 'classic' location */
cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU,
@@ -204,101 +205,86 @@
cpp_addr = 0x30000;
*cpp_size = 0x0e000;
} else {
- return PTR_ERR(res);
+ return NULL;
}
db = kmalloc(*cpp_size + 1, GFP_KERNEL);
if (!db)
- return -ENOMEM;
+ return NULL;
err = nfp_cpp_read(cpp, cpp_id, cpp_addr, db, *cpp_size);
- if (err != *cpp_size) {
- kfree(db);
- return err < 0 ? err : -EIO;
- }
+ if (err != *cpp_size)
+ goto exit_free;
header = (void *)db;
- if (nfp_hwinfo_is_updating(header)) {
- kfree(db);
- return -EBUSY;
- }
+ if (nfp_hwinfo_is_updating(header))
+ goto exit_free;
if (le32_to_cpu(header->version) != NFP_HWINFO_VERSION_2) {
nfp_err(cpp, "Unknown HWInfo version: 0x%08x\n",
le32_to_cpu(header->version));
- kfree(db);
- return -EINVAL;
+ goto exit_free;
}
/* NULL-terminate for safety */
db[*cpp_size] = '\0';
- nfp_hwinfo_cache_set(cpp, db);
-
- return 0;
+ return (void *)db;
+exit_free:
+ kfree(db);
+ return NULL;
}
-static int hwinfo_fetch(struct nfp_cpp *cpp, size_t *hwdb_size)
+static struct nfp_hwinfo *hwinfo_fetch(struct nfp_cpp *cpp, size_t *hwdb_size)
{
const unsigned long wait_until = jiffies + HWINFO_WAIT * HZ;
+ struct nfp_hwinfo *db;
int err;
for (;;) {
const unsigned long start_time = jiffies;
- err = hwinfo_try_fetch(cpp, hwdb_size);
- if (!err)
- return 0;
+ db = hwinfo_try_fetch(cpp, hwdb_size);
+ if (db)
+ return db;
err = msleep_interruptible(100);
if (err || time_after(start_time, wait_until)) {
nfp_err(cpp, "NFP access error\n");
- return -EIO;
+ return NULL;
}
}
}
-static int nfp_hwinfo_load(struct nfp_cpp *cpp)
+struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp)
{
struct nfp_hwinfo *db;
size_t hwdb_size = 0;
int err;
- err = hwinfo_fetch(cpp, &hwdb_size);
- if (err)
- return err;
+ db = hwinfo_fetch(cpp, &hwdb_size);
+ if (!db)
+ return NULL;
- db = nfp_hwinfo_cache(cpp);
err = hwinfo_db_validate(cpp, db, hwdb_size);
if (err) {
kfree(db);
- nfp_hwinfo_cache_set(cpp, NULL);
- return err;
+ return NULL;
}
- return 0;
+ return db;
}
/**
* nfp_hwinfo_lookup() - Find a value in the HWInfo table by name
- * @cpp: NFP CPP handle
+ * @hwinfo: NFP HWinfo table
* @lookup: HWInfo name to search for
*
* Return: Value of the HWInfo name, or NULL
*/
-const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup)
+const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup)
{
const char *key, *val, *end;
- struct nfp_hwinfo *hwinfo;
- int err;
-
- hwinfo = nfp_hwinfo_cache(cpp);
- if (!hwinfo) {
- err = nfp_hwinfo_load(cpp);
- if (err)
- return NULL;
- hwinfo = nfp_hwinfo_cache(cpp);
- }
if (!hwinfo || !lookup)
return NULL;
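
nfp_hwinfo_lookup() now operates on a table the caller obtained once via
nfp_hwinfo_read() and owns, instead of the removed per-CPP cache. The walk
itself scans packed key/value string pairs; a standalone model of a lookup
over that kind of layout (the real table carries a header and sizes, which
this sketch omits, and the sample keys are invented):

#include <stdio.h>
#include <string.h>

static const char *lookup(const char *db, size_t size, const char *want)
{
	const char *key = db, *end = db + size;

	while (key < end && *key) {
		const char *val = key + strlen(key) + 1;

		if (!strcmp(key, want))
			return val;
		key = val + strlen(val) + 1;
	}
	return NULL;
}

int main(void)
{
	/* "key\0value\0" pairs, as in the HWInfo string area */
	const char db[] = "eth0.mac\0aa:bb:cc:dd:ee:ff\0model\0NFP6000\0";

	printf("model = %s\n", lookup(db, sizeof(db), "model"));
	return 0;
}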
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c
index 3d15dd0..5f193fe 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c
@@ -141,6 +141,8 @@
return NULL;
}
+ mip->name[sizeof(mip->name) - 1] = 0;
+
return mip;
}
@@ -149,6 +151,11 @@
kfree(mip);
}
+const char *nfp_mip_name(const struct nfp_mip *mip)
+{
+ return mip->name;
+}
+
/**
* nfp_mip_symtab() - Get the address and size of the MIP symbol table
* @mip: MIP handle
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
index 8a99c18..f7b9581 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
@@ -195,7 +195,8 @@
*/
int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
{
- unsigned long warn_at = jiffies + 15 * HZ;
+ unsigned long warn_at = jiffies + NFP_MUTEX_WAIT_FIRST_WARN * HZ;
+ unsigned long err_at = jiffies + NFP_MUTEX_WAIT_ERROR * HZ;
unsigned int timeout_ms = 1;
int err;
@@ -214,12 +215,16 @@
return -ERESTARTSYS;
if (time_is_before_eq_jiffies(warn_at)) {
- warn_at = jiffies + 60 * HZ;
+ warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ;
nfp_warn(mutex->cpp,
"Warning: waiting for NFP mutex [depth:%hd target:%d addr:%llx key:%08x]\n",
mutex->depth,
mutex->target, mutex->address, mutex->key);
}
+ if (time_is_before_eq_jiffies(err_at)) {
+ nfp_err(mutex->cpp, "Error: mutex wait timed out\n");
+ return -EBUSY;
+ }
}
return err;
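
With the named constants above, the lock wait now warns after 15 seconds,
re-warns every 5 seconds, and gives up with -EBUSY after 60 seconds. A
standalone simulation of that schedule (whole seconds stand in for jiffies;
the real loop also sleeps between polls and can be interrupted):

#include <stdio.h>

#define FIRST_WARN	15	/* NFP_MUTEX_WAIT_FIRST_WARN */
#define NEXT_WARN	5	/* NFP_MUTEX_WAIT_NEXT_WARN */
#define ERROR_AT	60	/* NFP_MUTEX_WAIT_ERROR */

int main(void)
{
	int now, warn_at = FIRST_WARN;

	for (now = 0; ; now++) {
		if (now >= warn_at) {
			printf("%2ds: warning, still waiting\n", now);
			warn_at = now + NEXT_WARN;
		}
		if (now >= ERROR_AT) {
			printf("%2ds: timed out, -EBUSY\n", now);
			break;
		}
	}
	return 0;
}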
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
index 988badd..d27d297 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
@@ -55,6 +55,7 @@
const struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp);
void nfp_mip_close(const struct nfp_mip *mip);
+const char *nfp_mip_name(const struct nfp_mip *mip);
void nfp_mip_symtab(const struct nfp_mip *mip, u32 *addr, u32 *size);
void nfp_mip_strtab(const struct nfp_mip *mip, u32 *addr, u32 *size);
@@ -87,9 +88,16 @@
int domain;
};
-int nfp_rtsym_count(struct nfp_cpp *cpp);
-const struct nfp_rtsym *nfp_rtsym_get(struct nfp_cpp *cpp, int idx);
-const struct nfp_rtsym *nfp_rtsym_lookup(struct nfp_cpp *cpp, const char *name);
-u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error);
+struct nfp_rtsym_table;
+
+struct nfp_rtsym_table *nfp_rtsym_table_read(struct nfp_cpp *cpp);
+struct nfp_rtsym_table *
+__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip);
+int nfp_rtsym_count(struct nfp_rtsym_table *rtbl);
+const struct nfp_rtsym *nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx);
+const struct nfp_rtsym *
+nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name);
+u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
+ int *error);
#endif /* NFP_NFFW_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 2fa9247..3736455 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -93,6 +93,7 @@
SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */
SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */
SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */
+ SPCODE_NSP_SENSORS = 12, /* Read NSP sensor(s) */
SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */
};
@@ -419,6 +420,14 @@
if (err < 0)
return err;
}
+ /* Zero out remaining part of the buffer */
+ if (out_buf && out_size && out_size > in_size) {
+ memset(out_buf, 0, out_size - in_size);
+ err = nfp_cpp_write(cpp, cpp_id, cpp_buf + in_size,
+ out_buf, out_size - in_size);
+ if (err < 0)
+ return err;
+ }
ret = nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf);
if (ret < 0)
@@ -465,13 +474,7 @@
int nfp_nsp_device_soft_reset(struct nfp_nsp *state)
{
- int err;
-
- err = nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0);
-
- nfp_nffw_cache_flush(state->cpp);
-
- return err;
+ return nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0);
}
int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw)
@@ -498,3 +501,10 @@
return nfp_nsp_command_buf(state, SPCODE_NSP_IDENTIFY, size, NULL, 0,
buf, size);
}
+
+int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask,
+ void *buf, unsigned int size)
+{
+ return nfp_nsp_command_buf(state, SPCODE_NSP_SENSORS, sensor_mask,
+ NULL, 0, buf, size);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index 36b21e4..26d7dce 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -96,6 +96,7 @@
* @override_changed: is media reconfig pending?
*
* @port_type: one of %PORT_* defines for ethtool
+ * @port_lanes: total number of lanes on the port (sum of lanes of all subports)
* @is_split: is interface part of a split port
*/
struct nfp_eth_table {
@@ -127,6 +128,8 @@
/* Computed fields */
u8 port_type;
+ unsigned int port_lanes;
+
bool is_split;
} ports[0];
};
@@ -157,6 +160,7 @@
 * @primary: version of primary bootloader
* @secondary: version id of secondary bootloader
* @nsp: version id of NSP
+ * @sensor_mask: mask of present sensors available on NIC
*/
struct nfp_nsp_identify {
char version[40];
@@ -167,8 +171,19 @@
u16 primary;
u16 secondary;
u16 nsp;
+ u64 sensor_mask;
};
struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp);
+enum nfp_nsp_sensor_id {
+ NFP_SENSOR_CHIP_TEMPERATURE,
+ NFP_SENSOR_ASSEMBLY_POWER,
+ NFP_SENSOR_ASSEMBLY_12V_POWER,
+ NFP_SENSOR_ASSEMBLY_3V3_POWER,
+};
+
+int nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id,
+ long *val);
+
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c
index e7a263d..5d362f8 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c
@@ -46,7 +46,8 @@
__le16 primary;
__le16 secondary;
__le16 nsp;
- __le16 reserved;
+ u8 reserved[6];
+ __le64 sensor_mask;
};
struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp)
@@ -82,8 +83,52 @@
nspi->primary = le16_to_cpu(ni->primary);
nspi->secondary = le16_to_cpu(ni->secondary);
nspi->nsp = le16_to_cpu(ni->nsp);
+ nspi->sensor_mask = le64_to_cpu(ni->sensor_mask);
exit_free:
kfree(ni);
return nspi;
}
+
+struct nfp_sensors {
+ __le32 chip_temp;
+ __le32 assembly_power;
+ __le32 assembly_12v_power;
+ __le32 assembly_3v3_power;
+};
+
+int nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id,
+ long *val)
+{
+ struct nfp_sensors s;
+ struct nfp_nsp *nsp;
+ int ret;
+
+ nsp = nfp_nsp_open(cpp);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ ret = nfp_nsp_read_sensors(nsp, BIT(id), &s, sizeof(s));
+ nfp_nsp_close(nsp);
+
+ if (ret < 0)
+ return ret;
+
+ switch (id) {
+ case NFP_SENSOR_CHIP_TEMPERATURE:
+ *val = le32_to_cpu(s.chip_temp);
+ break;
+ case NFP_SENSOR_ASSEMBLY_POWER:
+ *val = le32_to_cpu(s.assembly_power);
+ break;
+ case NFP_SENSOR_ASSEMBLY_12V_POWER:
+ *val = le32_to_cpu(s.assembly_12v_power);
+ break;
+ case NFP_SENSOR_ASSEMBLY_3V3_POWER:
+ *val = le32_to_cpu(s.assembly_3v3_power);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index 639438d..b0f8785 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -186,17 +186,19 @@
}
static void
-nfp_eth_mark_split_ports(struct nfp_cpp *cpp, struct nfp_eth_table *table)
+nfp_eth_calc_port_geometry(struct nfp_cpp *cpp, struct nfp_eth_table *table)
{
unsigned int i, j;
for (i = 0; i < table->count; i++)
for (j = 0; j < table->count; j++) {
- if (i == j)
- continue;
if (table->ports[i].label_port !=
table->ports[j].label_port)
continue;
+ table->ports[i].port_lanes += table->ports[j].lanes;
+
+ if (i == j)
+ continue;
if (table->ports[i].label_subport ==
table->ports[j].label_subport)
nfp_warn(cpp,
@@ -205,7 +207,6 @@
table->ports[i].label_subport);
table->ports[i].is_split = true;
- break;
}
}
@@ -289,7 +290,7 @@
nfp_eth_port_translate(nsp, &entries[i], i,
&table->ports[j++]);
- nfp_eth_mark_split_ports(cpp, table);
+ nfp_eth_calc_port_geometry(cpp, table);
for (i = 0; i < table->count; i++)
nfp_eth_calc_port_type(cpp, &table->ports[i]);
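
The renamed nfp_eth_calc_port_geometry() does more than mark splits: each
entry now sums lanes over every entry sharing its label_port (itself
included) into the new port_lanes field, and the early break is dropped so
all pairs are visited. A standalone model with two hypothetical 2-lane
subports of port 0 (the duplicate-subport warning is omitted):

#include <stdio.h>
#include <stdbool.h>

struct port {
	int label_port, label_subport, lanes, port_lanes;
	bool is_split;
};

int main(void)
{
	struct port p[] = {
		{ .label_port = 0, .label_subport = 0, .lanes = 2 },
		{ .label_port = 0, .label_subport = 1, .lanes = 2 },
	};
	unsigned int i, j, n = sizeof(p) / sizeof(p[0]);

	for (i = 0; i < n; i++)
		for (j = 0; j < n; j++) {
			if (p[i].label_port != p[j].label_port)
				continue;
			p[i].port_lanes += p[j].lanes;	/* own lanes too */
			if (i != j)
				p[i].is_split = true;
		}

	for (i = 0; i < n; i++)
		printf("entry %u: %d lanes on the port, split=%d\n",
		       i, p[i].port_lanes, p[i].is_split);
	return 0;
}

Both subports report port_lanes = 4, the total for the physical port.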
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
index 2d15a7c..0726122 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
@@ -181,7 +181,8 @@
struct nfp_resource *
nfp_resource_acquire(struct nfp_cpp *cpp, const char *name)
{
- unsigned long warn_at = jiffies + 15 * HZ;
+ unsigned long warn_at = jiffies + NFP_MUTEX_WAIT_FIRST_WARN * HZ;
+ unsigned long err_at = jiffies + NFP_MUTEX_WAIT_ERROR * HZ;
struct nfp_cpp_mutex *dev_mutex;
struct nfp_resource *res;
int err;
@@ -214,10 +215,15 @@
}
if (time_is_before_eq_jiffies(warn_at)) {
- warn_at = jiffies + 60 * HZ;
+ warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ;
nfp_warn(cpp, "Warning: waiting for NFP resource %s\n",
name);
}
+ if (time_is_before_eq_jiffies(err_at)) {
+ nfp_err(cpp, "Error: resource %s timed out\n", name);
+ err = -EBUSY;
+ goto err_free;
+ }
}
nfp_cpp_mutex_free(dev_mutex);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
index 0e3870e..203f9cb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
@@ -65,7 +65,8 @@
__le32 size_lo;
};
-struct nfp_rtsym_cache {
+struct nfp_rtsym_table {
+ struct nfp_cpp *cpp;
int num;
char *strtab;
struct nfp_rtsym symtab[];
@@ -78,7 +79,7 @@
}
static void
-nfp_rtsym_sw_entry_init(struct nfp_rtsym_cache *cache, u32 strtab_size,
+nfp_rtsym_sw_entry_init(struct nfp_rtsym_table *cache, u32 strtab_size,
struct nfp_rtsym *sw, struct nfp_rtsym_entry *fw)
{
sw->type = fw->type;
@@ -106,26 +107,36 @@
sw->domain = -1;
}
-static int nfp_rtsymtab_probe(struct nfp_cpp *cpp)
+struct nfp_rtsym_table *nfp_rtsym_table_read(struct nfp_cpp *cpp)
+{
+ struct nfp_rtsym_table *rtbl;
+ const struct nfp_mip *mip;
+
+ mip = nfp_mip_open(cpp);
+ rtbl = __nfp_rtsym_table_read(cpp, mip);
+ nfp_mip_close(mip);
+
+ return rtbl;
+}
+
+struct nfp_rtsym_table *
+__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip)
{
const u32 dram = NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0) |
NFP_ISL_EMEM0;
u32 strtab_addr, symtab_addr, strtab_size, symtab_size;
struct nfp_rtsym_entry *rtsymtab;
- struct nfp_rtsym_cache *cache;
- const struct nfp_mip *mip;
+ struct nfp_rtsym_table *cache;
int err, n, size;
- mip = nfp_mip_open(cpp);
if (!mip)
- return -EIO;
+ return NULL;
nfp_mip_strtab(mip, &strtab_addr, &strtab_size);
nfp_mip_symtab(mip, &symtab_addr, &symtab_size);
- nfp_mip_close(mip);
if (!symtab_size || !strtab_size || symtab_size % sizeof(*rtsymtab))
- return -ENXIO;
+ return NULL;
/* Align to 64 bits */
symtab_size = round_up(symtab_size, 8);
@@ -133,27 +144,26 @@
rtsymtab = kmalloc(symtab_size, GFP_KERNEL);
if (!rtsymtab)
- return -ENOMEM;
+ return NULL;
size = sizeof(*cache);
size += symtab_size / sizeof(*rtsymtab) * sizeof(struct nfp_rtsym);
size += strtab_size + 1;
cache = kmalloc(size, GFP_KERNEL);
- if (!cache) {
- err = -ENOMEM;
- goto err_free_rtsym_raw;
- }
+ if (!cache)
+ goto exit_free_rtsym_raw;
+ cache->cpp = cpp;
cache->num = symtab_size / sizeof(*rtsymtab);
cache->strtab = (void *)&cache->symtab[cache->num];
err = nfp_cpp_read(cpp, dram, symtab_addr, rtsymtab, symtab_size);
if (err != symtab_size)
- goto err_free_cache;
+ goto exit_free_cache;
err = nfp_cpp_read(cpp, dram, strtab_addr, cache->strtab, strtab_size);
if (err != strtab_size)
- goto err_free_cache;
+ goto exit_free_cache;
cache->strtab[strtab_size] = '\0';
for (n = 0; n < cache->num; n++)
@@ -161,97 +171,71 @@
&cache->symtab[n], &rtsymtab[n]);
kfree(rtsymtab);
- nfp_rtsym_cache_set(cpp, cache);
- return 0;
-err_free_cache:
+ return cache;
+
+exit_free_cache:
kfree(cache);
-err_free_rtsym_raw:
+exit_free_rtsym_raw:
kfree(rtsymtab);
- return err;
-}
-
-static struct nfp_rtsym_cache *nfp_rtsym(struct nfp_cpp *cpp)
-{
- struct nfp_rtsym_cache *cache;
- int err;
-
- cache = nfp_rtsym_cache(cpp);
- if (cache)
- return cache;
-
- err = nfp_rtsymtab_probe(cpp);
- if (err < 0)
- return ERR_PTR(err);
-
- return nfp_rtsym_cache(cpp);
+ return NULL;
}
/**
* nfp_rtsym_count() - Get the number of RTSYM descriptors
- * @cpp: NFP CPP handle
+ * @rtbl: NFP RTsym table
*
- * Return: Number of RTSYM descriptors, or -ERRNO
+ * Return: Number of RTSYM descriptors
*/
-int nfp_rtsym_count(struct nfp_cpp *cpp)
+int nfp_rtsym_count(struct nfp_rtsym_table *rtbl)
{
- struct nfp_rtsym_cache *cache;
-
- cache = nfp_rtsym(cpp);
- if (IS_ERR(cache))
- return PTR_ERR(cache);
-
- return cache->num;
+ if (!rtbl)
+ return -EINVAL;
+ return rtbl->num;
}
/**
* nfp_rtsym_get() - Get the Nth RTSYM descriptor
- * @cpp: NFP CPP handle
+ * @rtbl: NFP RTsym table
* @idx: Index (0-based) of the RTSYM descriptor
*
* Return: const pointer to a struct nfp_rtsym descriptor, or NULL
*/
-const struct nfp_rtsym *nfp_rtsym_get(struct nfp_cpp *cpp, int idx)
+const struct nfp_rtsym *nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx)
{
- struct nfp_rtsym_cache *cache;
-
- cache = nfp_rtsym(cpp);
- if (IS_ERR(cache))
+ if (!rtbl)
+ return NULL;
+ if (idx >= rtbl->num)
return NULL;
- if (idx >= cache->num)
- return NULL;
-
- return &cache->symtab[idx];
+ return &rtbl->symtab[idx];
}
/**
* nfp_rtsym_lookup() - Return the RTSYM descriptor for a symbol name
- * @cpp: NFP CPP handle
+ * @rtbl: NFP RTsym table
* @name: Symbol name
*
* Return: const pointer to a struct nfp_rtsym descriptor, or NULL
*/
-const struct nfp_rtsym *nfp_rtsym_lookup(struct nfp_cpp *cpp, const char *name)
+const struct nfp_rtsym *
+nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name)
{
- struct nfp_rtsym_cache *cache;
int n;
- cache = nfp_rtsym(cpp);
- if (IS_ERR(cache))
+ if (!rtbl)
return NULL;
- for (n = 0; n < cache->num; n++) {
- if (strcmp(name, cache->symtab[n].name) == 0)
- return &cache->symtab[n];
- }
+ for (n = 0; n < rtbl->num; n++)
+ if (strcmp(name, rtbl->symtab[n].name) == 0)
+ return &rtbl->symtab[n];
return NULL;
}
/**
* nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol
- * @cpp: NFP CPP handle
+ * @rtbl: NFP RTsym table
* @name: Symbol name
 * @error: Pointer to error code (optional)
*
@@ -261,14 +245,15 @@
*
* Return: value read, on error sets the error and returns ~0ULL.
*/
-u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error)
+u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
+ int *error)
{
const struct nfp_rtsym *sym;
u32 val32, id;
u64 val;
int err;
- sym = nfp_rtsym_lookup(cpp, name);
+ sym = nfp_rtsym_lookup(rtbl, name);
if (!sym) {
err = -ENOENT;
goto exit;
@@ -278,14 +263,14 @@
switch (sym->size) {
case 4:
- err = nfp_cpp_readl(cpp, id, sym->addr, &val32);
+ err = nfp_cpp_readl(rtbl->cpp, id, sym->addr, &val32);
val = val32;
break;
case 8:
- err = nfp_cpp_readq(cpp, id, sym->addr, &val);
+ err = nfp_cpp_readq(rtbl->cpp, id, sym->addr, &val);
break;
default:
- nfp_err(cpp,
+ nfp_err(rtbl->cpp,
"rtsym '%s' unsupported or non-scalar size: %lld\n",
name, sym->size);
err = -EINVAL;
diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.c b/drivers/net/ethernet/netronome/nfp/nic/main.c
new file mode 100644
index 0000000..5206842
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nic/main.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfpcore/nfp_nsp.h"
+#include "../nfp_app.h"
+#include "../nfp_main.h"
+
+static int nfp_nic_init(struct nfp_app *app)
+{
+ struct nfp_pf *pf = app->pf;
+
+ if (pf->eth_tbl && pf->max_data_vnics != pf->eth_tbl->count) {
+ nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n",
+ pf->max_data_vnics, pf->eth_tbl->count);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+const struct nfp_app_type app_nic = {
+ .id = NFP_APP_CORE_NIC,
+ .name = "nic",
+
+ .init = nfp_nic_init,
+ .vnic_init = nfp_app_nic_vnic_init,
+};
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 159564d..89ab786 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -868,7 +868,10 @@
struct ethtool_link_ksettings *cmd)
{
struct w90p910_ether *ether = netdev_priv(dev);
- return mii_ethtool_get_link_ksettings(ðer->mii, cmd);
+
+ mii_ethtool_get_link_ksettings(ðer->mii, cmd);
+
+ return 0;
}
static int w90p910_set_link_ksettings(struct net_device *dev,
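
As the conversions here suggest, mii_ethtool_get_link_ksettings() no longer
returns a value, so ethtool callbacks report success themselves. A minimal
sketch of the resulting pattern, with a made-up driver private struct:

static int foo_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct foo_priv *priv = netdev_priv(dev);	/* hypothetical */

	/* The helper always succeeds now; nothing to forward */
	mii_ethtool_get_link_ksettings(&priv->mii, cmd);

	return 0;
}
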
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 9c7ffd6..08381ef 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -919,7 +919,6 @@
struct sk_buff *skb;
u32 rxconsidx, len, ethst;
struct rx_status_t *prxstat;
- u8 *prdbuf;
int rx_done = 0;
/* Get the current RX buffer indexes */
@@ -959,11 +958,10 @@
if (!skb) {
ndev->stats.rx_dropped++;
} else {
- prdbuf = skb_put(skb, len);
-
/* Copy packet from buffer */
- memcpy(prdbuf, pldat->rx_buff_v +
- rxconsidx * ENET_MAXF_SIZE, len);
+ skb_put_data(skb,
+ pldat->rx_buff_v + rxconsidx * ENET_MAXF_SIZE,
+ len);
/* Pass to upper layer */
skb->protocol = eth_type_trans(skb, ndev);
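
skb_put_data(skb, data, len) is shorthand for memcpy(skb_put(skb, len),
data, len), keeping a single length argument. A minimal sketch of the same
pattern on a hypothetical copybreak RX path:

static struct sk_buff *example_rx_copy(struct net_device *dev,
				       const void *buf, unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len);

	if (!skb)
		return NULL;

	/* Reserve len bytes and copy the frame in one call */
	skb_put_data(skb, buf, len);
	skb->protocol = eth_type_trans(skb, dev);

	return skb;
}
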
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 2109327..731ce1e 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -85,9 +85,8 @@
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
u32 supported, advertising;
- int ret;
- ret = mii_ethtool_get_link_ksettings(&adapter->mii, ecmd);
+ mii_ethtool_get_link_ksettings(&adapter->mii, ecmd);
ethtool_convert_link_mode_to_legacy_u32(&supported,
ecmd->link_modes.supported);
@@ -104,7 +103,8 @@
if (!netif_carrier_ok(adapter->netdev))
ecmd->base.speed = SPEED_UNKNOWN;
- return ret;
+
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 8b026db..482b85e 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1495,8 +1495,8 @@
hmp->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
#else
- memcpy(skb_put(skb, pkt_len), hmp->rx_ring_dma
- + entry*sizeof(*desc), pkt_len);
+ skb_put_data(skb, hmp->rx_ring_dma
+ + entry*sizeof(*desc), pkt_len);
#endif
pci_dma_sync_single_for_device(hmp->pci_dev,
leXX_to_cpu(hmp->rx_ring[entry].addr),
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index a996801..66ff15d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -21,6 +21,7 @@
*
*/
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/slab.h>
#include "netxen_nic.h"
#include "netxen_nic_hw.h"
@@ -44,20 +45,6 @@
void __iomem *addr, u32 data);
static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
void __iomem *addr);
-#ifndef readq
-static inline u64 readq(void __iomem *addr)
-{
- return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
-}
-#endif
-
-#ifndef writeq
-static inline void writeq(u64 val, void __iomem *addr)
-{
- writel(((u32) (val)), (addr));
- writel(((u32) (val >> 32)), (addr + 4));
-}
-#endif
#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
((adapter)->ahw.pci_base0 + (off))
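
The removed local fallbacks duplicated what <linux/io-64-nonatomic-lo-hi.h>
already provides on architectures without native 64-bit MMIO: two 32-bit
accesses, low dword first. Roughly (a sketch, not the exact kernel
implementation):

static inline u64 lo_hi_readq_sketch(const volatile void __iomem *addr)
{
	u32 low = readl(addr);
	u32 high = readl(addr + 4);

	return low | ((u64)high << 32);
}

static inline void lo_hi_writeq_sketch(u64 val, volatile void __iomem *addr)
{
	writel((u32)val, addr);		/* low dword first */
	writel(val >> 32, addr + 4);	/* then high dword */
}
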
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 974929d..6745238 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -5,6 +5,6 @@
qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o
-qed-$(CONFIG_QED_RDMA) += qed_roce.o
+qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o
qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o
qed-$(CONFIG_QED_FCOE) += qed_fcoe.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 2ab1aab..14b08ee 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -54,7 +54,7 @@
#define QED_MAJOR_VERSION 8
#define QED_MINOR_VERSION 10
-#define QED_REVISION_VERSION 10
+#define QED_REVISION_VERSION 11
#define QED_ENGINEERING_VERSION 21
#define QED_VERSION \
@@ -92,7 +92,7 @@
#define QED_MFW_SET_FIELD(name, field, value) \
do { \
- (name) &= ~((field ## _MASK) << (field ## _SHIFT)); \
+ (name) &= ~(field ## _MASK); \
(name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
} while (0)
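
The QED_MFW_SET_FIELD change fixes a double shift: the *_MASK constants are
already shifted into place, so shifting them again cleared the wrong bits. A
stand-alone demonstration with hypothetical field parameters (mask 0x00f0 at
shift 4):

#include <assert.h>
#include <stdint.h>

#define F_MASK	0x00f0u
#define F_SHIFT	4u

int main(void)
{
	uint32_t old = 0xffffu, new = 0xffffu, val = 5;

	/* Old form: shifts the already-shifted mask again, clearing
	 * bits 8..11 instead of the field at bits 4..7.
	 */
	old &= ~(F_MASK << F_SHIFT);
	old |= (val << F_SHIFT) & F_MASK;

	/* Fixed form: use the mask as-is */
	new &= ~F_MASK;
	new |= (val << F_SHIFT) & F_MASK;

	assert(old == 0xf0ffu);	/* field untouched, wrong bits cleared */
	assert(new == 0xff5fu);	/* field correctly set to 5 */
	return 0;
}
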
@@ -412,6 +412,11 @@
u32 init_ops_size;
};
+enum BAR_ID {
+ BAR_ID_0, /* Used for GRC */
+ BAR_ID_1 /* Used for doorbells */
+};
+
#define DRV_MODULE_VERSION \
__stringify(QED_MAJOR_VERSION) "." \
__stringify(QED_MINOR_VERSION) "." \
@@ -495,10 +500,6 @@
bool b_rdma_enabled_in_prs;
u32 rdma_prs_search_reg;
- /* Array of sb_info of all status blocks */
- struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
- u16 num_sbs;
-
struct qed_cxt_mngr *p_cxt_mngr;
/* Flag indicating whether interrupts are enabled or not*/
@@ -537,6 +538,9 @@
u8 dcbx_no_edpm;
u8 db_bar_no_edpm;
+ /* L2-related */
+ struct qed_l2_info *p_l2_info;
+
struct qed_ptt *p_arfs_ptt;
struct qed_simd_fp_handler simd_proto_handler[64];
@@ -548,7 +552,6 @@
#endif
struct z_stream_s *stream;
- struct qed_roce_ll2_info *ll2;
};
struct pci_params {
@@ -598,16 +601,11 @@
enum qed_dev_type type;
/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB)
-#define QED_IS_BB_A0(dev) (QED_IS_BB(dev) && \
- CHIP_REV_IS_A0(dev))
#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && \
CHIP_REV_IS_B0(dev))
#define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH)
#define QED_IS_K2(dev) QED_IS_AH(dev)
-#define QED_GET_TYPE(dev) (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
- QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)
-
u16 vendor_id;
u16 device_id;
#define QED_DEV_ID_MASK 0xff00
@@ -621,7 +619,6 @@
u16 chip_rev;
#define CHIP_REV_MASK 0xf
#define CHIP_REV_SHIFT 12
-#define CHIP_REV_IS_A0(_cdev) (!(_cdev)->chip_rev)
#define CHIP_REV_IS_B0(_cdev) ((_cdev)->chip_rev == 1)
u16 chip_metal;
@@ -633,7 +630,7 @@
#define CHIP_BOND_ID_SHIFT 0
u8 num_engines;
- u8 num_ports_in_engines;
+ u8 num_ports_in_engine;
u8 num_funcs_in_port;
u8 path_id;
@@ -644,7 +641,6 @@
int pcie_width;
int pcie_speed;
- u8 ver_str[VER_SIZE];
/* Add MF related configuration */
u8 mcp_rev;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 6948457..e201214 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -135,7 +135,6 @@
struct qed_conn_type_cfg {
u32 cid_count;
- u32 cid_start;
u32 cids_per_vf;
struct qed_tid_seg tid_seg[TASK_SEGMENTS];
};
@@ -222,6 +221,9 @@
/* Acquired CIDs */
struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
+ struct qed_cid_acquired_map
+ acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];
+
/* ILT shadow table */
struct qed_dma_mem *ilt_shadow;
u32 pf_start_line;
@@ -1121,45 +1123,76 @@
static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 type;
+ u32 type, vf;
for (type = 0; type < MAX_CONN_TYPES; type++) {
kfree(p_mngr->acquired[type].cid_map);
p_mngr->acquired[type].max_count = 0;
p_mngr->acquired[type].start_cid = 0;
+
+ for (vf = 0; vf < MAX_NUM_VFS; vf++) {
+ kfree(p_mngr->acquired_vf[type][vf].cid_map);
+ p_mngr->acquired_vf[type][vf].max_count = 0;
+ p_mngr->acquired_vf[type][vf].start_cid = 0;
+ }
}
}
+static int
+qed_cid_map_alloc_single(struct qed_hwfn *p_hwfn,
+ u32 type,
+ u32 cid_start,
+ u32 cid_count, struct qed_cid_acquired_map *p_map)
+{
+ u32 size;
+
+ if (!cid_count)
+ return 0;
+
+ size = DIV_ROUND_UP(cid_count,
+ sizeof(unsigned long) * BITS_PER_BYTE) *
+ sizeof(unsigned long);
+ p_map->cid_map = kzalloc(size, GFP_KERNEL);
+ if (!p_map->cid_map)
+ return -ENOMEM;
+
+ p_map->max_count = cid_count;
+ p_map->start_cid = cid_start;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+ "Type %08x start: %08x count %08x\n",
+ type, p_map->start_cid, p_map->max_count);
+
+ return 0;
+}
+
static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 start_cid = 0;
- u32 type;
+ u32 start_cid = 0, vf_start_cid = 0;
+ u32 type, vf;
for (type = 0; type < MAX_CONN_TYPES; type++) {
- u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
- u32 size;
+ struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
+ struct qed_cid_acquired_map *p_map;
- if (cid_cnt == 0)
- continue;
-
- size = DIV_ROUND_UP(cid_cnt,
- sizeof(unsigned long) * BITS_PER_BYTE) *
- sizeof(unsigned long);
- p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
- if (!p_mngr->acquired[type].cid_map)
+ /* Handle PF maps */
+ p_map = &p_mngr->acquired[type];
+ if (qed_cid_map_alloc_single(p_hwfn, type, start_cid,
+ p_cfg->cid_count, p_map))
goto cid_map_fail;
- p_mngr->acquired[type].max_count = cid_cnt;
- p_mngr->acquired[type].start_cid = start_cid;
+ /* Handle VF maps */
+ for (vf = 0; vf < MAX_NUM_VFS; vf++) {
+ p_map = &p_mngr->acquired_vf[type][vf];
+ if (qed_cid_map_alloc_single(p_hwfn, type,
+ vf_start_cid,
+ p_cfg->cids_per_vf, p_map))
+ goto cid_map_fail;
+ }
- p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
-
- DP_VERBOSE(p_hwfn, QED_MSG_CXT,
- "Type %08x start: %08x count %08x\n",
- type, p_mngr->acquired[type].start_cid,
- p_mngr->acquired[type].max_count);
- start_cid += cid_cnt;
+ start_cid += p_cfg->cid_count;
+ vf_start_cid += p_cfg->cids_per_vf;
}
return 0;
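
The bitmap sizing in qed_cid_map_alloc_single() rounds the CID count up to
whole unsigned longs so find_first_zero_bit()/__set_bit() can work a word at
a time. A small stand-alone version of the same arithmetic (user-space
sketch with the kernel macros spelled out):

#include <stdio.h>

#define BITS_PER_BYTE	8
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned long cid_map_size(unsigned long cid_count)
{
	return DIV_ROUND_UP(cid_count,
			    sizeof(unsigned long) * BITS_PER_BYTE) *
	       sizeof(unsigned long);
}

int main(void)
{
	/* On a 64-bit build: ceil(100 / 64) = 2 words = 16 bytes */
	printf("%lu bytes for 100 CIDs\n", cid_map_size(100));
	return 0;
}
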
@@ -1265,19 +1298,36 @@
void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_cid_acquired_map *p_map;
+ struct qed_conn_type_cfg *p_cfg;
int type;
+ u32 len;
/* Reset acquired cids */
for (type = 0; type < MAX_CONN_TYPES; type++) {
- u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+ u32 vf;
- if (cid_cnt == 0)
+ p_cfg = &p_mngr->conn_cfg[type];
+ if (p_cfg->cid_count) {
+ p_map = &p_mngr->acquired[type];
+ len = DIV_ROUND_UP(p_map->max_count,
+ sizeof(unsigned long) *
+ BITS_PER_BYTE) *
+ sizeof(unsigned long);
+ memset(p_map->cid_map, 0, len);
+ }
+
+ if (!p_cfg->cids_per_vf)
continue;
- memset(p_mngr->acquired[type].cid_map, 0,
- DIV_ROUND_UP(cid_cnt,
- sizeof(unsigned long) * BITS_PER_BYTE) *
- sizeof(unsigned long));
+ for (vf = 0; vf < MAX_NUM_VFS; vf++) {
+ p_map = &p_mngr->acquired_vf[type][vf];
+ len = DIV_ROUND_UP(p_map->max_count,
+ sizeof(unsigned long) *
+ BITS_PER_BYTE) *
+ sizeof(unsigned long);
+ memset(p_map->cid_map, 0, len);
+ }
}
}
@@ -1841,91 +1891,145 @@
qed_prs_init_pf(p_hwfn);
}
-int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
- enum protocol_type type, u32 *p_cid)
+int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *p_cid, u8 vfid)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_cid_acquired_map *p_map;
u32 rel_cid;
- if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
+ if (type >= MAX_CONN_TYPES) {
DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
return -EINVAL;
}
- rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
- p_mngr->acquired[type].max_count);
+ if (vfid >= MAX_NUM_VFS && vfid != QED_CXT_PF_CID) {
+ DP_NOTICE(p_hwfn, "VF [%02x] is out of range\n", vfid);
+ return -EINVAL;
+ }
- if (rel_cid >= p_mngr->acquired[type].max_count) {
+ /* Determine the right map to take this CID from */
+ if (vfid == QED_CXT_PF_CID)
+ p_map = &p_mngr->acquired[type];
+ else
+ p_map = &p_mngr->acquired_vf[type][vfid];
+
+ if (!p_map->cid_map) {
+ DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
+ return -EINVAL;
+ }
+
+ rel_cid = find_first_zero_bit(p_map->cid_map, p_map->max_count);
+
+ if (rel_cid >= p_map->max_count) {
DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
return -EINVAL;
}
- __set_bit(rel_cid, p_mngr->acquired[type].cid_map);
+ __set_bit(rel_cid, p_map->cid_map);
- *p_cid = rel_cid + p_mngr->acquired[type].start_cid;
+ *p_cid = rel_cid + p_map->start_cid;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+ "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
+ *p_cid, rel_cid, vfid, type);
return 0;
}
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *p_cid)
+{
+ return _qed_cxt_acquire_cid(p_hwfn, type, p_cid, QED_CXT_PF_CID);
+}
+
static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
- u32 cid, enum protocol_type *p_type)
+ u32 cid,
+ u8 vfid,
+ enum protocol_type *p_type,
+ struct qed_cid_acquired_map **pp_map)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- struct qed_cid_acquired_map *p_map;
- enum protocol_type p;
u32 rel_cid;
/* Iterate over protocols and find matching cid range */
- for (p = 0; p < MAX_CONN_TYPES; p++) {
- p_map = &p_mngr->acquired[p];
+ for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
+ if (vfid == QED_CXT_PF_CID)
+ *pp_map = &p_mngr->acquired[*p_type];
+ else
+ *pp_map = &p_mngr->acquired_vf[*p_type][vfid];
- if (!p_map->cid_map)
+ if (!((*pp_map)->cid_map))
continue;
- if (cid >= p_map->start_cid &&
- cid < p_map->start_cid + p_map->max_count)
+ if (cid >= (*pp_map)->start_cid &&
+ cid < (*pp_map)->start_cid + (*pp_map)->max_count)
break;
}
- *p_type = p;
- if (p == MAX_CONN_TYPES) {
- DP_NOTICE(p_hwfn, "Invalid CID %d", cid);
- return false;
+ if (*p_type == MAX_CONN_TYPES) {
+ DP_NOTICE(p_hwfn, "Invalid CID %d vfid %02x", cid, vfid);
+ goto fail;
}
- rel_cid = cid - p_map->start_cid;
- if (!test_bit(rel_cid, p_map->cid_map)) {
- DP_NOTICE(p_hwfn, "CID %d not acquired", cid);
- return false;
+ rel_cid = cid - (*pp_map)->start_cid;
+ if (!test_bit(rel_cid, (*pp_map)->cid_map)) {
+ DP_NOTICE(p_hwfn, "CID %d [vfid %02x] not acquired",
+ cid, vfid);
+ goto fail;
}
+
return true;
+fail:
+ *p_type = MAX_CONN_TYPES;
+ *pp_map = NULL;
+ return false;
}
-void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
+void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid)
{
- struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_cid_acquired_map *p_map = NULL;
enum protocol_type type;
bool b_acquired;
u32 rel_cid;
+ if (vfid != QED_CXT_PF_CID && vfid >= MAX_NUM_VFS) {
+ DP_NOTICE(p_hwfn,
+ "Trying to return incorrect CID belonging to VF %02x\n",
+ vfid);
+ return;
+ }
+
/* Test acquired and find matching per-protocol map */
- b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);
+ b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, vfid,
+ &type, &p_map);
if (!b_acquired)
return;
- rel_cid = cid - p_mngr->acquired[type].start_cid;
- __clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
+ rel_cid = cid - p_map->start_cid;
+ clear_bit(rel_cid, p_map->cid_map);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+ "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
+ cid, rel_cid, vfid, type);
+}
+
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
+{
+ _qed_cxt_release_cid(p_hwfn, cid, QED_CXT_PF_CID);
}
int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_cid_acquired_map *p_map = NULL;
u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
enum protocol_type type;
bool b_acquired;
/* Test acquired and find matching per-protocol map */
- b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
+ b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid,
+ QED_CXT_PF_CID, &type, &p_map);
if (!b_acquired)
return -EINVAL;
@@ -2012,8 +2116,12 @@
struct qed_eth_pf_params *p_params =
&p_hwfn->pf_params.eth_pf_params;
- qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
- p_params->num_cons, 1);
+ if (!p_params->num_vf_cons)
+ p_params->num_vf_cons =
+ ETH_PF_PARAMS_VF_CONS_DEFAULT;
+ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ p_params->num_cons,
+ p_params->num_vf_cons);
p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
break;
}
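
A hedged usage sketch of the new VF-aware variants (hypothetical caller; the
protocol choice and error handling mirror the functions above):

static int example_cid_cycle(struct qed_hwfn *p_hwfn, u8 vfid)
{
	u32 cid;
	int rc;

	/* vfid is an engine-relative VF index, or QED_CXT_PF_CID to use
	 * the PF's own map.
	 */
	rc = _qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid, vfid);
	if (rc)
		return rc;

	/* ... use the CID ... */

	/* Release must pass the same vfid so the right map is cleared */
	_qed_cxt_release_cid(p_hwfn, cid, vfid);

	return 0;
}
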
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 53ad532..1783634 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -54,19 +54,6 @@
};
/**
- * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type
- *
- * @param p_hwfn
- * @param type
- * @param p_cid
- *
- * @return int
- */
-int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
- enum protocol_type type,
- u32 *p_cid);
-
-/**
 * @brief qed_cxt_get_cid_info - Returns the context info for a specific cid
*
*
@@ -195,14 +182,51 @@
*/
int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+#define QED_CXT_PF_CID (0xff)
+
/**
* @brief qed_cxt_release - Release a cid
*
* @param p_hwfn
* @param cid
*/
-void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
- u32 cid);
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid);
+
+/**
+ * @brief _qed_cxt_release_cid - Release a cid belonging to a vf-queue
+ *
+ * @param p_hwfn
+ * @param cid
+ * @param vfid - engine-relative index. QED_CXT_PF_CID if the cid belongs to the PF
+ */
+void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid);
+
+/**
+ * @brief qed_cxt_acquire_cid - Acquire a new cid of a specific protocol type
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ *
+ * @return int
+ */
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *p_cid);
+
+/**
+ * @brief _qed_cxt_acquire_cid - Acquire a new cid of a specific protocol type
+ * for a vf-queue
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ * @param vfid - engine-relative index. QED_CXT_PF_CID if the cid belongs to the PF
+ *
+ * @return int
+ */
+int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *p_cid, u8 vfid);
+
int qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
enum qed_cxt_elem_type elem_type, u32 iid);
u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index d883ad5..eaca457 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -44,6 +44,7 @@
#include "qed_hsi.h"
#include "qed_sp.h"
#include "qed_sriov.h"
+#include "qed_rdma.h"
#ifdef CONFIG_DCB
#include <linux/qed/qed_eth_if.h>
#endif
@@ -191,17 +192,19 @@
qed_dcbx_set_params(struct qed_dcbx_results *p_data,
struct qed_hw_info *p_info,
bool enable,
- bool update,
u8 prio,
u8 tc,
enum dcbx_protocol_type type,
enum qed_pci_personality personality)
{
/* PF update ramrod data */
- p_data->arr[type].update = update;
p_data->arr[type].enable = enable;
p_data->arr[type].priority = prio;
p_data->arr[type].tc = tc;
+ if (enable)
+ p_data->arr[type].update = UPDATE_DCB;
+ else
+ p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
/* QM reconf data */
if (p_info->personality == personality)
@@ -213,7 +216,6 @@
qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
struct qed_hwfn *p_hwfn,
bool enable,
- bool update,
u8 prio, u8 tc, enum dcbx_protocol_type type)
{
struct qed_hw_info *p_info = &p_hwfn->hw_info;
@@ -231,7 +233,7 @@
personality = qed_dcbx_app_update[i].personality;
name = qed_dcbx_app_update[i].name;
- qed_dcbx_set_params(p_data, p_info, enable, update,
+ qed_dcbx_set_params(p_data, p_info, enable,
prio, tc, type, personality);
}
}
@@ -304,22 +306,11 @@
*/
enable = !(type == DCBX_PROTOCOL_ETH);
- qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+ qed_dcbx_update_app_info(p_data, p_hwfn, enable,
priority, tc, type);
}
}
- /* If RoCE-V2 TLV is not detected, driver need to use RoCE app
- * data for RoCE-v2 not the default app data.
- */
- if (!p_data->arr[DCBX_PROTOCOL_ROCE_V2].update &&
- p_data->arr[DCBX_PROTOCOL_ROCE].update) {
- tc = p_data->arr[DCBX_PROTOCOL_ROCE].tc;
- priority = p_data->arr[DCBX_PROTOCOL_ROCE].priority;
- qed_dcbx_update_app_info(p_data, p_hwfn, true, true,
- priority, tc, DCBX_PROTOCOL_ROCE_V2);
- }
-
/* Update ramrod protocol data and hw_info fields
* with default info when corresponding APP TLV's are not detected.
* The enabled field has a different logic for ethernet as only for
@@ -332,8 +323,8 @@
if (p_data->arr[type].update)
continue;
- enable = !(type == DCBX_PROTOCOL_ETH);
- qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+ enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
+ qed_dcbx_update_app_info(p_data, p_hwfn, enable,
priority, tc, type);
}
@@ -902,10 +893,33 @@
/* update storm FW with negotiation results */
qed_sp_pf_update(p_hwfn);
+
+ /* For RoCE PFs, we may want to enable/disable DPM
+ * when a DCBx change occurs
+ */
+ if (p_hwfn->hw_info.personality ==
+ QED_PCI_ETH_ROCE)
+ qed_roce_dpm_dcbx(p_hwfn, p_ptt);
}
}
qed_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type);
+
+ if (type == QED_DCBX_OPERATIONAL_MIB) {
+ struct qed_dcbx_results *p_data;
+ u16 val;
+
+ /* Configure in NIG which protocols support EDPM and should
+ * honor PFC.
+ */
+ p_data = &p_hwfn->p_dcbx_info->results;
+ val = (0x1 << p_data->arr[DCBX_PROTOCOL_ROCE].tc) |
+ (0x1 << p_data->arr[DCBX_PROTOCOL_ROCE_V2].tc);
+ val <<= NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT;
+ val |= NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN;
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_EDPM_CTRL, val);
+ }
+
qed_dcbx_aen(p_hwfn, type);
return rc;
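
The NIG programming above builds a per-TC enable bitfield. A stand-alone
worked example with hypothetical traffic classes (RoCE on TC 3, RoCE v2 on
TC 5) and an illustrative shift of 8 for the TC_EN field; the real positions
come from the NIG_REG_TX_EDPM_CTRL_* definitions:

#include <stdint.h>
#include <stdio.h>

#define TX_EDPM_EN		0x1u	/* hypothetical global enable bit */
#define TX_EDPM_TC_EN_SHIFT	8u	/* hypothetical field position */

int main(void)
{
	uint16_t val;

	val = (0x1u << 3) | (0x1u << 5);	/* 0x0028: TCs 3 and 5 */
	val <<= TX_EDPM_TC_EN_SHIFT;		/* 0x2800: into TC_EN field */
	val |= TX_EDPM_EN;			/* 0x2801: plus global enable */

	printf("NIG_REG_TX_EDPM_CTRL value: 0x%04x\n", val);
	return 0;
}
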
@@ -923,6 +937,7 @@
void qed_dcbx_info_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->p_dcbx_info);
+ p_hwfn->p_dcbx_info = NULL;
}
static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
@@ -944,17 +959,18 @@
p_dest->pf_id = p_src->pf_id;
update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
- p_dest->update_fcoe_dcb_data_flag = update_flag;
+ p_dest->update_fcoe_dcb_data_mode = update_flag;
update_flag = p_src->arr[DCBX_PROTOCOL_ROCE].update;
- p_dest->update_roce_dcb_data_flag = update_flag;
+ p_dest->update_roce_dcb_data_mode = update_flag;
+
update_flag = p_src->arr[DCBX_PROTOCOL_ROCE_V2].update;
- p_dest->update_roce_dcb_data_flag = update_flag;
+ p_dest->update_rroce_dcb_data_mode = update_flag;
update_flag = p_src->arr[DCBX_PROTOCOL_ISCSI].update;
- p_dest->update_iscsi_dcb_data_flag = update_flag;
+ p_dest->update_iscsi_dcb_data_mode = update_flag;
update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
- p_dest->update_eth_dcb_data_flag = update_flag;
+ p_dest->update_eth_dcb_data_mode = update_flag;
p_dcb_data = &p_dest->fcoe_dcb_data;
qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE);
@@ -1458,7 +1474,7 @@
break;
case DCB_CAP_ATTR_DCBX:
*cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE |
- DCB_CAP_DCBX_VER_IEEE);
+ DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_STATIC);
break;
default:
*cap = false;
@@ -1532,6 +1548,8 @@
mode |= DCB_CAP_DCBX_VER_IEEE;
if (dcbx_info->operational.cee)
mode |= DCB_CAP_DCBX_VER_CEE;
+ if (dcbx_info->operational.local)
+ mode |= DCB_CAP_DCBX_STATIC;
DP_VERBOSE(hwfn, QED_MSG_DCB, "dcb mode = %d\n", mode);
kfree(dcbx_info);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index 414e262..5feb90e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -52,7 +52,7 @@
struct qed_dcbx_app_data {
bool enable; /* DCB enabled */
- bool update; /* Update indication */
+ u8 update; /* Update indication */
u8 priority; /* Priority */
u8 tc; /* Traffic Class */
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index a672f6a..03c3cf7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -15,13 +15,6 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
-/* Chip IDs enum */
-enum chip_ids {
- CHIP_BB_B0,
- CHIP_K2,
- MAX_CHIP_IDS
-};
-
/* Memory groups enum */
enum mem_groups {
MEM_GROUP_PXP_MEM,
@@ -33,7 +26,6 @@
MEM_GROUP_BRB_MEM,
MEM_GROUP_PRS_MEM,
MEM_GROUP_SDM_MEM,
- MEM_GROUP_PBUF,
MEM_GROUP_IOR,
MEM_GROUP_RAM,
MEM_GROUP_BTB_RAM,
@@ -45,6 +37,7 @@
MEM_GROUP_CAU_PI,
MEM_GROUP_CAU_MEM,
MEM_GROUP_PXP_ILT,
+ MEM_GROUP_PBUF,
MEM_GROUP_MULD_MEM,
MEM_GROUP_BTB_MEM,
MEM_GROUP_IGU_MEM,
@@ -66,7 +59,6 @@
"BRB_MEM",
"PRS_MEM",
"SDM_MEM",
- "PBUF",
"IOR",
"RAM",
"BTB_RAM",
@@ -78,6 +70,7 @@
"CAU_PI",
"CAU_MEM",
"PXP_ILT",
+ "PBUF",
"MULD_MEM",
"BTB_MEM",
"IGU_MEM",
@@ -88,48 +81,59 @@
};
/* Idle check conditions */
-static u32 cond4(const u32 *r, const u32 *imm)
+
+static u32 cond5(const u32 *r, const u32 *imm)
{
return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
}
-static u32 cond6(const u32 *r, const u32 *imm)
+static u32 cond7(const u32 *r, const u32 *imm)
{
return ((r[0] >> imm[0]) & imm[1]) != imm[2];
}
-static u32 cond5(const u32 *r, const u32 *imm)
+static u32 cond14(const u32 *r, const u32 *imm)
+{
+ return (r[0] != imm[0]) && (((r[1] >> imm[1]) & imm[2]) == imm[3]);
+}
+
+static u32 cond6(const u32 *r, const u32 *imm)
{
return (r[0] & imm[0]) != imm[1];
}
-static u32 cond8(const u32 *r, const u32 *imm)
+static u32 cond9(const u32 *r, const u32 *imm)
{
return ((r[0] & imm[0]) >> imm[1]) !=
(((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
}
-static u32 cond9(const u32 *r, const u32 *imm)
+static u32 cond10(const u32 *r, const u32 *imm)
{
return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
}
-static u32 cond1(const u32 *r, const u32 *imm)
+static u32 cond4(const u32 *r, const u32 *imm)
{
return (r[0] & ~imm[0]) != imm[1];
}
static u32 cond0(const u32 *r, const u32 *imm)
{
+ return (r[0] & ~r[1]) != imm[0];
+}
+
+static u32 cond1(const u32 *r, const u32 *imm)
+{
return r[0] != imm[0];
}
-static u32 cond10(const u32 *r, const u32 *imm)
+static u32 cond11(const u32 *r, const u32 *imm)
{
return r[0] != r[1] && r[2] == imm[0];
}
-static u32 cond11(const u32 *r, const u32 *imm)
+static u32 cond12(const u32 *r, const u32 *imm)
{
return r[0] != r[1] && r[2] > imm[0];
}
@@ -139,12 +143,12 @@
return r[0] != r[1];
}
-static u32 cond12(const u32 *r, const u32 *imm)
+static u32 cond13(const u32 *r, const u32 *imm)
{
return r[0] & imm[0];
}
-static u32 cond7(const u32 *r, const u32 *imm)
+static u32 cond8(const u32 *r, const u32 *imm)
{
return r[0] < (r[1] - imm[0]);
}
@@ -169,6 +173,8 @@
cond10,
cond11,
cond12,
+ cond13,
+ cond14,
};
/******************************* Data Types **********************************/
@@ -181,11 +187,6 @@
MAX_PLATFORM_IDS
};
-struct dbg_array {
- const u32 *ptr;
- u32 size_in_dwords;
-};
-
struct chip_platform_defs {
u8 num_ports;
u8 num_pfs;
@@ -204,7 +205,9 @@
u32 delay_factor;
};
-/* Storm constant definitions */
+/* Storm constant definitions.
+ * Addresses are in bytes, sizes are in quad-regs.
+ */
struct storm_defs {
char letter;
enum block_id block_id;
@@ -218,13 +221,13 @@
u32 sem_sync_dbg_empty_addr;
u32 sem_slow_dbg_empty_addr;
u32 cm_ctx_wr_addr;
- u32 cm_conn_ag_ctx_lid_size; /* In quad-regs */
+ u32 cm_conn_ag_ctx_lid_size;
u32 cm_conn_ag_ctx_rd_addr;
- u32 cm_conn_st_ctx_lid_size; /* In quad-regs */
+ u32 cm_conn_st_ctx_lid_size;
u32 cm_conn_st_ctx_rd_addr;
- u32 cm_task_ag_ctx_lid_size; /* In quad-regs */
+ u32 cm_task_ag_ctx_lid_size;
u32 cm_task_ag_ctx_rd_addr;
- u32 cm_task_st_ctx_lid_size; /* In quad-regs */
+ u32 cm_task_st_ctx_lid_size;
u32 cm_task_st_ctx_rd_addr;
};
@@ -233,17 +236,23 @@
const char *name;
bool has_dbg_bus[MAX_CHIP_IDS];
bool associated_to_storm;
- u32 storm_id; /* Valid only if associated_to_storm is true */
+
+ /* Valid only if associated_to_storm is true */
+ u32 storm_id;
enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
u32 dbg_select_addr;
- u32 dbg_cycle_enable_addr;
+ u32 dbg_enable_addr;
u32 dbg_shift_addr;
u32 dbg_force_valid_addr;
u32 dbg_force_frame_addr;
bool has_reset_bit;
- bool unreset; /* If true, the block is taken out of reset before dump */
+
+ /* If true, block is taken out of reset before dump */
+ bool unreset;
enum dbg_reset_regs reset_reg;
- u8 reset_bit_offset; /* Bit offset in reset register */
+
+ /* Bit offset in reset register */
+ u8 reset_bit_offset;
};
/* Reset register definitions */
@@ -262,12 +271,13 @@
u32 crash_preset_val;
};
+/* Address is in 128b units. Width is in bits. */
struct rss_mem_defs {
const char *mem_name;
const char *type_name;
- u32 addr; /* In 128b units */
+ u32 addr;
u32 num_entries[MAX_CHIP_IDS];
- u32 entry_width[MAX_CHIP_IDS]; /* In bits */
+ u32 entry_width[MAX_CHIP_IDS];
};
struct vfc_ram_defs {
@@ -289,10 +299,20 @@
struct phy_defs {
const char *phy_name;
+
+ /* PHY base GRC address */
u32 base_addr;
+
+ /* Relative address of indirect TBUS address register (bits 0..7) */
u32 tbus_addr_lo_addr;
+
+ /* Relative address of indirect TBUS address register (bits 8..10) */
u32 tbus_addr_hi_addr;
+
+ /* Relative address of indirect TBUS data register (bits 0..7) */
u32 tbus_data_lo_addr;
+
+ /* Relative address of indirect TBUS data register (bits 8..11) */
u32 tbus_data_hi_addr;
};
@@ -300,9 +320,11 @@
#define MAX_LCIDS 320
#define MAX_LTIDS 320
+
#define NUM_IOR_SETS 2
#define IORS_PER_SET 176
#define IOR_SET_OFFSET(set_id) ((set_id) * 256)
+
#define BYTES_IN_DWORD sizeof(u32)
/* In the macros below, size and offset are specified in bits */
@@ -315,6 +337,7 @@
#define FIELD_BIT_MASK(type, field) \
(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
FIELD_DWORD_SHIFT(type, field))
+
#define SET_VAR_FIELD(var, type, field, val) \
do { \
var[FIELD_DWORD_OFFSET(type, field)] &= \
@@ -322,31 +345,51 @@
var[FIELD_DWORD_OFFSET(type, field)] |= \
(val) << FIELD_DWORD_SHIFT(type, field); \
} while (0)
+
#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
do { \
for (i = 0; i < (arr_size); i++) \
qed_wr(dev, ptt, addr, (arr)[i]); \
} while (0)
+
#define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
do { \
for (i = 0; i < (arr_size); i++) \
(arr)[i] = qed_rd(dev, ptt, addr); \
} while (0)
+#ifndef DWORDS_TO_BYTES
#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
+#endif
+#ifndef BYTES_TO_DWORDS
#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
+#endif
+
+/* extra lines include a signature line + optional latency events line */
+#ifndef NUM_DBG_LINES
+#define NUM_EXTRA_DBG_LINES(block_desc) \
+ (1 + ((block_desc)->has_latency_events ? 1 : 0))
+#define NUM_DBG_LINES(block_desc) \
+ ((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
+#endif
+
#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
+
#define REG_DUMP_LEN_SHIFT 24
#define MEM_DUMP_ENTRY_SIZE_DWORDS \
BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
+
#define IDLE_CHK_RULE_SIZE_DWORDS \
BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
+
#define IDLE_CHK_RESULT_HDR_DWORDS \
BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
+
#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
+
#define IDLE_CHK_MAX_ENTRIES_SIZE 32
/* The sizes and offsets below are specified in bits */
@@ -363,62 +406,92 @@
#define VFC_RAM_ADDR_ROW_OFFSET 2
#define VFC_RAM_ADDR_ROW_SIZE 10
#define VFC_RAM_RESP_STRUCT_SIZE 256
+
#define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
#define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
#define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
#define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
#define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
#define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
+
#define NUM_VFC_RAM_TYPES 4
+
#define VFC_CAM_NUM_ROWS 512
+
#define VFC_OPCODE_CAM_RD 14
#define VFC_OPCODE_RAM_RD 0
+
#define NUM_RSS_MEM_TYPES 5
+
#define NUM_BIG_RAM_TYPES 3
#define BIG_RAM_BLOCK_SIZE_BYTES 128
#define BIG_RAM_BLOCK_SIZE_DWORDS \
BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
+
#define NUM_PHY_TBUS_ADDRESSES 2048
#define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
+
#define RESET_REG_UNRESET_OFFSET 4
+
#define STALL_DELAY_MS 500
+
#define STATIC_DEBUG_LINE_DWORDS 9
-#define NUM_DBG_BUS_LINES 256
+
#define NUM_COMMON_GLOBAL_PARAMS 8
+
#define FW_IMG_MAIN 1
-#define REG_FIFO_DEPTH_ELEMENTS 32
+
+#ifndef REG_FIFO_ELEMENT_DWORDS
#define REG_FIFO_ELEMENT_DWORDS 2
+#endif
+#define REG_FIFO_DEPTH_ELEMENTS 32
#define REG_FIFO_DEPTH_DWORDS \
(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
-#define IGU_FIFO_DEPTH_ELEMENTS 64
+
+#ifndef IGU_FIFO_ELEMENT_DWORDS
#define IGU_FIFO_ELEMENT_DWORDS 4
+#endif
+#define IGU_FIFO_DEPTH_ELEMENTS 64
#define IGU_FIFO_DEPTH_DWORDS \
(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
-#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
+
+#ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
+#endif
+#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
PROTECTION_OVERRIDE_ELEMENT_DWORDS)
+
#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
(MCP_REG_SCRATCH + \
offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
-#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
+
#define EMPTY_FW_VERSION_STR "???_???_???_???"
#define EMPTY_FW_IMAGE_STR "???????????????"
/***************************** Constant Arrays *******************************/
+struct dbg_array {
+ const u32 *ptr;
+ u32 size_in_dwords;
+};
+
/* Debug arrays */
-static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
+static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
- { "bb_b0",
- { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, {0, 0, 0},
- {0, 0, 0}, {0, 0, 0} } },
- { "k2",
- { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, {0, 0, 0},
- {0, 0, 0}, {0, 0, 0} } }
+ { "bb",
+ {{MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB},
+ {0, 0, 0},
+ {0, 0, 0},
+ {0, 0, 0} } },
+ { "ah",
+ {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2},
+ {0, 0, 0},
+ {0, 0, 0},
+ {0, 0, 0} } }
};
/* Storm constant definitions array */
@@ -427,69 +500,74 @@
{'T', BLOCK_TSEM,
{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
TSEM_REG_FAST_MEMORY,
- TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
- TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
- TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
+ TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
+ TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
TCM_REG_CTX_RBC_ACCS,
4, TCM_REG_AGG_CON_CTX,
16, TCM_REG_SM_CON_CTX,
2, TCM_REG_AGG_TASK_CTX,
4, TCM_REG_SM_TASK_CTX},
+
/* Mstorm */
{'M', BLOCK_MSEM,
{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
MSEM_REG_FAST_MEMORY,
- MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
- MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
- MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY,
+ MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
+ MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
MCM_REG_CTX_RBC_ACCS,
1, MCM_REG_AGG_CON_CTX,
10, MCM_REG_SM_CON_CTX,
2, MCM_REG_AGG_TASK_CTX,
7, MCM_REG_SM_TASK_CTX},
+
/* Ustorm */
{'U', BLOCK_USEM,
{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
USEM_REG_FAST_MEMORY,
- USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
- USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
- USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY,
+ USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
+ USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
UCM_REG_CTX_RBC_ACCS,
2, UCM_REG_AGG_CON_CTX,
13, UCM_REG_SM_CON_CTX,
3, UCM_REG_AGG_TASK_CTX,
3, UCM_REG_SM_TASK_CTX},
+
/* Xstorm */
{'X', BLOCK_XSEM,
{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
XSEM_REG_FAST_MEMORY,
- XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
- XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
- XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY,
+ XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
+ XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
XCM_REG_CTX_RBC_ACCS,
9, XCM_REG_AGG_CON_CTX,
15, XCM_REG_SM_CON_CTX,
0, 0,
0, 0},
+
/* Ystorm */
{'Y', BLOCK_YSEM,
{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
YSEM_REG_FAST_MEMORY,
- YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
- YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
- YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
+ YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
+ YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
YCM_REG_CTX_RBC_ACCS,
2, YCM_REG_AGG_CON_CTX,
3, YCM_REG_SM_CON_CTX,
2, YCM_REG_AGG_TASK_CTX,
12, YCM_REG_SM_TASK_CTX},
+
/* Pstorm */
{'P', BLOCK_PSEM,
{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
PSEM_REG_FAST_MEMORY,
- PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
- PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG,
- PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY,
+ PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
+ PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
PCM_REG_CTX_RBC_ACCS,
0, 0,
10, PCM_REG_SM_CON_CTX,
@@ -498,6 +576,7 @@
};
/* Block definitions array */
+
static struct block_defs block_grc_defs = {
"grc",
{true, true}, false, 0,
@@ -587,9 +666,11 @@
"pcie",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
- PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
- PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
- PCIE_REG_DBG_COMMON_FORCE_FRAME,
+ PCIE_REG_DBG_COMMON_SELECT_K2,
+ PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
+ PCIE_REG_DBG_COMMON_SHIFT_K2,
+ PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
+ PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
false, false, MAX_DBG_RESET_REGS, 0
};
@@ -691,9 +772,9 @@
"pglcs",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
- PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
- PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
- PGLCS_REG_DBG_FORCE_FRAME,
+ PGLCS_REG_DBG_SELECT_K2, PGLCS_REG_DBG_DWORD_ENABLE_K2,
+ PGLCS_REG_DBG_SHIFT_K2, PGLCS_REG_DBG_FORCE_VALID_K2,
+ PGLCS_REG_DBG_FORCE_FRAME_K2,
true, false, DBG_RESET_REG_MISCS_PL_HV, 2
};
@@ -991,10 +1072,11 @@
"yuld",
{true, true}, false, 0,
{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
- YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
- YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
- YULD_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15
+ YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
+ YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
+ YULD_REG_DBG_FORCE_FRAME_BB_K2,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+ 15
};
static struct block_defs block_xyld_defs = {
@@ -1143,9 +1225,9 @@
"umac",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
- UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
- UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
- UMAC_REG_DBG_FORCE_FRAME,
+ UMAC_REG_DBG_SELECT_K2, UMAC_REG_DBG_DWORD_ENABLE_K2,
+ UMAC_REG_DBG_SHIFT_K2, UMAC_REG_DBG_FORCE_VALID_K2,
+ UMAC_REG_DBG_FORCE_FRAME_K2,
true, false, DBG_RESET_REG_MISCS_PL_HV, 6
};
@@ -1177,9 +1259,9 @@
"wol",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
- WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
- WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
- WOL_REG_DBG_FORCE_FRAME,
+ WOL_REG_DBG_SELECT_K2, WOL_REG_DBG_DWORD_ENABLE_K2,
+ WOL_REG_DBG_SHIFT_K2, WOL_REG_DBG_FORCE_VALID_K2,
+ WOL_REG_DBG_FORCE_FRAME_K2,
true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
};
@@ -1187,9 +1269,9 @@
"bmbn",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
- BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE,
- BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
- BMBN_REG_DBG_FORCE_FRAME,
+ BMBN_REG_DBG_SELECT_K2, BMBN_REG_DBG_DWORD_ENABLE_K2,
+ BMBN_REG_DBG_SHIFT_K2, BMBN_REG_DBG_FORCE_VALID_K2,
+ BMBN_REG_DBG_FORCE_FRAME_K2,
false, false, MAX_DBG_RESET_REGS, 0
};
@@ -1204,9 +1286,9 @@
"nwm",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
- NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
- NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
- NWM_REG_DBG_FORCE_FRAME,
+ NWM_REG_DBG_SELECT_K2, NWM_REG_DBG_DWORD_ENABLE_K2,
+ NWM_REG_DBG_SHIFT_K2, NWM_REG_DBG_FORCE_VALID_K2,
+ NWM_REG_DBG_FORCE_FRAME_K2,
true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
};
@@ -1214,9 +1296,9 @@
"nws",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
- NWS_REG_DBG_SELECT, NWS_REG_DBG_DWORD_ENABLE,
- NWS_REG_DBG_SHIFT, NWS_REG_DBG_FORCE_VALID,
- NWS_REG_DBG_FORCE_FRAME,
+ NWS_REG_DBG_SELECT_K2, NWS_REG_DBG_DWORD_ENABLE_K2,
+ NWS_REG_DBG_SHIFT_K2, NWS_REG_DBG_FORCE_VALID_K2,
+ NWS_REG_DBG_FORCE_FRAME_K2,
true, false, DBG_RESET_REG_MISCS_PL_HV, 12
};
@@ -1224,9 +1306,9 @@
"ms",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
- MS_REG_DBG_SELECT, MS_REG_DBG_DWORD_ENABLE,
- MS_REG_DBG_SHIFT, MS_REG_DBG_FORCE_VALID,
- MS_REG_DBG_FORCE_FRAME,
+ MS_REG_DBG_SELECT_K2, MS_REG_DBG_DWORD_ENABLE_K2,
+ MS_REG_DBG_SHIFT_K2, MS_REG_DBG_FORCE_VALID_K2,
+ MS_REG_DBG_FORCE_FRAME_K2,
true, false, DBG_RESET_REG_MISCS_PL_HV, 13
};
@@ -1234,9 +1316,11 @@
"phy_pcie",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
- PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
- PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
- PCIE_REG_DBG_COMMON_FORCE_FRAME,
+ PCIE_REG_DBG_COMMON_SELECT_K2,
+ PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
+ PCIE_REG_DBG_COMMON_SHIFT_K2,
+ PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
+ PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
false, false, MAX_DBG_RESET_REGS, 0
};
@@ -1261,6 +1345,13 @@
false, false, MAX_DBG_RESET_REGS, 0
};
+static struct block_defs block_rgsrc_defs = {
+ "rgsrc", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
static struct block_defs block_tgfs_defs = {
"tgfs", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
@@ -1268,6 +1359,13 @@
false, false, MAX_DBG_RESET_REGS, 0
};
+static struct block_defs block_tgsrc_defs = {
+ "tgsrc", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
static struct block_defs block_ptld_defs = {
"ptld", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
@@ -1350,6 +1448,8 @@
&block_muld_defs,
&block_yuld_defs,
&block_xyld_defs,
+ &block_ptld_defs,
+ &block_ypld_defs,
&block_prm_defs,
&block_pbf_pb1_defs,
&block_pbf_pb2_defs,
@@ -1363,6 +1463,10 @@
&block_tcfc_defs,
&block_igu_defs,
&block_cau_defs,
+ &block_rgfs_defs,
+ &block_rgsrc_defs,
+ &block_tgfs_defs,
+ &block_tgsrc_defs,
&block_umac_defs,
&block_xmac_defs,
&block_dbg_defs,
@@ -1376,10 +1480,6 @@
&block_phy_pcie_defs,
&block_led_defs,
&block_avs_wrap_defs,
- &block_rgfs_defs,
- &block_tgfs_defs,
- &block_ptld_defs,
- &block_ypld_defs,
&block_misc_aeu_defs,
&block_bar0_map_defs,
};
@@ -1392,66 +1492,151 @@
};
static struct grc_param_defs s_grc_param_defs[] = {
- {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */
- {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */
- {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */
- {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */
- {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */
- {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */
- {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */
- {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */
- {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */
- {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */
- {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */
- {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */
+ /* DBG_GRC_PARAM_DUMP_TSTORM */
+ {{1, 1}, 0, 1, false, 1, 1},
+
+ /* DBG_GRC_PARAM_DUMP_MSTORM */
+ {{1, 1}, 0, 1, false, 1, 1},
+
+ /* DBG_GRC_PARAM_DUMP_USTORM */
+ {{1, 1}, 0, 1, false, 1, 1},
+
+ /* DBG_GRC_PARAM_DUMP_XSTORM */
+ {{1, 1}, 0, 1, false, 1, 1},
+
+ /* DBG_GRC_PARAM_DUMP_YSTORM */
+ {{1, 1}, 0, 1, false, 1, 1},
+
+ /* DBG_GRC_PARAM_DUMP_PSTORM */
+ {{1, 1}, 0, 1, false, 1, 1},
+
+ /* DBG_GRC_PARAM_DUMP_REGS */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_RAM */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_PBUF */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_IOR */
+ {{0, 0}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_VFC */
+ {{0, 0}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_CM_CTX */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_ILT */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_RSS */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_CAU */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_QM */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_MCP */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_RESERVED */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_CFC */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_IGU */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_BRB */
+ {{0, 0}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_BTB */
+ {{0, 0}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_BMB */
+ {{0, 0}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_NIG */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_MULD */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_PRS */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_DMAE */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_TM */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_SDM */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_DIF */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_STATIC */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_UNSTALL */
+ {{0, 0}, 0, 1, false, 0, 0},
+
+ /* DBG_GRC_PARAM_NUM_LCIDS */
{{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
- MAX_LCIDS}, /* DBG_GRC_PARAM_NUM_LCIDS */
+ MAX_LCIDS},
+
+ /* DBG_GRC_PARAM_NUM_LTIDS */
{{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
- MAX_LTIDS}, /* DBG_GRC_PARAM_NUM_LTIDS */
- {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */
- {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */
- {{0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PHY */
- {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_NO_MCP */
- {{0, 0}, 0, 1, false, 0, 0} /* DBG_GRC_PARAM_NO_FW_VER */
+ MAX_LTIDS},
+
+ /* DBG_GRC_PARAM_EXCLUDE_ALL */
+ {{0, 0}, 0, 1, true, 0, 0},
+
+ /* DBG_GRC_PARAM_CRASH */
+ {{0, 0}, 0, 1, true, 0, 0},
+
+ /* DBG_GRC_PARAM_PARITY_SAFE */
+ {{0, 0}, 0, 1, false, 1, 0},
+
+ /* DBG_GRC_PARAM_DUMP_CM */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_PHY */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_NO_MCP */
+ {{0, 0}, 0, 1, false, 0, 0},
+
+ /* DBG_GRC_PARAM_NO_FW_VER */
+ {{0, 0}, 0, 1, false, 0, 0}
};
static struct rss_mem_defs s_rss_mem_defs[] = {
{ "rss_mem_cid", "rss_cid", 0,
{256, 320},
{32, 32} },
+
{ "rss_mem_key_msb", "rss_key", 1024,
{128, 208},
{256, 256} },
+
{ "rss_mem_key_lsb", "rss_key", 2048,
{128, 208},
{64, 64} },
+
{ "rss_mem_info", "rss_info", 3072,
{128, 208},
{16, 16} },
+
{ "rss_mem_ind", "rss_ind", 4096,
- {(128 * 128), (128 * 208)},
+ {16384, 26624},
{16, 16} }
};
@@ -1466,50 +1651,71 @@
{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
{4800, 5632} },
+
{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
{2880, 3680} },
+
{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
{1152, 1152} }
};
static struct reset_reg_defs s_reset_regs_defs[] = {
+ /* DBG_RESET_REG_MISCS_PL_UA */
{ MISCS_REG_RESET_PL_UA, 0x0,
- {true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */
+ {true, true} },
+
+ /* DBG_RESET_REG_MISCS_PL_HV */
{ MISCS_REG_RESET_PL_HV, 0x0,
- {true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */
- { MISCS_REG_RESET_PL_HV_2, 0x0,
- {false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */
+ {true, true} },
+
+ /* DBG_RESET_REG_MISCS_PL_HV_2 */
+ { MISCS_REG_RESET_PL_HV_2_K2, 0x0,
+ {false, true} },
+
+ /* DBG_RESET_REG_MISC_PL_UA */
{ MISC_REG_RESET_PL_UA, 0x0,
- {true, true} }, /* DBG_RESET_REG_MISC_PL_UA */
+ {true, true} },
+
+ /* DBG_RESET_REG_MISC_PL_HV */
{ MISC_REG_RESET_PL_HV, 0x0,
- {true, true} }, /* DBG_RESET_REG_MISC_PL_HV */
+ {true, true} },
+
+ /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
{ MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
- {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
+ {true, true} },
+
+ /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
{ MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
- {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
+ {true, true} },
+
+ /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
{ MISC_REG_RESET_PL_PDA_VAUX, 0x2,
- {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
+ {true, true} },
};
static struct phy_defs s_phy_defs[] = {
- {"nw_phy", NWS_REG_NWS_CMU, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8},
- {"sgmii_phy", MS_REG_MS_CMU, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131},
- {"pcie_phy0", PHY_PCIE_REG_PHY0, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
- {"pcie_phy1", PHY_PCIE_REG_PHY1, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
+ {"nw_phy", NWS_REG_NWS_CMU_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
+ {"sgmii_phy", MS_REG_MS_CMU_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
+ {"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
+ {"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
};
/**************************** Private Functions ******************************/
@@ -1556,7 +1762,7 @@
dev_data->chip_id = CHIP_K2;
dev_data->mode_enable[MODE_K2] = 1;
} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
- dev_data->chip_id = CHIP_BB_B0;
+ dev_data->chip_id = CHIP_BB;
dev_data->mode_enable[MODE_BB] = 1;
} else {
return DBG_STATUS_UNKNOWN_CHIP;
@@ -1569,9 +1775,20 @@
qed_dbg_grc_init_params(p_hwfn);
dev_data->initialized = true;
+
return DBG_STATUS_OK;
}
+static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
+ enum block_id block_id)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ return (struct dbg_bus_block *)&dbg_bus_blocks[block_id *
+ MAX_CHIP_IDS +
+ dev_data->chip_id];
+}
+
/* Reads the FW info structure for the specified Storm from the chip,
* and writes it to the specified fw_info pointer.
*/
@@ -1579,25 +1796,28 @@
struct qed_ptt *p_ptt,
u8 storm_id, struct fw_info *fw_info)
{
- /* Read first the address that points to fw_info location.
- * The address is located in the last line of the Storm RAM.
- */
- u32 addr = s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_INT_RAM +
- DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
- sizeof(struct fw_info_location);
+ struct storm_defs *storm = &s_storm_defs[storm_id];
struct fw_info_location fw_info_location;
- u32 *dest = (u32 *)&fw_info_location;
- u32 i;
+ u32 addr, i, *dest;
memset(&fw_info_location, 0, sizeof(fw_info_location));
memset(fw_info, 0, sizeof(*fw_info));
+
+ /* Read first the address that points to fw_info location.
+ * The address is located in the last line of the Storm RAM.
+ */
+ addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
+ DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
+ sizeof(fw_info_location);
+ dest = (u32 *)&fw_info_location;
+
for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
i++, addr += BYTES_IN_DWORD)
dest[i] = qed_rd(p_hwfn, p_ptt, addr);
+
+ /* Read FW version info from Storm RAM */
if (fw_info_location.size > 0 && fw_info_location.size <=
sizeof(*fw_info)) {
- /* Read FW version info from Storm RAM */
addr = fw_info_location.grc_addr;
dest = (u32 *)fw_info;
for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
@@ -1606,27 +1826,30 @@
}
}
-/* Dumps the specified string to the specified buffer. Returns the dumped size
- * in bytes (actual length + 1 for the null character termination).
+/* Dumps the specified string to the specified buffer.
+ * Returns the dumped size in bytes.
*/
static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
{
if (dump)
strcpy(dump_buf, str);
+
return (u32)strlen(str) + 1;
}
-/* Dumps zeros to align the specified buffer to dwords. Returns the dumped size
- * in bytes.
+/* Dumps zeros to align the specified buffer to dwords.
+ * Returns the dumped size in bytes.
*/
static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
{
- u8 offset_in_dword = (u8)(byte_offset & 0x3), align_size;
+ u8 offset_in_dword, align_size;
+ offset_in_dword = (u8)(byte_offset & 0x3);
align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
if (dump && align_size)
memset(dump_buf, 0, align_size);
+
return align_size;
}
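
qed_dump_align() pads the byte stream to the next dword boundary. The same
arithmetic as a stand-alone sketch:

#include <stdio.h>

#define BYTES_IN_DWORD	4u

static unsigned int align_pad(unsigned int byte_offset)
{
	unsigned int offset_in_dword = byte_offset & 0x3;

	/* 0 if already aligned, otherwise bytes to the next boundary */
	return offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
}

int main(void)
{
	/* e.g. a string ending at offset 6 needs 2 pad bytes; 8 needs 0 */
	printf("pad(6)=%u pad(8)=%u\n", align_pad(6), align_pad(8));
	return 0;
}
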
@@ -1653,6 +1876,7 @@
/* Align buffer to next dword */
offset += qed_dump_align(char_buf + offset, dump, offset);
+
return BYTES_TO_DWORDS(offset);
}
@@ -1681,6 +1905,7 @@
if (dump)
*(dump_buf + offset) = param_val;
offset++;
+
return offset;
}
@@ -1695,7 +1920,6 @@
char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
struct fw_info fw_info = { {0}, {0} };
- int printed_chars;
u32 offset = 0;
if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
@@ -1705,37 +1929,32 @@
for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
storm_id++) {
- /* Read FW version/image */
- if (!dev_data->block_in_reset
- [s_storm_defs[storm_id].block_id]) {
- /* read FW info for the current Storm */
- qed_read_fw_info(p_hwfn,
- p_ptt, storm_id, &fw_info);
+ struct storm_defs *storm = &s_storm_defs[storm_id];
- /* Create FW version/image strings */
- printed_chars =
- snprintf(fw_ver_str,
- sizeof(fw_ver_str),
- "%d_%d_%d_%d",
- fw_info.ver.num.major,
- fw_info.ver.num.minor,
- fw_info.ver.num.rev,
- fw_info.ver.num.eng);
- if (printed_chars < 0 || printed_chars >=
- sizeof(fw_ver_str))
- DP_NOTICE(p_hwfn,
- "Unexpected debug error: invalid FW version string\n");
- switch (fw_info.ver.image_id) {
- case FW_IMG_MAIN:
- strcpy(fw_img_str, "main");
- break;
- default:
- strcpy(fw_img_str, "unknown");
- break;
- }
+ /* Read FW version/image */
+ if (dev_data->block_in_reset[storm->block_id])
+ continue;
- found = true;
+ /* Read FW info for the current Storm */
+ qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
+
+ /* Create FW version/image strings */
+ if (snprintf(fw_ver_str, sizeof(fw_ver_str),
+ "%d_%d_%d_%d", fw_info.ver.num.major,
+ fw_info.ver.num.minor, fw_info.ver.num.rev,
+ fw_info.ver.num.eng) < 0)
+ DP_NOTICE(p_hwfn,
+ "Unexpected debug error: invalid FW version string\n");
+ switch (fw_info.ver.image_id) {
+ case FW_IMG_MAIN:
+ strcpy(fw_img_str, "main");
+ break;
+ default:
+ strcpy(fw_img_str, "unknown");
+ break;
}
+
+ found = true;
}
}
@@ -1747,6 +1966,7 @@
offset += qed_dump_num_param(dump_buf + offset,
dump,
"fw-timestamp", fw_info.ver.timestamp);
+
return offset;
}
@@ -1759,17 +1979,18 @@
{
char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
- if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
+ if (dump &&
+ !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
u32 global_section_offsize, global_section_addr, mfw_ver;
u32 public_data_addr, global_section_offsize_addr;
- int printed_chars;
- /* Find MCP public data GRC address.
- * Needs to be ORed with MCP_REG_SCRATCH due to a HW bug.
+ /* Find MCP public data GRC address. Needs to be ORed with
+ * MCP_REG_SCRATCH due to a HW bug.
*/
- public_data_addr = qed_rd(p_hwfn, p_ptt,
+ public_data_addr = qed_rd(p_hwfn,
+ p_ptt,
MISC_REG_SHARED_MEM_ADDR) |
- MCP_REG_SCRATCH;
+ MCP_REG_SCRATCH;
/* Find MCP public global section offset */
global_section_offsize_addr = public_data_addr +
@@ -1778,9 +1999,9 @@
sizeof(offsize_t) * PUBLIC_GLOBAL;
global_section_offsize = qed_rd(p_hwfn, p_ptt,
global_section_offsize_addr);
- global_section_addr = MCP_REG_SCRATCH +
- (global_section_offsize &
- OFFSIZE_OFFSET_MASK) * 4;
+ global_section_addr =
+ MCP_REG_SCRATCH +
+ (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
/* Read MFW version from MCP public global section */
mfw_ver = qed_rd(p_hwfn, p_ptt,
@@ -1788,13 +2009,9 @@
offsetof(struct public_global, mfw_ver));
/* Dump MFW version param */
- printed_chars = snprintf(mfw_ver_str, sizeof(mfw_ver_str),
- "%d_%d_%d_%d",
- (u8) (mfw_ver >> 24),
- (u8) (mfw_ver >> 16),
- (u8) (mfw_ver >> 8),
- (u8) mfw_ver);
- if (printed_chars < 0 || printed_chars >= sizeof(mfw_ver_str))
+ if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
+ (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
+ (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
DP_NOTICE(p_hwfn,
"Unexpected debug error: invalid MFW version string\n");
}
@@ -1820,11 +2037,12 @@
bool dump,
u8 num_specific_global_params)
{
- u8 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u32 offset = 0;
+ u8 num_params;
- /* Find platform string and dump global params section header */
+ /* Dump global params section header */
+ num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "global_params", num_params);
@@ -1846,25 +2064,29 @@
offset +=
qed_dump_num_param(dump_buf + offset, dump, "pci-func",
p_hwfn->abs_pf_id);
+
return offset;
}
-/* Writes the last section to the specified buffer at the given offset.
- * Returns the dumped size in dwords.
+/* Writes the "last" section (including CRC) to the specified buffer at the
+ * given offset. Returns the dumped size in dwords.
*/
-static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
+static u32 qed_dump_last_section(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf, u32 offset, bool dump)
{
- u32 start_offset = offset, crc = ~0;
+ u32 start_offset = offset;
/* Dump CRC section header */
offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
- /* Calculate CRC32 and add it to the dword following the "last" section.
- */
+ /* Calculate CRC32 and add it to the dword after the "last" section */
if (dump)
- *(dump_buf + offset) = ~crc32(crc, (u8 *)dump_buf,
+ *(dump_buf + offset) = ~crc32(0xffffffff,
+ (u8 *)dump_buf,
DWORDS_TO_BYTES(offset));
+
offset++;
+
return offset - start_offset;
}
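Because the CRC dword is the very last dword of the dump and covers every byte before it, a consumer can validate a completed dump in one pass. A sketch, assuming the kernel's crc32() and a size argument that includes the CRC dword:

	/* Sketch: verify the trailer written by qed_dump_last_section().
	 * The stored dword is the bitwise NOT of the CRC32 of all
	 * preceding bytes.
	 */
	static bool qed_dump_crc_is_valid(const u32 *buf, u32 size_in_dwords)
	{
		u32 data_dwords = size_in_dwords - 1;

		return buf[data_dwords] ==
		       ~crc32(0xffffffff, (const u8 *)buf,
			      DWORDS_TO_BYTES(data_dwords));
	}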
@@ -1883,11 +2105,12 @@
p_ptt, s_reset_regs_defs[i].addr);
/* Check if blocks are in reset */
- for (i = 0; i < MAX_BLOCK_ID; i++)
- dev_data->block_in_reset[i] =
- s_block_defs[i]->has_reset_bit &&
- !(reg_val[s_block_defs[i]->reset_reg] &
- BIT(s_block_defs[i]->reset_bit_offset));
+ for (i = 0; i < MAX_BLOCK_ID; i++) {
+ struct block_defs *block = s_block_defs[i];
+
+ dev_data->block_in_reset[i] = block->has_reset_bit &&
+ !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset));
+ }
}
/* Enable / disable the Debug block */
@@ -1902,12 +2125,12 @@
struct qed_ptt *p_ptt)
{
u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
+ struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
- dbg_reset_reg_addr =
- s_reset_regs_defs[s_block_defs[BLOCK_DBG]->reset_reg].addr;
+ dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
- new_reset_reg_val = old_reset_reg_val &
- ~BIT(s_block_defs[BLOCK_DBG]->reset_bit_offset);
+ new_reset_reg_val =
+ old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset);
qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
@@ -1920,8 +2143,8 @@
qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
}
-/* Enable / disable Debug Bus clients according to the specified mask.
- * (1 = enable, 0 = disable)
+/* Enable / disable Debug Bus clients according to the specified mask
+ * (1 = enable, 0 = disable).
*/
static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 client_mask)
@@ -1931,10 +2154,14 @@
static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
{
- const u32 *ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u8 tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
bool arg1, arg2;
+ const u32 *ptr;
+ u8 tree_val;
+
+ /* Get next element from modes tree buffer */
+ ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
+ tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
switch (tree_val) {
case INIT_MODE_OP_NOT:
@@ -1974,75 +2201,81 @@
static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
enum block_id block_id, u8 mem_group_id)
{
+ struct block_defs *block = s_block_defs[block_id];
u8 i;
/* Check Storm match */
- if (s_block_defs[block_id]->associated_to_storm &&
+ if (block->associated_to_storm &&
!qed_grc_is_storm_included(p_hwfn,
- (enum dbg_storms)s_block_defs[block_id]->storm_id))
+ (enum dbg_storms)block->storm_id))
return false;
- for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
- if (mem_group_id == s_big_ram_defs[i].mem_group_id ||
- mem_group_id == s_big_ram_defs[i].ram_mem_group_id)
- return qed_grc_is_included(p_hwfn,
- s_big_ram_defs[i].grc_param);
- if (mem_group_id == MEM_GROUP_PXP_ILT || mem_group_id ==
- MEM_GROUP_PXP_MEM)
- return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
- if (mem_group_id == MEM_GROUP_RAM)
- return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
- if (mem_group_id == MEM_GROUP_PBUF)
- return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
- if (mem_group_id == MEM_GROUP_CAU_MEM ||
- mem_group_id == MEM_GROUP_CAU_SB ||
- mem_group_id == MEM_GROUP_CAU_PI)
- return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
- if (mem_group_id == MEM_GROUP_QM_MEM)
- return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
- if (mem_group_id == MEM_GROUP_CONN_CFC_MEM ||
- mem_group_id == MEM_GROUP_TASK_CFC_MEM)
- return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
- if (mem_group_id == MEM_GROUP_IGU_MEM || mem_group_id ==
- MEM_GROUP_IGU_MSIX)
- return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
- if (mem_group_id == MEM_GROUP_MULD_MEM)
- return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
- if (mem_group_id == MEM_GROUP_PRS_MEM)
- return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
- if (mem_group_id == MEM_GROUP_DMAE_MEM)
- return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
- if (mem_group_id == MEM_GROUP_TM_MEM)
- return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
- if (mem_group_id == MEM_GROUP_SDM_MEM)
- return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
- if (mem_group_id == MEM_GROUP_TDIF_CTX || mem_group_id ==
- MEM_GROUP_RDIF_CTX)
- return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
- if (mem_group_id == MEM_GROUP_CM_MEM)
- return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
- if (mem_group_id == MEM_GROUP_IOR)
- return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
+ for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
+ struct big_ram_defs *big_ram = &s_big_ram_defs[i];
- return true;
+ if (mem_group_id == big_ram->mem_group_id ||
+ mem_group_id == big_ram->ram_mem_group_id)
+ return qed_grc_is_included(p_hwfn, big_ram->grc_param);
+ }
+
+ switch (mem_group_id) {
+ case MEM_GROUP_PXP_ILT:
+ case MEM_GROUP_PXP_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
+ case MEM_GROUP_RAM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
+ case MEM_GROUP_PBUF:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
+ case MEM_GROUP_CAU_MEM:
+ case MEM_GROUP_CAU_SB:
+ case MEM_GROUP_CAU_PI:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
+ case MEM_GROUP_QM_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
+ case MEM_GROUP_CFC_MEM:
+ case MEM_GROUP_CONN_CFC_MEM:
+ case MEM_GROUP_TASK_CFC_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
+ case MEM_GROUP_IGU_MEM:
+ case MEM_GROUP_IGU_MSIX:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
+ case MEM_GROUP_MULD_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
+ case MEM_GROUP_PRS_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
+ case MEM_GROUP_DMAE_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
+ case MEM_GROUP_TM_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
+ case MEM_GROUP_SDM_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
+ case MEM_GROUP_TDIF_CTX:
+ case MEM_GROUP_RDIF_CTX:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
+ case MEM_GROUP_CM_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
+ case MEM_GROUP_IOR:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
+ default:
+ return true;
+ }
}
/* Stalls all Storms */
static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool stall)
{
- u8 reg_val = stall ? 1 : 0;
+ u32 reg_addr;
u8 storm_id;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
- if (qed_grc_is_storm_included(p_hwfn,
- (enum dbg_storms)storm_id)) {
- u32 reg_addr =
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_STALL_0;
+ if (!qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id))
+ continue;
- qed_wr(p_hwfn, p_ptt, reg_addr, reg_val);
- }
+ reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_STALL_0_BB_K2;
+ qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
}
msleep(STALL_DELAY_MS);
@@ -2054,24 +2287,29 @@
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
- u32 i;
+ u32 block_id, i;
/* Fill reset regs values */
- for (i = 0; i < MAX_BLOCK_ID; i++)
- if (s_block_defs[i]->has_reset_bit && s_block_defs[i]->unreset)
- reg_val[s_block_defs[i]->reset_reg] |=
- BIT(s_block_defs[i]->reset_bit_offset);
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ struct block_defs *block = s_block_defs[block_id];
+
+ if (block->has_reset_bit && block->unreset)
+ reg_val[block->reset_reg] |=
+ BIT(block->reset_bit_offset);
+ }
/* Write reset registers */
for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
- if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
- reg_val[i] |= s_reset_regs_defs[i].unreset_val;
- if (reg_val[i])
- qed_wr(p_hwfn,
- p_ptt,
- s_reset_regs_defs[i].addr +
- RESET_REG_UNRESET_OFFSET, reg_val[i]);
- }
+ if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
+ continue;
+
+ reg_val[i] |= s_reset_regs_defs[i].unreset_val;
+
+ if (reg_val[i])
+ qed_wr(p_hwfn,
+ p_ptt,
+ s_reset_regs_defs[i].addr +
+ RESET_REG_UNRESET_OFFSET, reg_val[i]);
}
}
@@ -2095,6 +2333,7 @@
qed_get_block_attn_data(block_id, attn_type);
*num_attn_regs = block_type_data->num_regs;
+
return &((const struct dbg_attn_reg *)
s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
regs_offset];
@@ -2105,34 +2344,34 @@
struct qed_ptt *p_ptt)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ const struct dbg_attn_reg *attn_reg_arr;
u8 reg_idx, num_attn_regs;
u32 block_id;
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
- const struct dbg_attn_reg *attn_reg_arr;
-
if (dev_data->block_in_reset[block_id])
continue;
attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
ATTN_TYPE_PARITY,
&num_attn_regs);
+
for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
const struct dbg_attn_reg *reg_data =
&attn_reg_arr[reg_idx];
+ u16 modes_buf_offset;
+ bool eval_mode;
/* Check mode */
- bool eval_mode = GET_FIELD(reg_data->mode.data,
- DBG_MODE_HDR_EVAL_MODE) > 0;
- u16 modes_buf_offset =
+ eval_mode = GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ modes_buf_offset =
GET_FIELD(reg_data->mode.data,
DBG_MODE_HDR_MODES_BUF_OFFSET);
+ /* If the mode matches, read the parity status read-clear register */
if (!eval_mode ||
qed_is_mode_match(p_hwfn, &modes_buf_offset))
- /* Mode match - read parity status read-clear
- * register.
- */
qed_rd(p_hwfn, p_ptt,
DWORDS_TO_BYTES(reg_data->
sts_clr_address));
@@ -2142,11 +2381,11 @@
/* Dumps GRC registers section header. Returns the dumped size in dwords.
* The following parameters are dumped:
- * - 'count' = num_dumped_entries
- * - 'split' = split_type
- * - 'id' = split_id (dumped only if split_id >= 0)
- * - 'param_name' = param_val (user param, dumped only if param_name != NULL and
- * param_val != NULL)
+ * - count: no. of dumped entries
+ * - split: split type
+ * - id: split ID (dumped only if split_id >= 0)
+ * - param_name: user parameter value (dumped only if param_name != NULL
+ * and param_val != NULL).
*/
static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
bool dump,
@@ -2170,84 +2409,100 @@
if (param_name && param_val)
offset += qed_dump_str_param(dump_buf + offset,
dump, param_name, param_val);
+
return offset;
}
/* Dumps the GRC registers in the specified address range.
* Returns the dumped size in dwords.
+ * The addr and len arguments are specified in dwords.
*/
static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *dump_buf,
- bool dump, u32 addr, u32 len)
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 addr, u32 len, bool wide_bus)
{
u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
- if (dump)
- for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
- *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
- else
- offset += len;
+ if (!dump)
+ return len;
+
+ for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
+ *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+
return offset;
}
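The early return also means the sizing pass never touches the GRC; only when dump is set is each dword fetched with a discrete qed_rd(). An illustrative call (the address here is made up):

	/* Sketch: dump 4 dwords starting at GRC byte address 0x1000.
	 * Per the contract above, addr and len are given in dwords.
	 */
	offset += qed_grc_dump_addr_range(p_hwfn, p_ptt,
					  dump_buf + offset, true,
					  BYTES_TO_DWORDS(0x1000), 4,
					  false /* not a wide bus */);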
-/* Dumps GRC registers sequence header. Returns the dumped size in dwords. */
-static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, bool dump, u32 addr,
- u32 len)
+/* Dumps GRC registers sequence header. Returns the dumped size in dwords.
+ * The addr and len arguments are specified in dwords.
+ */
+static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
+ bool dump, u32 addr, u32 len)
{
if (dump)
*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
+
return 1;
}
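The single header dword packs the start address in its low bits and the length above REG_DUMP_LEN_SHIFT, so a parser recovers both with the same shift. A decode sketch, assuming REG_DUMP_LEN_SHIFT is 24 (the define sits outside this hunk, so that value is stated here as an assumption):

	/* Sketch: decode a header built by qed_grc_dump_reg_entry_hdr().
	 * Assumes REG_DUMP_LEN_SHIFT == 24: 24 address bits, 8 length bits.
	 */
	static void decode_reg_entry_hdr(u32 hdr, u32 *addr, u32 *len)
	{
		*addr = hdr & (BIT(REG_DUMP_LEN_SHIFT) - 1);
		*len = hdr >> REG_DUMP_LEN_SHIFT;
	}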
-/* Dumps GRC registers sequence. Returns the dumped size in dwords. */
+/* Dumps GRC registers sequence. Returns the dumped size in dwords.
+ * The addr and len arguments are specified in dwords.
+ */
static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *dump_buf,
- bool dump, u32 addr, u32 len)
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 addr, u32 len, bool wide_bus)
{
u32 offset = 0;
offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt,
- dump_buf + offset, dump, addr, len);
+ dump_buf + offset,
+ dump, addr, len, wide_bus);
+
return offset;
}
/* Dumps GRC registers sequence with skip cycle.
* Returns the dumped size in dwords.
+ * - addr: start GRC address in dwords
+ * - total_len: total no. of dwords to dump
+ * - read_len: no. of consecutive dwords to read
+ * - skip_len: no. of dwords to skip (and fill with zeros)
*/
static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *dump_buf,
- bool dump, u32 addr, u32 total_len,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u32 addr,
+ u32 total_len,
u32 read_len, u32 skip_len)
{
u32 offset = 0, reg_offset = 0;
offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
- if (dump) {
- while (reg_offset < total_len) {
- u32 curr_len = min_t(u32,
- read_len,
- total_len - reg_offset);
- offset += qed_grc_dump_addr_range(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump, addr, curr_len);
+
+ if (!dump)
+ return offset + total_len;
+
+ while (reg_offset < total_len) {
+ u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
+
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, addr, curr_len, false);
+ reg_offset += curr_len;
+ addr += curr_len;
+
+ if (reg_offset < total_len) {
+ curr_len = min_t(u32, skip_len, total_len - reg_offset);
+ memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
+ offset += curr_len;
reg_offset += curr_len;
addr += curr_len;
- if (reg_offset < total_len) {
- curr_len = min_t(u32,
- skip_len,
- total_len - skip_len);
- memset(dump_buf + offset, 0,
- DWORDS_TO_BYTES(curr_len));
- offset += curr_len;
- reg_offset += curr_len;
- addr += curr_len;
- }
}
- } else {
- offset += total_len;
}
return offset;
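The entry thus alternates read_len live dwords with skip_len zeroed dwords until total_len is covered. A standalone sketch of the resulting layout, mirroring the loop above:

	/* Sketch: mark which dwords of a skip-cycle entry carry live data.
	 * E.g. total_len=10, read_len=3, skip_len=2 yields RRRZZRRRZZ
	 * (R = read from GRC, Z = zero-filled).
	 */
	static void mark_live_dwords(bool *live, u32 total_len,
				     u32 read_len, u32 skip_len)
	{
		u32 off = 0, i;

		while (off < total_len) {
			for (i = 0; i < read_len && off < total_len; i++)
				live[off++] = true;
			for (i = 0; i < skip_len && off < total_len; i++)
				live[off++] = false;
		}
	}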
@@ -2266,43 +2521,48 @@
bool mode_match = true;
*num_dumped_reg_entries = 0;
+
while (input_offset < input_regs_arr.size_in_dwords) {
const struct dbg_dump_cond_hdr *cond_hdr =
(const struct dbg_dump_cond_hdr *)
&input_regs_arr.ptr[input_offset++];
- bool eval_mode = GET_FIELD(cond_hdr->mode.data,
- DBG_MODE_HDR_EVAL_MODE) > 0;
+ u16 modes_buf_offset;
+ bool eval_mode;
/* Check mode/block */
+ eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
if (eval_mode) {
- u16 modes_buf_offset =
+ modes_buf_offset =
GET_FIELD(cond_hdr->mode.data,
DBG_MODE_HDR_MODES_BUF_OFFSET);
mode_match = qed_is_mode_match(p_hwfn,
&modes_buf_offset);
}
- if (mode_match && block_enable[cond_hdr->block_id]) {
- for (i = 0; i < cond_hdr->data_size;
- i++, input_offset++) {
- const struct dbg_dump_reg *reg =
- (const struct dbg_dump_reg *)
- &input_regs_arr.ptr[input_offset];
- u32 addr, len;
-
- addr = GET_FIELD(reg->data,
- DBG_DUMP_REG_ADDRESS);
- len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
- offset +=
- qed_grc_dump_reg_entry(p_hwfn, p_ptt,
- dump_buf + offset,
- dump,
- addr,
- len);
- (*num_dumped_reg_entries)++;
- }
- } else {
+ if (!mode_match || !block_enable[cond_hdr->block_id]) {
input_offset += cond_hdr->data_size;
+ continue;
+ }
+
+ for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
+ const struct dbg_dump_reg *reg =
+ (const struct dbg_dump_reg *)
+ &input_regs_arr.ptr[input_offset];
+ u32 addr, len;
+ bool wide_bus;
+
+ addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
+ len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
+ wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ len,
+ wide_bus);
+ (*num_dumped_reg_entries)++;
}
}
@@ -2350,8 +2610,8 @@
return num_dumped_reg_entries > 0 ? offset : 0;
}
-/* Dumps registers according to the input registers array.
- * Returns the dumped size in dwords.
+/* Dumps registers according to the input registers array. Returns the dumped
+ * size in dwords.
*/
static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -2361,29 +2621,37 @@
const char *param_name, const char *param_val)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- struct chip_platform_defs *p_platform_defs;
+ struct chip_platform_defs *chip_platform;
u32 offset = 0, input_offset = 0;
- struct chip_defs *p_chip_defs;
+ struct chip_defs *chip;
u8 port_id, pf_id, vf_id;
u16 fid;
- p_chip_defs = &s_chip_defs[dev_data->chip_id];
- p_platform_defs = &p_chip_defs->per_platform[dev_data->platform_id];
+ chip = &s_chip_defs[dev_data->chip_id];
+ chip_platform = &chip->per_platform[dev_data->platform_id];
if (dump)
DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
+
while (input_offset <
s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
- const struct dbg_dump_split_hdr *split_hdr =
+ const struct dbg_dump_split_hdr *split_hdr;
+ struct dbg_array curr_input_regs_arr;
+ u32 split_data_size;
+ u8 split_type_id;
+
+ split_hdr =
(const struct dbg_dump_split_hdr *)
&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
- u8 split_type_id = GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
- u32 split_data_size = GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_DATA_SIZE);
- struct dbg_array curr_input_regs_arr = {
- &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset],
- split_data_size};
+ split_type_id =
+ GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ split_data_size =
+ GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ curr_input_regs_arr.ptr =
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
+ curr_input_regs_arr.size_in_dwords = split_data_size;
switch (split_type_id) {
case SPLIT_TYPE_NONE:
@@ -2398,8 +2666,9 @@
param_name,
param_val);
break;
+
case SPLIT_TYPE_PORT:
- for (port_id = 0; port_id < p_platform_defs->num_ports;
+ for (port_id = 0; port_id < chip_platform->num_ports;
port_id++) {
if (dump)
qed_port_pretend(p_hwfn, p_ptt,
@@ -2414,9 +2683,10 @@
param_val);
}
break;
+
case SPLIT_TYPE_PF:
case SPLIT_TYPE_PORT_PF:
- for (pf_id = 0; pf_id < p_platform_defs->num_pfs;
+ for (pf_id = 0; pf_id < chip_platform->num_pfs;
pf_id++) {
u8 pfid_shift =
PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
@@ -2427,17 +2697,21 @@
}
offset +=
- qed_grc_dump_split_data(p_hwfn, p_ptt,
+ qed_grc_dump_split_data(p_hwfn,
+ p_ptt,
curr_input_regs_arr,
dump_buf + offset,
- dump, block_enable,
- "pf", pf_id,
+ dump,
+ block_enable,
+ "pf",
+ pf_id,
param_name,
param_val);
}
break;
+
case SPLIT_TYPE_VF:
- for (vf_id = 0; vf_id < p_platform_defs->num_vfs;
+ for (vf_id = 0; vf_id < chip_platform->num_vfs;
vf_id++) {
u8 vfvalid_shift =
PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
@@ -2460,6 +2734,7 @@
param_val);
}
break;
+
default:
break;
}
@@ -2490,35 +2765,37 @@
/* Write reset registers */
for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
- if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
- u32 addr = BYTES_TO_DWORDS(s_reset_regs_defs[i].addr);
+ if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
+ continue;
- offset += qed_grc_dump_reg_entry(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- addr,
- 1);
- num_regs++;
- }
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ BYTES_TO_DWORDS
+ (s_reset_regs_defs[i].addr), 1,
+ false);
+ num_regs++;
}
/* Write header */
if (dump)
qed_grc_dump_regs_hdr(dump_buf,
true, num_regs, "eng", -1, NULL, NULL);
+
return offset;
}
-/* Dump registers that are modified during GRC Dump and therefore must be dumped
- * first. Returns the dumped size in dwords.
+/* Dump registers that are modified during GRC Dump and therefore must be
+ * dumped first. Returns the dumped size in dwords.
*/
static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf, bool dump)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 offset = 0, num_reg_entries = 0, block_id;
+ u32 block_id, offset = 0, num_reg_entries = 0;
+ const struct dbg_attn_reg *attn_reg_arr;
u8 storm_id, reg_idx, num_attn_regs;
/* Calculate header size */
@@ -2527,14 +2804,13 @@
/* Write parity registers */
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
- const struct dbg_attn_reg *attn_reg_arr;
-
if (dev_data->block_in_reset[block_id] && dump)
continue;
attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
ATTN_TYPE_PARITY,
&num_attn_regs);
+
for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
const struct dbg_attn_reg *reg_data =
&attn_reg_arr[reg_idx];
@@ -2548,37 +2824,36 @@
modes_buf_offset =
GET_FIELD(reg_data->mode.data,
DBG_MODE_HDR_MODES_BUF_OFFSET);
- if (!eval_mode ||
- qed_is_mode_match(p_hwfn, &modes_buf_offset)) {
- /* Mode match - read and dump registers */
- addr = reg_data->mask_address;
- offset +=
- qed_grc_dump_reg_entry(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- addr,
- 1);
- addr = GET_FIELD(reg_data->data,
- DBG_ATTN_REG_STS_ADDRESS);
- offset +=
- qed_grc_dump_reg_entry(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- addr,
- 1);
- num_reg_entries += 2;
- }
+ if (eval_mode &&
+ !qed_is_mode_match(p_hwfn, &modes_buf_offset))
+ continue;
+
+ /* Mode match: read & dump registers */
+ addr = reg_data->mask_address;
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1, false);
+ addr = GET_FIELD(reg_data->data,
+ DBG_ATTN_REG_STS_ADDRESS);
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1, false);
+ num_reg_entries += 2;
}
}
- /* Write storm stall status registers */
+ /* Write Storm stall status registers */
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ struct storm_defs *storm = &s_storm_defs[storm_id];
u32 addr;
- if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] &&
- dump)
+ if (dev_data->block_in_reset[storm->block_id] && dump)
continue;
addr =
@@ -2589,7 +2864,8 @@
dump_buf + offset,
dump,
addr,
- 1);
+ 1,
+ false);
num_reg_entries++;
}
@@ -2598,6 +2874,7 @@
qed_grc_dump_regs_hdr(dump_buf,
true,
num_reg_entries, "eng", -1, NULL, NULL);
+
return offset;
}
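Note the reserve-then-backfill pattern: the header size is accounted for up front, the entries are dumped while being counted, and only then is the header written with the final count. In sketch form, using the same helpers as above:

	/* Sketch: reserve header space, dump entries, backfill the count */
	offset += qed_grc_dump_regs_hdr(dump_buf, false, 0,
					"eng", -1, NULL, NULL); /* reserve */
	/* ... dump entries, incrementing num_reg_entries ... */
	if (dump)
		qed_grc_dump_regs_hdr(dump_buf, true, num_reg_entries,
				      "eng", -1, NULL, NULL); /* backfill */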
@@ -2637,17 +2914,17 @@
return offset;
}
-/* Dumps a GRC memory header (section and params).
- * The following parameters are dumped:
- * name - name is dumped only if it's not NULL.
- * addr - addr is dumped only if name is NULL.
- * len - len is always dumped.
- * width - bit_width is dumped if it's not zero.
- * packed - packed=1 is dumped if it's not false.
- * mem_group - mem_group is always dumped.
- * is_storm - true only if the memory is related to a Storm.
- * storm_letter - storm letter (valid only if is_storm is true).
- * Returns the dumped size in dwords.
+/* Dumps a GRC memory header (section and params). Returns the dumped size in
+ * dwords. The following parameters are dumped:
+ * - name: dumped only if it's not NULL.
+ * - addr: in dwords, dumped only if name is NULL.
+ * - len: in dwords, always dumped.
+ * - width: dumped if it's not zero.
+ * - packed: dumped only if it's not false.
+ * - mem_group: always dumped.
+ * - is_storm: true only if the memory is related to a Storm.
+ * - storm_letter: valid only if is_storm is true.
*/
static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
@@ -2667,6 +2944,7 @@
if (!len)
DP_NOTICE(p_hwfn,
"Unexpected GRC Dump error: dumped memory size must be non-zero\n");
+
if (bit_width)
num_params++;
if (packed)
@@ -2675,6 +2953,7 @@
/* Dump section header */
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "grc_mem", num_params);
+
if (name) {
/* Dump name */
if (is_storm) {
@@ -2694,14 +2973,15 @@
len, buf);
} else {
/* Dump address */
+ u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
+
offset += qed_dump_num_param(dump_buf + offset,
- dump, "addr",
- DWORDS_TO_BYTES(addr));
+ dump, "addr", addr_in_bytes);
if (dump && len > 64)
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"Dumping %d registers from address 0x%x...\n",
- len, (u32)DWORDS_TO_BYTES(addr));
+ len, addr_in_bytes);
}
/* Dump len */
@@ -2727,11 +3007,13 @@
}
offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
+
return offset;
}
/* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
* Returns the dumped size in dwords.
+ * The addr and len arguments are specified in dwords.
*/
static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -2740,6 +3022,7 @@
const char *name,
u32 addr,
u32 len,
+ bool wide_bus,
u32 bit_width,
bool packed,
const char *mem_group,
@@ -2758,7 +3041,9 @@
mem_group, is_storm, storm_letter);
offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt,
- dump_buf + offset, dump, addr, len);
+ dump_buf + offset,
+ dump, addr, len, wide_bus);
+
return offset;
}
@@ -2773,20 +3058,21 @@
while (input_offset < input_mems_arr.size_in_dwords) {
const struct dbg_dump_cond_hdr *cond_hdr;
+ u16 modes_buf_offset;
u32 num_entries;
bool eval_mode;
cond_hdr = (const struct dbg_dump_cond_hdr *)
&input_mems_arr.ptr[input_offset++];
- eval_mode = GET_FIELD(cond_hdr->mode.data,
- DBG_MODE_HDR_EVAL_MODE) > 0;
+ num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
/* Check required mode */
+ eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
if (eval_mode) {
- u16 modes_buf_offset =
+ modes_buf_offset =
GET_FIELD(cond_hdr->mode.data,
DBG_MODE_HDR_MODES_BUF_OFFSET);
-
mode_match = qed_is_mode_match(p_hwfn,
&modes_buf_offset);
}
@@ -2796,81 +3082,87 @@
continue;
}
- num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
for (i = 0; i < num_entries;
i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
const struct dbg_dump_mem *mem =
(const struct dbg_dump_mem *)
&input_mems_arr.ptr[input_offset];
- u8 mem_group_id;
+ u8 mem_group_id = GET_FIELD(mem->dword0,
+ DBG_DUMP_MEM_MEM_GROUP_ID);
+ bool is_storm = false, mem_wide_bus;
+ enum dbg_grc_params grc_param;
+ char storm_letter = 'a';
+ enum block_id block_id;
+ u32 mem_addr, mem_len;
- mem_group_id = GET_FIELD(mem->dword0,
- DBG_DUMP_MEM_MEM_GROUP_ID);
if (mem_group_id >= MEM_GROUPS_NUM) {
DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
return 0;
}
- if (qed_grc_is_mem_included(p_hwfn,
- (enum block_id)cond_hdr->block_id,
- mem_group_id)) {
- u32 mem_addr = GET_FIELD(mem->dword0,
- DBG_DUMP_MEM_ADDRESS);
- u32 mem_len = GET_FIELD(mem->dword1,
- DBG_DUMP_MEM_LENGTH);
- enum dbg_grc_params grc_param;
- char storm_letter = 'a';
- bool is_storm = false;
+ block_id = (enum block_id)cond_hdr->block_id;
+ if (!qed_grc_is_mem_included(p_hwfn,
+ block_id,
+ mem_group_id))
+ continue;
- /* Update memory length for CCFC/TCFC memories
- * according to number of LCIDs/LTIDs.
- */
- if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
- if (mem_len % MAX_LCIDS != 0) {
- DP_NOTICE(p_hwfn,
- "Invalid CCFC connection memory size\n");
- return 0;
- }
+ mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
+ mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
+ mem_wide_bus = GET_FIELD(mem->dword1,
+ DBG_DUMP_MEM_WIDE_BUS);
- grc_param = DBG_GRC_PARAM_NUM_LCIDS;
- mem_len = qed_grc_get_param(p_hwfn,
- grc_param) *
- (mem_len / MAX_LCIDS);
- } else if (mem_group_id ==
- MEM_GROUP_TASK_CFC_MEM) {
- if (mem_len % MAX_LTIDS != 0) {
- DP_NOTICE(p_hwfn,
- "Invalid TCFC task memory size\n");
- return 0;
- }
-
- grc_param = DBG_GRC_PARAM_NUM_LTIDS;
- mem_len = qed_grc_get_param(p_hwfn,
- grc_param) *
- (mem_len / MAX_LTIDS);
+ /* Update memory length for CCFC/TCFC memories
+ * according to number of LCIDs/LTIDs.
+ */
+ if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
+ if (mem_len % MAX_LCIDS) {
+ DP_NOTICE(p_hwfn,
+ "Invalid CCFC connection memory size\n");
+ return 0;
}
- /* If memory is associated with Storm, update
- * Storm details.
- */
- if (s_block_defs[cond_hdr->block_id]->
- associated_to_storm) {
- is_storm = true;
- storm_letter =
- s_storm_defs[s_block_defs[
- cond_hdr->block_id]->
- storm_id].letter;
+ grc_param = DBG_GRC_PARAM_NUM_LCIDS;
+ mem_len = qed_grc_get_param(p_hwfn, grc_param) *
+ (mem_len / MAX_LCIDS);
+ } else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
+ if (mem_len % MAX_LTIDS) {
+ DP_NOTICE(p_hwfn,
+ "Invalid TCFC task memory size\n");
+ return 0;
}
- /* Dump memory */
- offset += qed_grc_dump_mem(p_hwfn, p_ptt,
- dump_buf + offset, dump, NULL,
- mem_addr, mem_len, 0,
+ grc_param = DBG_GRC_PARAM_NUM_LTIDS;
+ mem_len = qed_grc_get_param(p_hwfn, grc_param) *
+ (mem_len / MAX_LTIDS);
+ }
+
+ /* If memory is associated with Storm, update Storm
+ * details.
+ */
+ if (s_block_defs
+ [cond_hdr->block_id]->associated_to_storm) {
+ is_storm = true;
+ storm_letter =
+ s_storm_defs[s_block_defs
+ [cond_hdr->block_id]->
+ storm_id].letter;
+ }
+
+ /* Dump memory */
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ NULL,
+ mem_addr,
+ mem_len,
+ mem_wide_bus,
+ 0,
false,
s_mem_group_names[mem_group_id],
- is_storm, storm_letter);
- }
- }
+ is_storm,
+ storm_letter);
+ }
}
return offset;
@@ -2887,16 +3179,22 @@
while (input_offset <
s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
- const struct dbg_dump_split_hdr *split_hdr =
- (const struct dbg_dump_split_hdr *)
+ const struct dbg_dump_split_hdr *split_hdr;
+ struct dbg_array curr_input_mems_arr;
+ u32 split_data_size;
+ u8 split_type_id;
+
+ split_hdr = (const struct dbg_dump_split_hdr *)
&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
- u8 split_type_id = GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
- u32 split_data_size = GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_DATA_SIZE);
- struct dbg_array curr_input_mems_arr = {
- &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset],
- split_data_size};
+ split_type_id =
+ GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ split_data_size =
+ GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ curr_input_mems_arr.ptr =
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
+ curr_input_mems_arr.size_in_dwords = split_data_size;
switch (split_type_id) {
case SPLIT_TYPE_NONE:
@@ -2906,6 +3204,7 @@
dump_buf + offset,
dump);
break;
+
default:
DP_NOTICE(p_hwfn,
"Dumping split memories is currently not supported\n");
@@ -2920,6 +3219,7 @@
/* Dumps GRC context data for the specified Storm.
* Returns the dumped size in dwords.
+ * The lid_size argument is specified in quad-regs.
*/
static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -2931,13 +3231,15 @@
u32 rd_reg_addr,
u8 storm_id)
{
- u32 i, lid, total_size;
- u32 offset = 0;
+ struct storm_defs *storm = &s_storm_defs[storm_id];
+ u32 i, lid, total_size, offset = 0;
if (!lid_size)
return 0;
+
lid_size *= BYTES_IN_DWORD;
total_size = num_lids * lid_size;
+
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
dump,
@@ -2945,25 +3247,19 @@
0,
total_size,
lid_size * 32,
- false,
- name,
- true, s_storm_defs[storm_id].letter);
+ false, name, true, storm->letter);
+
+ if (!dump)
+ return offset + total_size;
/* Dump context data */
- if (dump) {
- for (lid = 0; lid < num_lids; lid++) {
- for (i = 0; i < lid_size; i++, offset++) {
- qed_wr(p_hwfn,
- p_ptt,
- s_storm_defs[storm_id].cm_ctx_wr_addr,
- (i << 9) | lid);
- *(dump_buf + offset) = qed_rd(p_hwfn,
- p_ptt,
- rd_reg_addr);
- }
+ for (lid = 0; lid < num_lids; lid++) {
+ for (i = 0; i < lid_size; i++, offset++) {
+ qed_wr(p_hwfn,
+ p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
+ *(dump_buf + offset) = qed_rd(p_hwfn,
+ p_ptt, rd_reg_addr);
}
- } else {
- offset += total_size;
}
return offset;
@@ -2973,15 +3269,19 @@
static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
+ enum dbg_grc_params grc_param;
u32 offset = 0;
u8 storm_id;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ struct storm_defs *storm = &s_storm_defs[storm_id];
+
if (!qed_grc_is_storm_included(p_hwfn,
(enum dbg_storms)storm_id))
continue;
/* Dump Conn AG context size */
+ grc_param = DBG_GRC_PARAM_NUM_LCIDS;
offset +=
qed_grc_dump_ctx_data(p_hwfn,
p_ptt,
@@ -2989,14 +3289,13 @@
dump,
"CONN_AG_CTX",
qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LCIDS),
- s_storm_defs[storm_id].
- cm_conn_ag_ctx_lid_size,
- s_storm_defs[storm_id].
- cm_conn_ag_ctx_rd_addr,
+ grc_param),
+ storm->cm_conn_ag_ctx_lid_size,
+ storm->cm_conn_ag_ctx_rd_addr,
storm_id);
/* Dump Conn ST context size */
+ grc_param = DBG_GRC_PARAM_NUM_LCIDS;
offset +=
qed_grc_dump_ctx_data(p_hwfn,
p_ptt,
@@ -3004,14 +3303,13 @@
dump,
"CONN_ST_CTX",
qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LCIDS),
- s_storm_defs[storm_id].
- cm_conn_st_ctx_lid_size,
- s_storm_defs[storm_id].
- cm_conn_st_ctx_rd_addr,
+ grc_param),
+ storm->cm_conn_st_ctx_lid_size,
+ storm->cm_conn_st_ctx_rd_addr,
storm_id);
/* Dump Task AG context size */
+ grc_param = DBG_GRC_PARAM_NUM_LTIDS;
offset +=
qed_grc_dump_ctx_data(p_hwfn,
p_ptt,
@@ -3019,14 +3317,13 @@
dump,
"TASK_AG_CTX",
qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LTIDS),
- s_storm_defs[storm_id].
- cm_task_ag_ctx_lid_size,
- s_storm_defs[storm_id].
- cm_task_ag_ctx_rd_addr,
+ grc_param),
+ storm->cm_task_ag_ctx_lid_size,
+ storm->cm_task_ag_ctx_rd_addr,
storm_id);
/* Dump Task ST context size */
+ grc_param = DBG_GRC_PARAM_NUM_LTIDS;
offset +=
qed_grc_dump_ctx_data(p_hwfn,
p_ptt,
@@ -3034,11 +3331,9 @@
dump,
"TASK_ST_CTX",
qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LTIDS),
- s_storm_defs[storm_id].
- cm_task_st_ctx_lid_size,
- s_storm_defs[storm_id].
- cm_task_st_ctx_rd_addr,
+ grc_param),
+ storm->cm_task_st_ctx_lid_size,
+ storm->cm_task_st_ctx_rd_addr,
storm_id);
}
@@ -3050,8 +3345,8 @@
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
char buf[10] = "IOR_SET_?";
+ u32 addr, offset = 0;
u8 storm_id, set_id;
- u32 offset = 0;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
struct storm_defs *storm = &s_storm_defs[storm_id];
@@ -3061,11 +3356,9 @@
continue;
for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
- u32 dwords, addr;
-
- dwords = storm->sem_fast_mem_addr +
- SEM_FAST_REG_STORM_REG_FILE;
- addr = BYTES_TO_DWORDS(dwords) + IOR_SET_OFFSET(set_id);
+ addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
+ SEM_FAST_REG_STORM_REG_FILE) +
+ IOR_SET_OFFSET(set_id);
buf[strlen(buf) - 1] = '0' + set_id;
offset += qed_grc_dump_mem(p_hwfn,
p_ptt,
@@ -3074,6 +3367,7 @@
buf,
addr,
IORS_PER_SET,
+ false,
32,
false,
"ior",
@@ -3091,10 +3385,10 @@
u32 *dump_buf, bool dump, u8 storm_id)
{
u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
+ struct storm_defs *storm = &s_storm_defs[storm_id];
u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
- u32 offset = 0;
- u32 row, i;
+ u32 row, i, offset = 0;
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
@@ -3103,38 +3397,34 @@
0,
total_size,
256,
- false,
- "vfc_cam",
- true, s_storm_defs[storm_id].letter);
- if (dump) {
- /* Prepare CAM address */
- SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
- for (row = 0; row < VFC_CAM_NUM_ROWS;
- row++, offset += VFC_CAM_RESP_DWORDS) {
- /* Write VFC CAM command */
- SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
- ARR_REG_WR(p_hwfn,
- p_ptt,
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_VFC_DATA_WR,
- cam_cmd, VFC_CAM_CMD_DWORDS);
+ false, "vfc_cam", true, storm->letter);
- /* Write VFC CAM address */
- ARR_REG_WR(p_hwfn,
- p_ptt,
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_VFC_ADDR,
- cam_addr, VFC_CAM_ADDR_DWORDS);
+ if (!dump)
+ return offset + total_size;
- /* Read VFC CAM read response */
- ARR_REG_RD(p_hwfn,
- p_ptt,
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_VFC_DATA_RD,
- dump_buf + offset, VFC_CAM_RESP_DWORDS);
- }
- } else {
- offset += total_size;
+ /* Prepare CAM address */
+ SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
+
+ for (row = 0; row < VFC_CAM_NUM_ROWS;
+ row++, offset += VFC_CAM_RESP_DWORDS) {
+ /* Write VFC CAM command */
+ SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
+ cam_cmd, VFC_CAM_CMD_DWORDS);
+
+ /* Write VFC CAM address */
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
+ cam_addr, VFC_CAM_ADDR_DWORDS);
+
+ /* Read VFC CAM read response */
+ ARR_REG_RD(p_hwfn,
+ p_ptt,
+ storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
+ dump_buf + offset, VFC_CAM_RESP_DWORDS);
}
return offset;
@@ -3148,10 +3438,10 @@
u8 storm_id, struct vfc_ram_defs *ram_defs)
{
u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
+ struct storm_defs *storm = &s_storm_defs[storm_id];
u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
- u32 offset = 0;
- u32 row, i;
+ u32 row, i, offset = 0;
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
@@ -3162,7 +3452,7 @@
256,
false,
ram_defs->type_name,
- true, s_storm_defs[storm_id].letter);
+ true, storm->letter);
/* Prepare RAM address */
SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
@@ -3176,23 +3466,20 @@
/* Write VFC RAM command */
ARR_REG_WR(p_hwfn,
p_ptt,
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_VFC_DATA_WR,
+ storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
ram_cmd, VFC_RAM_CMD_DWORDS);
/* Write VFC RAM address */
SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
ARR_REG_WR(p_hwfn,
p_ptt,
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_VFC_ADDR,
+ storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
ram_addr, VFC_RAM_ADDR_DWORDS);
/* Read VFC RAM read response */
ARR_REG_RD(p_hwfn,
p_ptt,
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_VFC_DATA_RD,
+ storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
dump_buf + offset, VFC_RAM_RESP_DWORDS);
}
@@ -3208,28 +3495,27 @@
u32 offset = 0;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
- if (qed_grc_is_storm_included(p_hwfn,
- (enum dbg_storms)storm_id) &&
- s_storm_defs[storm_id].has_vfc &&
- (storm_id != DBG_PSTORM_ID ||
- dev_data->platform_id == PLATFORM_ASIC)) {
- /* Read CAM */
- offset += qed_grc_dump_vfc_cam(p_hwfn,
+ if (!qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id) ||
+ !s_storm_defs[storm_id].has_vfc ||
+ (storm_id == DBG_PSTORM_ID && dev_data->platform_id !=
+ PLATFORM_ASIC))
+ continue;
+
+ /* Read CAM */
+ offset += qed_grc_dump_vfc_cam(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, storm_id);
+
+ /* Read RAM */
+ for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
+ offset += qed_grc_dump_vfc_ram(p_hwfn,
p_ptt,
dump_buf + offset,
- dump, storm_id);
-
- /* Read RAM */
- for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
- offset += qed_grc_dump_vfc_ram(p_hwfn,
- p_ptt,
- dump_buf +
- offset,
- dump,
- storm_id,
- &s_vfc_ram_defs
- [i]);
- }
+ dump,
+ storm_id,
+ &s_vfc_ram_defs[i]);
}
return offset;
@@ -3244,14 +3530,17 @@
u8 rss_mem_id;
for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
- struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id];
- u32 num_entries = rss_defs->num_entries[dev_data->chip_id];
- u32 entry_width = rss_defs->entry_width[dev_data->chip_id];
- u32 total_dwords = (num_entries * entry_width) / 32;
- u32 size = RSS_REG_RSS_RAM_DATA_SIZE;
- bool packed = (entry_width == 16);
- u32 rss_addr = rss_defs->addr;
- u32 i, addr;
+ u32 rss_addr, num_entries, entry_width, total_dwords, i;
+ struct rss_mem_defs *rss_defs;
+ u32 addr, size;
+ bool packed;
+
+ rss_defs = &s_rss_mem_defs[rss_mem_id];
+ rss_addr = rss_defs->addr;
+ num_entries = rss_defs->num_entries[dev_data->chip_id];
+ entry_width = rss_defs->entry_width[dev_data->chip_id];
+ total_dwords = (num_entries * entry_width) / 32;
+ packed = (entry_width == 16);
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
@@ -3263,23 +3552,23 @@
packed,
rss_defs->type_name, false, 0);
+ /* Dump RSS data */
if (!dump) {
offset += total_dwords;
continue;
}
- /* Dump RSS data */
- for (i = 0; i < total_dwords;
- i += RSS_REG_RSS_RAM_DATA_SIZE, rss_addr++) {
- addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
+ addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
+ size = RSS_REG_RSS_RAM_DATA_SIZE;
+ for (i = 0; i < total_dwords; i += size, rss_addr++) {
qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
- offset += qed_grc_dump_addr_range(p_hwfn,
- p_ptt,
- dump_buf +
- offset,
- dump,
- addr,
- size);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ size,
+ false);
}
}
@@ -3316,10 +3605,11 @@
BIG_RAM_BLOCK_SIZE_BYTES * 8,
false, type_name, false, 0);
+ /* Read and dump Big RAM data */
if (!dump)
return offset + ram_size;
- /* Read and dump Big RAM data */
+ /* Dump Big RAM */
for (i = 0; i < total_blocks / 2; i++) {
u32 addr, len;
@@ -3331,7 +3621,8 @@
dump_buf + offset,
dump,
addr,
- len);
+ len,
+ false);
}
return offset;
@@ -3359,7 +3650,7 @@
NULL,
BYTES_TO_DWORDS(MCP_REG_SCRATCH),
MCP_REG_SCRATCH_SIZE,
- 0, false, "MCP", false, 0);
+ false, 0, false, "MCP", false, 0);
/* Dump MCP cpu_reg_file */
offset += qed_grc_dump_mem(p_hwfn,
@@ -3369,7 +3660,7 @@
NULL,
BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
MCP_REG_CPU_REG_FILE_SIZE,
- 0, false, "MCP", false, 0);
+ false, 0, false, "MCP", false, 0);
/* Dump MCP registers */
block_enable[BLOCK_MCP] = true;
@@ -3387,11 +3678,13 @@
dump_buf + offset,
dump,
addr,
- 1);
+ 1,
+ false);
/* Release MCP */
if (halted && qed_mcp_resume(p_hwfn, p_ptt))
DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
+
return offset;
}
@@ -3404,14 +3697,26 @@
u8 phy_id;
for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
- struct phy_defs *phy_defs = &s_phy_defs[phy_id];
- int printed_chars;
+ u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
+ struct phy_defs *phy_defs;
+ u8 *bytes_buf;
- printed_chars = snprintf(mem_name, sizeof(mem_name), "tbus_%s",
- phy_defs->phy_name);
- if (printed_chars < 0 || printed_chars >= sizeof(mem_name))
+ phy_defs = &s_phy_defs[phy_id];
+ addr_lo_addr = phy_defs->base_addr +
+ phy_defs->tbus_addr_lo_addr;
+ addr_hi_addr = phy_defs->base_addr +
+ phy_defs->tbus_addr_hi_addr;
+ data_lo_addr = phy_defs->base_addr +
+ phy_defs->tbus_data_lo_addr;
+ data_hi_addr = phy_defs->base_addr +
+ phy_defs->tbus_data_hi_addr;
+ bytes_buf = (u8 *)(dump_buf + offset);
+
+ if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
+ phy_defs->phy_name) < 0)
DP_NOTICE(p_hwfn,
"Unexpected debug error: invalid PHY memory name\n");
+
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
dump,
@@ -3419,34 +3724,26 @@
0,
PHY_DUMP_SIZE_DWORDS,
16, true, mem_name, false, 0);
- if (dump) {
- u32 addr_lo_addr = phy_defs->base_addr +
- phy_defs->tbus_addr_lo_addr;
- u32 addr_hi_addr = phy_defs->base_addr +
- phy_defs->tbus_addr_hi_addr;
- u32 data_lo_addr = phy_defs->base_addr +
- phy_defs->tbus_data_lo_addr;
- u32 data_hi_addr = phy_defs->base_addr +
- phy_defs->tbus_data_hi_addr;
- u8 *bytes_buf = (u8 *)(dump_buf + offset);
- for (tbus_hi_offset = 0;
- tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
- tbus_hi_offset++) {
+ if (!dump) {
+ offset += PHY_DUMP_SIZE_DWORDS;
+ continue;
+ }
+
+ for (tbus_hi_offset = 0;
+ tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
+ tbus_hi_offset++) {
+ qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
+ for (tbus_lo_offset = 0; tbus_lo_offset < 256;
+ tbus_lo_offset++) {
qed_wr(p_hwfn,
- p_ptt, addr_hi_addr, tbus_hi_offset);
- for (tbus_lo_offset = 0; tbus_lo_offset < 256;
- tbus_lo_offset++) {
- qed_wr(p_hwfn,
- p_ptt,
- addr_lo_addr, tbus_lo_offset);
- *(bytes_buf++) =
- (u8)qed_rd(p_hwfn, p_ptt,
- data_lo_addr);
- *(bytes_buf++) =
- (u8)qed_rd(p_hwfn, p_ptt,
- data_hi_addr);
- }
+ p_ptt, addr_lo_addr, tbus_lo_offset);
+ *(bytes_buf++) = (u8)qed_rd(p_hwfn,
+ p_ptt,
+ data_lo_addr);
+ *(bytes_buf++) = (u8)qed_rd(p_hwfn,
+ p_ptt,
+ data_hi_addr);
}
}
@@ -3460,16 +3757,17 @@
struct qed_ptt *p_ptt,
enum block_id block_id,
u8 line_id,
- u8 cycle_en,
- u8 right_shift, u8 force_valid, u8 force_frame)
+ u8 enable_mask,
+ u8 right_shift,
+ u8 force_valid_mask, u8 force_frame_mask)
{
- struct block_defs *p_block_defs = s_block_defs[block_id];
+ struct block_defs *block = s_block_defs[block_id];
- qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_select_addr, line_id);
- qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_cycle_enable_addr, cycle_en);
- qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_shift_addr, right_shift);
- qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_valid_addr, force_valid);
- qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_frame_addr, force_frame);
+ qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
+ qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
+ qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
+ qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
+ qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
}
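The static-debug dump below drives this helper once per line: select the line, set the enable mask to 0xf, leave the shift and force masks at zero, then read the calendar output. The call pattern, as it appears in the loop further down:

	/* Sketch: capture one static debug line (as the loop below does) */
	qed_config_dbg_line(p_hwfn, p_ptt, block_id, line_id, 0xf, 0, 0, 0);
	offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset,
					  true,
					  BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA),
					  STATIC_DEBUG_LINE_DWORDS,
					  true /* wide bus */);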
/* Dumps Static Debug data. Returns the dumped size in dwords. */
@@ -3477,10 +3775,12 @@
struct qed_ptt *p_ptt,
u32 *dump_buf, bool dump)
{
- u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS;
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 offset = 0, block_id, line_id;
- struct block_defs *p_block_defs;
+ u32 block_id, line_id, offset = 0;
+
+ /* Skip static debug if a debug bus recording is in progress */
+ if (qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
+ return 0;
if (dump) {
DP_VERBOSE(p_hwfn,
@@ -3488,11 +3788,11 @@
/* Disable all blocks debug output */
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
- p_block_defs = s_block_defs[block_id];
+ struct block_defs *block = s_block_defs[block_id];
- if (p_block_defs->has_dbg_bus[dev_data->chip_id])
- qed_wr(p_hwfn, p_ptt,
- p_block_defs->dbg_cycle_enable_addr, 0);
+ if (block->has_dbg_bus[dev_data->chip_id])
+ qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
+ 0);
}
qed_bus_reset_dbg_block(p_hwfn, p_ptt);
@@ -3506,59 +3806,71 @@
/* Dump all static debug lines for each relevant block */
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
- p_block_defs = s_block_defs[block_id];
+ struct block_defs *block = s_block_defs[block_id];
+ struct dbg_bus_block *block_desc;
+ u32 block_dwords, addr, len;
+ u8 dbg_client_id;
- if (!p_block_defs->has_dbg_bus[dev_data->chip_id])
+ if (!block->has_dbg_bus[dev_data->chip_id])
continue;
+ block_desc =
+ get_dbg_bus_block_desc(p_hwfn,
+ (enum block_id)block_id);
+ block_dwords = NUM_DBG_LINES(block_desc) *
+ STATIC_DEBUG_LINE_DWORDS;
+
/* Dump static section params */
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
dump,
- p_block_defs->name, 0,
- block_dwords, 32, false,
- "STATIC", false, 0);
+ block->name,
+ 0,
+ block_dwords,
+ 32, false, "STATIC", false, 0);
- if (dump && !dev_data->block_in_reset[block_id]) {
- u8 dbg_client_id =
- p_block_defs->dbg_client_id[dev_data->chip_id];
- u32 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
- u32 len = STATIC_DEBUG_LINE_DWORDS;
-
- /* Enable block's client */
- qed_bus_enable_clients(p_hwfn, p_ptt,
- BIT(dbg_client_id));
-
- for (line_id = 0; line_id < NUM_DBG_BUS_LINES;
- line_id++) {
- /* Configure debug line ID */
- qed_config_dbg_line(p_hwfn,
- p_ptt,
- (enum block_id)block_id,
- (u8)line_id,
- 0xf, 0, 0, 0);
-
- /* Read debug line info */
- offset +=
- qed_grc_dump_addr_range(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- addr,
- len);
- }
-
- /* Disable block's client and debug output */
- qed_bus_enable_clients(p_hwfn, p_ptt, 0);
- qed_wr(p_hwfn, p_ptt,
- p_block_defs->dbg_cycle_enable_addr, 0);
- } else {
- /* All lines are invalid - dump zeros */
- if (dump)
- memset(dump_buf + offset, 0,
- DWORDS_TO_BYTES(block_dwords));
+ if (!dump) {
offset += block_dwords;
+ continue;
}
+
+ /* If all lines are invalid - dump zeros */
+ if (dev_data->block_in_reset[block_id]) {
+ memset(dump_buf + offset, 0,
+ DWORDS_TO_BYTES(block_dwords));
+ offset += block_dwords;
+ continue;
+ }
+
+ /* Enable block's client */
+ dbg_client_id = block->dbg_client_id[dev_data->chip_id];
+ qed_bus_enable_clients(p_hwfn,
+ p_ptt,
+ BIT(dbg_client_id));
+
+ addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
+ len = STATIC_DEBUG_LINE_DWORDS;
+ for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc);
+ line_id++) {
+ /* Configure debug line ID */
+ qed_config_dbg_line(p_hwfn,
+ p_ptt,
+ (enum block_id)block_id,
+ (u8)line_id, 0xf, 0, 0, 0);
+
+ /* Read debug line info */
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ len,
+ true);
+ }
+
+ /* Disable block's client and debug output */
+ qed_bus_enable_clients(p_hwfn, p_ptt, 0);
+ qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
}
if (dump) {
@@ -3584,8 +3896,8 @@
*num_dumped_dwords = 0;
- /* Find port mode */
if (dump) {
+ /* Find port mode */
switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
case 0:
port_mode = 1;
@@ -3597,11 +3909,10 @@
port_mode = 4;
break;
}
- }
- /* Update reset state */
- if (dump)
+ /* Update reset state */
qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ }
/* Dump global params */
offset += qed_dump_common_global_params(p_hwfn,
@@ -3635,7 +3946,8 @@
}
/* Disable all parities using MFW command */
- if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
+ if (dump &&
+ !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
if (!parities_masked) {
DP_NOTICE(p_hwfn,
@@ -3661,9 +3973,9 @@
/* Dump all regs */
if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
- /* Dump all blocks except MCP */
bool block_enable[MAX_BLOCK_ID];
+ /* Dump all blocks except MCP */
for (i = 0; i < MAX_BLOCK_ID; i++)
block_enable[i] = true;
block_enable[BLOCK_MCP] = false;
@@ -3732,7 +4044,8 @@
dump_buf + offset, dump);
/* Dump last section */
- offset += qed_dump_last_section(dump_buf, offset, dump);
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+
if (dump) {
/* Unstall storms */
if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
@@ -3763,19 +4076,20 @@
const struct dbg_idle_chk_rule *rule,
u16 fail_entry_id, u32 *cond_reg_values)
{
- const union dbg_idle_chk_reg *regs = &((const union dbg_idle_chk_reg *)
- s_dbg_arrays
- [BIN_BUF_DBG_IDLE_CHK_REGS].
- ptr)[rule->reg_offset];
- const struct dbg_idle_chk_cond_reg *cond_regs = &regs[0].cond_reg;
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- struct dbg_idle_chk_result_hdr *hdr =
- (struct dbg_idle_chk_result_hdr *)dump_buf;
- const struct dbg_idle_chk_info_reg *info_regs =
- &regs[rule->num_cond_regs].info_reg;
- u32 next_reg_offset = 0, i, offset = 0;
+ const struct dbg_idle_chk_cond_reg *cond_regs;
+ const struct dbg_idle_chk_info_reg *info_regs;
+ u32 i, next_reg_offset = 0, offset = 0;
+ struct dbg_idle_chk_result_hdr *hdr;
+ const union dbg_idle_chk_reg *regs;
u8 reg_id;
+ hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
+ regs = &((const union dbg_idle_chk_reg *)
+ s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
+ cond_regs = &regs[0].cond_reg;
+ info_regs = &regs[rule->num_cond_regs].info_reg;
+
/* Dump rule data */
if (dump) {
memset(hdr, 0, sizeof(*hdr));
@@ -3790,33 +4104,31 @@
/* Dump condition register values */
for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr;
+
+ reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
+ (dump_buf + offset);
/* Write register header */
- if (dump) {
- struct dbg_idle_chk_result_reg_hdr *reg_hdr =
- (struct dbg_idle_chk_result_reg_hdr *)(dump_buf
- + offset);
- offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
- memset(reg_hdr, 0,
- sizeof(struct dbg_idle_chk_result_reg_hdr));
- reg_hdr->start_entry = reg->start_entry;
- reg_hdr->size = reg->entry_size;
- SET_FIELD(reg_hdr->data,
- DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
- reg->num_entries > 1 || reg->start_entry > 0
- ? 1 : 0);
- SET_FIELD(reg_hdr->data,
- DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
-
- /* Write register values */
- for (i = 0; i < reg_hdr->size;
- i++, next_reg_offset++, offset++)
- dump_buf[offset] =
- cond_reg_values[next_reg_offset];
- } else {
+ if (!dump) {
offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
reg->entry_size;
+ continue;
}
+
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
+ memset(reg_hdr, 0, sizeof(*reg_hdr));
+ reg_hdr->start_entry = reg->start_entry;
+ reg_hdr->size = reg->entry_size;
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
+ reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
+
+ /* Write register values */
+ for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
+ dump_buf[offset] = cond_reg_values[next_reg_offset];
}
/* Dump info register values */
@@ -3824,12 +4136,12 @@
const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
u32 block_id;
+ /* Check if register's block is in reset */
if (!dump) {
offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
continue;
}
- /* Check if register's block is in reset */
block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
if (block_id >= MAX_BLOCK_ID) {
DP_NOTICE(p_hwfn, "Invalid block_id\n");
@@ -3837,47 +4149,50 @@
}
if (!dev_data->block_in_reset[block_id]) {
- bool eval_mode = GET_FIELD(reg->mode.data,
- DBG_MODE_HDR_EVAL_MODE) > 0;
- bool mode_match = true;
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr;
+ bool wide_bus, eval_mode, mode_match = true;
+ u16 modes_buf_offset;
+ u32 addr;
+
+ reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
+ (dump_buf + offset);
/* Check mode */
+ eval_mode = GET_FIELD(reg->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
if (eval_mode) {
- u16 modes_buf_offset =
- GET_FIELD(reg->mode.data,
- DBG_MODE_HDR_MODES_BUF_OFFSET);
+ modes_buf_offset =
+ GET_FIELD(reg->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
mode_match =
qed_is_mode_match(p_hwfn,
&modes_buf_offset);
}
- if (mode_match) {
- u32 addr =
- GET_FIELD(reg->data,
- DBG_IDLE_CHK_INFO_REG_ADDRESS);
+ if (!mode_match)
+ continue;
- /* Write register header */
- struct dbg_idle_chk_result_reg_hdr *reg_hdr =
- (struct dbg_idle_chk_result_reg_hdr *)
- (dump_buf + offset);
+ addr = GET_FIELD(reg->data,
+ DBG_IDLE_CHK_INFO_REG_ADDRESS);
+ wide_bus = GET_FIELD(reg->data,
+ DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
- offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
- hdr->num_dumped_info_regs++;
- memset(reg_hdr, 0, sizeof(*reg_hdr));
- reg_hdr->size = reg->size;
- SET_FIELD(reg_hdr->data,
- DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
- rule->num_cond_regs + reg_id);
+ /* Write register header */
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
+ hdr->num_dumped_info_regs++;
+ memset(reg_hdr, 0, sizeof(*reg_hdr));
+ reg_hdr->size = reg->size;
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
+ rule->num_cond_regs + reg_id);
- /* Write register values */
- offset +=
- qed_grc_dump_addr_range(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- addr,
- reg->size);
- }
+ /* Write register values */
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ reg->size, wide_bus);
}
}
@@ -3898,6 +4213,7 @@
u8 reg_id;
*num_failing_rules = 0;
+
for (i = 0; i < num_input_rules; i++) {
const struct dbg_idle_chk_cond_reg *cond_regs;
const struct dbg_idle_chk_rule *rule;
@@ -3920,8 +4236,9 @@
*/
for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
reg_id++) {
- u32 block_id = GET_FIELD(cond_regs[reg_id].data,
- DBG_IDLE_CHK_COND_REG_BLOCK_ID);
+ u32 block_id =
+ GET_FIELD(cond_regs[reg_id].data,
+ DBG_IDLE_CHK_COND_REG_BLOCK_ID);
if (block_id >= MAX_BLOCK_ID) {
DP_NOTICE(p_hwfn, "Invalid block_id\n");
@@ -3936,48 +4253,47 @@
if (!check_rule && dump)
continue;
- if (!dump) {
- u32 entry_dump_size =
- qed_idle_chk_dump_failure(p_hwfn,
- p_ptt,
- dump_buf + offset,
- false,
- rule->rule_id,
- rule,
- 0,
- NULL);
-
- offset += num_reg_entries * entry_dump_size;
- (*num_failing_rules) += num_reg_entries;
- continue;
- }
-
/* Go over all register entries (number of entries is the same
* for all condition registers).
*/
for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
- /* Read current entry of all condition registers */
u32 next_reg_offset = 0;
+ if (!dump) {
+ offset += qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ false,
+ rule->rule_id,
+ rule,
+ entry_id,
+ NULL);
+ (*num_failing_rules)++;
+ break;
+ }
+
+ /* Read current entry of all condition registers */
for (reg_id = 0; reg_id < rule->num_cond_regs;
reg_id++) {
const struct dbg_idle_chk_cond_reg *reg =
- &cond_regs[reg_id];
+ &cond_regs[reg_id];
+ u32 padded_entry_size, addr;
+ bool wide_bus;
- /* Find GRC address (if it's a memory,the
+ /* Find GRC address (if it's a memory, the
* address of the specific entry is calculated).
*/
- u32 addr =
+ addr = GET_FIELD(reg->data,
+ DBG_IDLE_CHK_COND_REG_ADDRESS);
+ wide_bus =
GET_FIELD(reg->data,
- DBG_IDLE_CHK_COND_REG_ADDRESS);
-
+ DBG_IDLE_CHK_COND_REG_WIDE_BUS);
if (reg->num_entries > 1 ||
reg->start_entry > 0) {
- u32 padded_entry_size =
- reg->entry_size > 1 ?
- roundup_pow_of_two(reg->entry_size) :
- 1;
-
+ padded_entry_size =
+ reg->entry_size > 1 ?
+ roundup_pow_of_two(reg->entry_size) : 1;
addr += (reg->start_entry + entry_id) *
padded_entry_size;
}
@@ -3991,28 +4307,27 @@
}
next_reg_offset +=
- qed_grc_dump_addr_range(p_hwfn,
- p_ptt,
+ qed_grc_dump_addr_range(p_hwfn, p_ptt,
cond_reg_values +
next_reg_offset,
dump, addr,
- reg->entry_size);
+ reg->entry_size,
+ wide_bus);
}
- /* Call rule's condition function - a return value of
- * true indicates failure.
+ /* Call rule condition function.
+ * If it returns true, it's a failure.
*/
- if ((*cond_arr[rule->cond_id])(cond_reg_values,
- imm_values)) {
- offset +=
- qed_idle_chk_dump_failure(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- rule->rule_id,
- rule,
- entry_id,
- cond_reg_values);
+ if ((*cond_arr[rule->cond_id])(cond_reg_values,
+ imm_values)) {
+ offset += qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ rule->rule_id,
+ rule,
+ entry_id,
+ cond_reg_values);
(*num_failing_rules)++;
break;
}
@@ -4028,8 +4343,8 @@
static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
- u32 offset = 0, input_offset = 0, num_failing_rules = 0;
- u32 num_failing_rules_offset;
+ u32 num_failing_rules_offset, offset = 0, input_offset = 0;
+ u32 num_failing_rules = 0;
/* Dump global params */
offset += qed_dump_common_global_params(p_hwfn,
@@ -4042,29 +4357,29 @@
offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
num_failing_rules_offset = offset;
offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
+
while (input_offset <
s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
const struct dbg_idle_chk_cond_hdr *cond_hdr =
(const struct dbg_idle_chk_cond_hdr *)
&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
[input_offset++];
- bool eval_mode = GET_FIELD(cond_hdr->mode.data,
- DBG_MODE_HDR_EVAL_MODE) > 0;
- bool mode_match = true;
+ bool eval_mode, mode_match = true;
+ u32 curr_failing_rules;
+ u16 modes_buf_offset;
/* Check mode */
+ eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
if (eval_mode) {
- u16 modes_buf_offset =
+ modes_buf_offset =
GET_FIELD(cond_hdr->mode.data,
DBG_MODE_HDR_MODES_BUF_OFFSET);
-
mode_match = qed_is_mode_match(p_hwfn,
&modes_buf_offset);
}
if (mode_match) {
- u32 curr_failing_rules;
-
offset +=
qed_idle_chk_dump_rule_entries(p_hwfn,
p_ptt,
@@ -4086,10 +4401,13 @@
qed_dump_num_param(dump_buf + num_failing_rules_offset,
dump, "num_rules", num_failing_rules);
+ /* Dump last section */
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+
return offset;
}
-/* Finds the meta data image in NVRAM. */
+/* Finds the meta data image in NVRAM */
static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 image_type,
@@ -4098,16 +4416,16 @@
{
u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
struct mcp_file_att file_att;
+ int nvm_result;
/* Call NVRAM get file command */
- int nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
- p_ptt,
- DRV_MSG_CODE_NVM_GET_FILE_ATT,
- image_type,
- &ret_mcp_resp,
- &ret_mcp_param,
- &ret_txn_size,
- (u32 *)&file_att);
+ nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
+ p_ptt,
+ DRV_MSG_CODE_NVM_GET_FILE_ATT,
+ image_type,
+ &ret_mcp_resp,
+ &ret_mcp_param,
+ &ret_txn_size, (u32 *)&file_att);
/* Check response */
if (nvm_result ||
@@ -4117,6 +4435,7 @@
/* Update return values */
*nvram_offset_bytes = file_att.nvm_start_addr;
*nvram_size_bytes = file_att.len;
+
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
@@ -4125,22 +4444,25 @@
/* Check alignment */
if (*nvram_size_bytes & 0x3)
return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
+
return DBG_STATUS_OK;
}
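+
+/* Callers are expected to pair qed_find_nvram_image() with qed_nvram_read()
+ * below: the image is located first, then read back in chunks whose size is
+ * bounded by the MCP interface (see the copy loop in qed_nvram_read()).
+ */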
+/* Reads data from NVRAM */
static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 nvram_offset_bytes,
u32 nvram_size_bytes, u32 *ret_buf)
{
- u32 ret_mcp_resp, ret_mcp_param, ret_read_size;
- u32 bytes_to_copy, read_offset = 0;
+ u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
s32 bytes_left = nvram_size_bytes;
+ u32 read_offset = 0;
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"nvram_read: reading image of size %d bytes from NVRAM\n",
nvram_size_bytes);
+
do {
bytes_to_copy =
(bytes_left >
@@ -4155,8 +4477,7 @@
DRV_MB_PARAM_NVM_LEN_SHIFT),
&ret_mcp_resp, &ret_mcp_param,
&ret_read_size,
- (u32 *)((u8 *)ret_buf +
- read_offset)) != 0)
+ (u32 *)((u8 *)ret_buf + read_offset)))
return DBG_STATUS_NVRAM_READ_FAILED;
/* Check response */
@@ -4172,24 +4493,20 @@
}
/* Get info on the MCP Trace data in the scratchpad:
- * - trace_data_grc_addr - the GRC address of the trace data
- * - trace_data_size_bytes - the size in bytes of the MCP Trace data (without
- * the header)
+ * - trace_data_grc_addr (OUT): trace data GRC address in bytes
+ * - trace_data_size (OUT): trace data size in bytes (without the header)
*/
static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *trace_data_grc_addr,
- u32 *trace_data_size_bytes)
+ u32 *trace_data_size)
{
- /* Read MCP trace section offsize structure from MCP scratchpad */
- u32 spad_trace_offsize = qed_rd(p_hwfn,
- p_ptt,
- MCP_SPAD_TRACE_OFFSIZE_ADDR);
- u32 signature;
+ u32 spad_trace_offsize, signature;
- /* Extract MCP trace section GRC address from offsize structure (within
- * scratchpad).
- */
+ /* Read trace section offsize structure from MCP scratchpad */
+ spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
+
+ /* Extract trace section address from offsize (in scratchpad) */
*trace_data_grc_addr =
MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
@@ -4197,42 +4514,41 @@
signature = qed_rd(p_hwfn, p_ptt,
*trace_data_grc_addr +
offsetof(struct mcp_trace, signature));
+
if (signature != MFW_TRACE_SIGNATURE)
return DBG_STATUS_INVALID_TRACE_SIGNATURE;
/* Read trace size from MCP trace section */
- *trace_data_size_bytes = qed_rd(p_hwfn,
- p_ptt,
- *trace_data_grc_addr +
- offsetof(struct mcp_trace, size));
+ *trace_data_size = qed_rd(p_hwfn,
+ p_ptt,
+ *trace_data_grc_addr +
+ offsetof(struct mcp_trace, size));
+
return DBG_STATUS_OK;
}
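+
+/* Note: the scratchpad "offsize" dword read above packs both a section offset
+ * and a section size; SECTION_OFFSET() and QED_SECTION_SIZE() extract the two
+ * fields (their exact bit layout is defined elsewhere in the driver headers).
+ */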
-/* Reads MCP trace meta data image from NVRAM.
- * - running_bundle_id (OUT) - the running bundle ID (invalid when loaded from
- * file)
- * - trace_meta_offset_bytes (OUT) - the NVRAM offset in bytes in which the MCP
- * Trace meta data starts (invalid when loaded from file)
- * - trace_meta_size_bytes (OUT) - the size in bytes of the MCP Trace meta data
+/* Reads MCP trace meta data image from NVRAM
+ * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
+ * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
+ * loaded from file).
+ * - trace_meta_size (OUT): size in bytes of the trace meta data.
*/
static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 trace_data_size_bytes,
u32 *running_bundle_id,
- u32 *trace_meta_offset_bytes,
- u32 *trace_meta_size_bytes)
+ u32 *trace_meta_offset,
+ u32 *trace_meta_size)
{
+ u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
+
/* Read MCP trace section offsize structure from MCP scratchpad */
- u32 spad_trace_offsize = qed_rd(p_hwfn,
- p_ptt,
- MCP_SPAD_TRACE_OFFSIZE_ADDR);
+ spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
/* Find running bundle ID */
- u32 running_mfw_addr =
+ running_mfw_addr =
MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
- u32 nvram_image_type;
-
*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
if (*running_bundle_id > 1)
return DBG_STATUS_INVALID_NVRAM_BUNDLE;
@@ -4241,40 +4557,33 @@
nvram_image_type =
(*running_bundle_id ==
DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
-
return qed_find_nvram_image(p_hwfn,
p_ptt,
nvram_image_type,
- trace_meta_offset_bytes,
- trace_meta_size_bytes);
+ trace_meta_offset, trace_meta_size);
}
-/* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified
- * buffer.
- */
+/* Reads the MCP Trace meta data from NVRAM into the specified buffer */
static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 nvram_offset_in_bytes,
u32 size_in_bytes, u32 *buf)
{
- u8 *byte_buf = (u8 *)buf;
- u8 modules_num, i;
+ u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
+ enum dbg_status status;
u32 signature;
/* Read meta data from NVRAM */
- enum dbg_status status = qed_nvram_read(p_hwfn,
- p_ptt,
- nvram_offset_in_bytes,
- size_in_bytes,
- buf);
-
+ status = qed_nvram_read(p_hwfn,
+ p_ptt,
+ nvram_offset_in_bytes, size_in_bytes, buf);
if (status != DBG_STATUS_OK)
return status;
/* Extract and check first signature */
signature = qed_read_unaligned_dword(byte_buf);
- byte_buf += sizeof(u32);
- if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ byte_buf += sizeof(signature);
+ if (signature != NVM_MAGIC_VALUE)
return DBG_STATUS_INVALID_TRACE_SIGNATURE;
/* Extract number of modules */
@@ -4282,16 +4591,16 @@
/* Skip all modules */
for (i = 0; i < modules_num; i++) {
- u8 module_len = *(byte_buf++);
-
+ module_len = *(byte_buf++);
byte_buf += module_len;
}
/* Extract and check second signature */
signature = qed_read_unaligned_dword(byte_buf);
- byte_buf += sizeof(u32);
- if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ byte_buf += sizeof(signature);
+ if (signature != NVM_MAGIC_VALUE)
return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
return DBG_STATUS_OK;
}
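+
+/* For reference, the meta image validated above is laid out roughly as:
+ *   u32 signature (NVM_MAGIC_VALUE)
+ *   u8  modules_num
+ *   per module: u8 name length, followed by the name bytes
+ *   u32 signature (NVM_MAGIC_VALUE)
+ *   format entries (parsed by qed_mcp_trace_alloc_meta() below)
+ */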
@@ -4308,10 +4617,10 @@
bool mcp_access;
int halted = 0;
- mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
-
*num_dumped_dwords = 0;
+ mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
+
/* Get trace data info */
status = qed_mcp_trace_get_data_info(p_hwfn,
p_ptt,
@@ -4328,7 +4637,7 @@
dump, "dump-type", "mcp-trace");
/* Halt MCP while reading from scratchpad so the read data will be
- * consistent if halt fails, MCP trace is taken anyway, with a small
+ * consistent. If halt fails, MCP trace is taken anyway, with a small
* risk that it may be corrupt.
*/
if (dump && mcp_access) {
@@ -4339,8 +4648,8 @@
/* Find trace data size */
trace_data_size_dwords =
- DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
- BYTES_IN_DWORD);
+ DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
+ BYTES_IN_DWORD);
/* Dump trace data section header and param */
offset += qed_dump_section_hdr(dump_buf + offset,
@@ -4354,17 +4663,17 @@
dump_buf + offset,
dump,
BYTES_TO_DWORDS(trace_data_grc_addr),
- trace_data_size_dwords);
+ trace_data_size_dwords, false);
/* Resume MCP (only if halt succeeded) */
- if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0)
+ if (halted && qed_mcp_resume(p_hwfn, p_ptt))
DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
/* Dump trace meta section header */
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "mcp_trace_meta", 1);
- /* Read trace meta info */
+ /* Read trace meta info (trace_meta_size_bytes is dword-aligned) */
if (mcp_access) {
status = qed_mcp_trace_get_meta_info(p_hwfn,
p_ptt,
@@ -4391,6 +4700,9 @@
if (status == DBG_STATUS_OK)
offset += trace_meta_size_dwords;
+ /* Dump last section */
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+
*num_dumped_dwords = offset;
/* If no mcp access, indicate that the dump doesn't contain the meta
@@ -4405,7 +4717,7 @@
u32 *dump_buf,
bool dump, u32 *num_dumped_dwords)
{
- u32 offset = 0, dwords_read, size_param_offset;
+ u32 dwords_read, size_param_offset, offset = 0;
bool fifo_has_data;
*num_dumped_dwords = 0;
@@ -4417,8 +4729,8 @@
offset += qed_dump_str_param(dump_buf + offset,
dump, "dump-type", "reg-fifo");
- /* Dump fifo data section header and param. The size param is 0 for now,
- * and is overwritten after reading the FIFO.
+ /* Dump fifo data section header and param. The size param is 0 for
+ * now, and is overwritten after reading the FIFO.
*/
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "reg_fifo_data", 1);
@@ -4430,8 +4742,7 @@
* test how much data is available, except for reading it.
*/
offset += REG_FIFO_DEPTH_DWORDS;
- *num_dumped_dwords = offset;
- return DBG_STATUS_OK;
+ goto out;
}
fifo_has_data = qed_rd(p_hwfn, p_ptt,
@@ -4456,8 +4767,12 @@
qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
dwords_read);
+out:
+ /* Dump last section */
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
*num_dumped_dwords = offset;
+
return DBG_STATUS_OK;
}
@@ -4467,7 +4782,7 @@
u32 *dump_buf,
bool dump, u32 *num_dumped_dwords)
{
- u32 offset = 0, dwords_read, size_param_offset;
+ u32 dwords_read, size_param_offset, offset = 0;
bool fifo_has_data;
*num_dumped_dwords = 0;
@@ -4479,8 +4794,8 @@
offset += qed_dump_str_param(dump_buf + offset,
dump, "dump-type", "igu-fifo");
- /* Dump fifo data section header and param. The size param is 0 for now,
- * and is overwritten after reading the FIFO.
+ /* Dump fifo data section header and param. The size param is 0 for
+ * now, and is overwritten after reading the FIFO.
*/
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "igu_fifo_data", 1);
@@ -4492,8 +4807,7 @@
* test how much data is available, except for reading it.
*/
offset += IGU_FIFO_DEPTH_DWORDS;
- *num_dumped_dwords = offset;
- return DBG_STATUS_OK;
+ goto out;
}
fifo_has_data = qed_rd(p_hwfn, p_ptt,
@@ -4519,8 +4833,12 @@
qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
dwords_read);
+out:
+ /* Dump last section */
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
*num_dumped_dwords = offset;
+
return DBG_STATUS_OK;
}
@@ -4531,7 +4849,7 @@
bool dump,
u32 *num_dumped_dwords)
{
- u32 offset = 0, size_param_offset, override_window_dwords;
+ u32 size_param_offset, override_window_dwords, offset = 0;
*num_dumped_dwords = 0;
@@ -4542,8 +4860,8 @@
offset += qed_dump_str_param(dump_buf + offset,
dump, "dump-type", "protection-override");
- /* Dump data section header and param. The size param is 0 for now, and
- * is overwritten after reading the data.
+ /* Dump data section header and param. The size param is 0 for now,
+ * and is overwritten after reading the data.
*/
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "protection_override_data", 1);
@@ -4552,8 +4870,7 @@
if (!dump) {
offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
- *num_dumped_dwords = offset;
- return DBG_STATUS_OK;
+ goto out;
}
/* Add override window info to buffer */
@@ -4569,8 +4886,12 @@
offset += override_window_dwords;
qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
override_window_dwords);
+out:
+ /* Dump last section */
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
*num_dumped_dwords = offset;
+
return DBG_STATUS_OK;
}
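+
+/* As with the reg/igu FIFO dumps above, both the dry-run (!dump) and real
+ * dump paths now fall through the "out" label, so the trailing "last"
+ * section and *num_dumped_dwords are emitted exactly once.
+ */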
@@ -4593,11 +4914,14 @@
dump_buf + offset, dump, 1);
offset += qed_dump_str_param(dump_buf + offset,
dump, "dump-type", "fw-asserts");
+
+ /* Find Storm dump size */
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
+ struct storm_defs *storm = &s_storm_defs[storm_id];
u32 last_list_idx, addr;
- if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id])
+ if (dev_data->block_in_reset[storm->block_id])
continue;
/* Read FW info for the current Storm */
@@ -4606,26 +4930,26 @@
asserts = &fw_info.fw_asserts_section;
/* Dump FW Asserts section header and params */
- storm_letter_str[0] = s_storm_defs[storm_id].letter;
- offset += qed_dump_section_hdr(dump_buf + offset, dump,
- "fw_asserts", 2);
- offset += qed_dump_str_param(dump_buf + offset, dump, "storm",
- storm_letter_str);
- offset += qed_dump_num_param(dump_buf + offset, dump, "size",
+ storm_letter_str[0] = storm->letter;
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "fw_asserts", 2);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "storm", storm_letter_str);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "size",
asserts->list_element_dword_size);
+ /* Read and dump FW Asserts data */
if (!dump) {
offset += asserts->list_element_dword_size;
continue;
}
- /* Read and dump FW Asserts data */
- fw_asserts_section_addr =
- s_storm_defs[storm_id].sem_fast_mem_addr +
+ fw_asserts_section_addr = storm->sem_fast_mem_addr +
SEM_FAST_REG_INT_RAM +
RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
- next_list_idx_addr =
- fw_asserts_section_addr +
+ next_list_idx_addr = fw_asserts_section_addr +
DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
last_list_idx = (next_list_idx > 0
@@ -4638,11 +4962,13 @@
qed_grc_dump_addr_range(p_hwfn, p_ptt,
dump_buf + offset,
dump, addr,
- asserts->list_element_dword_size);
+ asserts->list_element_dword_size,
+ false);
}
/* Dump last section */
- offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+
return offset;
}
@@ -4650,10 +4976,10 @@
enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
{
- /* Convert binary data to debug arrays */
struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
u8 buf_id;
+ /* Convert binary data to debug arrays */
for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
s_dbg_arrays[buf_id].ptr =
(u32 *)(bin_ptr + buf_array[buf_id].offset);
@@ -4682,14 +5008,17 @@
enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
*buf_size = 0;
+
if (status != DBG_STATUS_OK)
return status;
+
if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
!s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
!s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
!s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
!s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
+
return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}
@@ -4702,12 +5031,14 @@
u32 needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt,
- &needed_buf_size_in_dwords);
-
*num_dumped_dwords = 0;
+
+ status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
if (status != DBG_STATUS_OK)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
@@ -4724,25 +5055,31 @@
struct qed_ptt *p_ptt,
u32 *buf_size)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ struct idle_chk_data *idle_chk;
+ enum dbg_status status;
+ idle_chk = &dev_data->idle_chk;
*buf_size = 0;
+
+ status = qed_dbg_dev_init(p_hwfn, p_ptt);
if (status != DBG_STATUS_OK)
return status;
+
if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
!s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
!s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
!s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
- if (!dev_data->idle_chk.buf_size_set) {
- dev_data->idle_chk.buf_size = qed_idle_chk_dump(p_hwfn,
- p_ptt,
- NULL, false);
- dev_data->idle_chk.buf_size_set = true;
+
+ if (!idle_chk->buf_size_set) {
+ idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
+ p_ptt, NULL, false);
+ idle_chk->buf_size_set = true;
}
- *buf_size = dev_data->idle_chk.buf_size;
+ *buf_size = idle_chk->buf_size;
+
return DBG_STATUS_OK;
}
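+
+/* The idle-check buffer size is computed once via a dry run (dump == false)
+ * and cached in idle_chk->buf_size, avoiding repeated dry runs on subsequent
+ * queries.
+ */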
@@ -4755,12 +5092,14 @@
u32 needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt,
- &needed_buf_size_in_dwords);
-
*num_dumped_dwords = 0;
+
+ status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
if (status != DBG_STATUS_OK)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
@@ -4783,8 +5122,10 @@
enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
*buf_size = 0;
+
if (status != DBG_STATUS_OK)
return status;
+
return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}
@@ -4797,13 +5138,12 @@
u32 needed_buf_size_in_dwords;
enum dbg_status status;
- /* validate buffer size */
status =
- qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
- &needed_buf_size_in_dwords);
-
- if (status != DBG_STATUS_OK &&
- status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
+ qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
+ if (status != DBG_STATUS_OK &&
+ status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
return status;
if (buf_size_in_dwords < needed_buf_size_in_dwords)
@@ -4829,8 +5169,10 @@
enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
*buf_size = 0;
+
if (status != DBG_STATUS_OK)
return status;
+
return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}
@@ -4843,12 +5185,14 @@
u32 needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt,
- &needed_buf_size_in_dwords);
-
*num_dumped_dwords = 0;
+
+ status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
if (status != DBG_STATUS_OK)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
@@ -4871,8 +5215,10 @@
enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
*buf_size = 0;
+
if (status != DBG_STATUS_OK)
return status;
+
return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}
@@ -4885,12 +5231,14 @@
u32 needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt,
- &needed_buf_size_in_dwords);
-
*num_dumped_dwords = 0;
+
+ status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
if (status != DBG_STATUS_OK)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
@@ -4913,8 +5261,10 @@
enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
*buf_size = 0;
+
if (status != DBG_STATUS_OK)
return status;
+
return qed_protection_override_dump(p_hwfn,
p_ptt, NULL, false, buf_size);
}
@@ -4925,15 +5275,18 @@
u32 buf_size_in_dwords,
u32 *num_dumped_dwords)
{
- u32 needed_buf_size_in_dwords;
+ u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt,
- &needed_buf_size_in_dwords);
-
*num_dumped_dwords = 0;
+
+ status =
+ qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ p_size);
if (status != DBG_STATUS_OK)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
@@ -4958,12 +5311,15 @@
enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
*buf_size = 0;
+
if (status != DBG_STATUS_OK)
return status;
/* Update reset state */
qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
+
return DBG_STATUS_OK;
}
@@ -4973,24 +5329,108 @@
u32 buf_size_in_dwords,
u32 *num_dumped_dwords)
{
- u32 needed_buf_size_in_dwords;
+ u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt,
- &needed_buf_size_in_dwords);
-
*num_dumped_dwords = 0;
+
+ status =
+ qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ p_size);
if (status != DBG_STATUS_OK)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum block_id block_id,
+ enum dbg_attn_type attn_type,
+ bool clear_status,
+ struct dbg_attn_block_result *results)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ u8 reg_idx, num_attn_regs, num_result_regs = 0;
+ const struct dbg_attn_reg *attn_reg_arr;
+
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
+ return DBG_STATUS_DBG_ARRAY_NOT_SET;
+
+ attn_reg_arr = qed_get_block_attn_regs(block_id,
+ attn_type, &num_attn_regs);
+
+ for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
+ const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
+ struct dbg_attn_reg_result *reg_result;
+ u32 sts_addr, sts_val;
+ u16 modes_buf_offset;
+ bool eval_mode;
+
+ /* Check mode */
+ eval_mode = GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ modes_buf_offset = GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
+ continue;
+
+ /* Mode match - read attention status register */
+ sts_addr = DWORDS_TO_BYTES(clear_status ?
+ reg_data->sts_clr_address :
+ GET_FIELD(reg_data->data,
+ DBG_ATTN_REG_STS_ADDRESS));
+ sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
+ if (!sts_val)
+ continue;
+
+ /* Non-zero attention status - add to results */
+ reg_result = &results->reg_results[num_result_regs];
+ SET_FIELD(reg_result->data,
+ DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
+ SET_FIELD(reg_result->data,
+ DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
+ GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
+ reg_result->block_attn_offset = reg_data->block_attn_offset;
+ reg_result->sts_val = sts_val;
+ reg_result->mask_val = qed_rd(p_hwfn,
+ p_ptt,
+ DWORDS_TO_BYTES
+ (reg_data->mask_address));
+ num_result_regs++;
+ }
+
+ results->block_id = (u8)block_id;
+ results->names_offset =
+ qed_get_block_attn_data(block_id, attn_type)->names_offset;
+ SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
+ SET_FIELD(results->data,
+ DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
+
return DBG_STATUS_OK;
}
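+
+/* Illustrative call sequence (sketch only - handle_attn_result() is a
+ * hypothetical consumer, not part of this API):
+ *
+ *	struct dbg_attn_block_result res;
+ *
+ *	if (qed_dbg_read_attn(p_hwfn, p_ptt, BLOCK_IGU, ATTN_TYPE_INTERRUPT,
+ *			      false, &res) == DBG_STATUS_OK)
+ *		handle_attn_result(&res);
+ */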
/******************************* Data Types **********************************/
+struct block_info {
+ const char *name;
+ enum block_id id;
+};
+
struct mcp_trace_format {
u32 data;
#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
@@ -5005,9 +5445,14 @@
#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22
#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
#define MCP_TRACE_FORMAT_LEN_SHIFT 24
+
char *format_str;
};
+/* Meta data structure, generated by a perl script during MFW build. Therefore,
+ * the structs mcp_trace_meta and mcp_trace_format are duplicated in the perl
+ * script.
+ */
struct mcp_trace_meta {
u32 modules_num;
char **modules;
@@ -5015,7 +5460,7 @@
struct mcp_trace_format *formats;
};
-/* Reg fifo element */
+/* REG fifo element */
struct reg_fifo_element {
u64 data;
#define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0
@@ -5140,12 +5585,15 @@
/******************************** Constants **********************************/
#define MAX_MSG_LEN 1024
+
#define MCP_TRACE_MAX_MODULE_LEN 8
#define MCP_TRACE_FORMAT_MAX_PARAMS 3
#define MCP_TRACE_FORMAT_PARAM_WIDTH \
(MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
+
#define REG_FIFO_ELEMENT_ADDR_FACTOR 4
#define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127
+
#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
/********************************* Macros ************************************/
@@ -5154,59 +5602,269 @@
/***************************** Constant Arrays *******************************/
+struct user_dbg_array {
+ const u32 *ptr;
+ u32 size_in_dwords;
+};
+
+/* Debug arrays */
+static struct user_dbg_array
+s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
+
+/* Block names array */
+static struct block_info s_block_info_arr[] = {
+ {"grc", BLOCK_GRC},
+ {"miscs", BLOCK_MISCS},
+ {"misc", BLOCK_MISC},
+ {"dbu", BLOCK_DBU},
+ {"pglue_b", BLOCK_PGLUE_B},
+ {"cnig", BLOCK_CNIG},
+ {"cpmu", BLOCK_CPMU},
+ {"ncsi", BLOCK_NCSI},
+ {"opte", BLOCK_OPTE},
+ {"bmb", BLOCK_BMB},
+ {"pcie", BLOCK_PCIE},
+ {"mcp", BLOCK_MCP},
+ {"mcp2", BLOCK_MCP2},
+ {"pswhst", BLOCK_PSWHST},
+ {"pswhst2", BLOCK_PSWHST2},
+ {"pswrd", BLOCK_PSWRD},
+ {"pswrd2", BLOCK_PSWRD2},
+ {"pswwr", BLOCK_PSWWR},
+ {"pswwr2", BLOCK_PSWWR2},
+ {"pswrq", BLOCK_PSWRQ},
+ {"pswrq2", BLOCK_PSWRQ2},
+ {"pglcs", BLOCK_PGLCS},
+ {"ptu", BLOCK_PTU},
+ {"dmae", BLOCK_DMAE},
+ {"tcm", BLOCK_TCM},
+ {"mcm", BLOCK_MCM},
+ {"ucm", BLOCK_UCM},
+ {"xcm", BLOCK_XCM},
+ {"ycm", BLOCK_YCM},
+ {"pcm", BLOCK_PCM},
+ {"qm", BLOCK_QM},
+ {"tm", BLOCK_TM},
+ {"dorq", BLOCK_DORQ},
+ {"brb", BLOCK_BRB},
+ {"src", BLOCK_SRC},
+ {"prs", BLOCK_PRS},
+ {"tsdm", BLOCK_TSDM},
+ {"msdm", BLOCK_MSDM},
+ {"usdm", BLOCK_USDM},
+ {"xsdm", BLOCK_XSDM},
+ {"ysdm", BLOCK_YSDM},
+ {"psdm", BLOCK_PSDM},
+ {"tsem", BLOCK_TSEM},
+ {"msem", BLOCK_MSEM},
+ {"usem", BLOCK_USEM},
+ {"xsem", BLOCK_XSEM},
+ {"ysem", BLOCK_YSEM},
+ {"psem", BLOCK_PSEM},
+ {"rss", BLOCK_RSS},
+ {"tmld", BLOCK_TMLD},
+ {"muld", BLOCK_MULD},
+ {"yuld", BLOCK_YULD},
+ {"xyld", BLOCK_XYLD},
+ {"ptld", BLOCK_PTLD},
+ {"ypld", BLOCK_YPLD},
+ {"prm", BLOCK_PRM},
+ {"pbf_pb1", BLOCK_PBF_PB1},
+ {"pbf_pb2", BLOCK_PBF_PB2},
+ {"rpb", BLOCK_RPB},
+ {"btb", BLOCK_BTB},
+ {"pbf", BLOCK_PBF},
+ {"rdif", BLOCK_RDIF},
+ {"tdif", BLOCK_TDIF},
+ {"cdu", BLOCK_CDU},
+ {"ccfc", BLOCK_CCFC},
+ {"tcfc", BLOCK_TCFC},
+ {"igu", BLOCK_IGU},
+ {"cau", BLOCK_CAU},
+ {"rgfs", BLOCK_RGFS},
+ {"rgsrc", BLOCK_RGSRC},
+ {"tgfs", BLOCK_TGFS},
+ {"tgsrc", BLOCK_TGSRC},
+ {"umac", BLOCK_UMAC},
+ {"xmac", BLOCK_XMAC},
+ {"dbg", BLOCK_DBG},
+ {"nig", BLOCK_NIG},
+ {"wol", BLOCK_WOL},
+ {"bmbn", BLOCK_BMBN},
+ {"ipc", BLOCK_IPC},
+ {"nwm", BLOCK_NWM},
+ {"nws", BLOCK_NWS},
+ {"ms", BLOCK_MS},
+ {"phy_pcie", BLOCK_PHY_PCIE},
+ {"led", BLOCK_LED},
+ {"avs_wrap", BLOCK_AVS_WRAP},
+ {"misc_aeu", BLOCK_MISC_AEU},
+ {"bar0_map", BLOCK_BAR0_MAP}
+};
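+
+/* A minimal sketch of a name-to-id lookup over this table (the helper itself
+ * is hypothetical, for illustration only):
+ *
+ *	static enum block_id qed_block_id_by_name(const char *name)
+ *	{
+ *		u32 i;
+ *
+ *		for (i = 0; i < ARRAY_SIZE(s_block_info_arr); i++)
+ *			if (!strcmp(s_block_info_arr[i].name, name))
+ *				return s_block_info_arr[i].id;
+ *		return MAX_BLOCK_ID;
+ *	}
+ */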
+
/* Status string array */
static const char * const s_status_str[] = {
+ /* DBG_STATUS_OK */
"Operation completed successfully",
+
+ /* DBG_STATUS_APP_VERSION_NOT_SET */
"Debug application version wasn't set",
+
+ /* DBG_STATUS_UNSUPPORTED_APP_VERSION */
"Unsupported debug application version",
+
+ /* DBG_STATUS_DBG_BLOCK_NOT_RESET */
"The debug block wasn't reset since the last recording",
+
+ /* DBG_STATUS_INVALID_ARGS */
"Invalid arguments",
+
+ /* DBG_STATUS_OUTPUT_ALREADY_SET */
"The debug output was already set",
+
+ /* DBG_STATUS_INVALID_PCI_BUF_SIZE */
"Invalid PCI buffer size",
+
+ /* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
"PCI buffer allocation failed",
+
+ /* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
"A PCI buffer wasn't allocated",
+
+ /* DBG_STATUS_TOO_MANY_INPUTS */
"Too many inputs were enabled. Enabled less inputs, or set 'unifyInputs' to true",
- "GRC/Timestamp input overlap in cycle dword 0",
+
+ /* DBG_STATUS_INPUT_OVERLAP */
+ "Overlapping debug bus inputs",
+
+ /* DBG_STATUS_HW_ONLY_RECORDING */
"Cannot record Storm data since the entire recording cycle is used by HW",
+
+ /* DBG_STATUS_STORM_ALREADY_ENABLED */
"The Storm was already enabled",
+
+ /* DBG_STATUS_STORM_NOT_ENABLED */
"The specified Storm wasn't enabled",
+
+ /* DBG_STATUS_BLOCK_ALREADY_ENABLED */
"The block was already enabled",
+
+ /* DBG_STATUS_BLOCK_NOT_ENABLED */
"The specified block wasn't enabled",
+
+ /* DBG_STATUS_NO_INPUT_ENABLED */
"No input was enabled for recording",
+
+ /* DBG_STATUS_NO_FILTER_TRIGGER_64B */
"Filters and triggers are not allowed when recording in 64b units",
+
+ /* DBG_STATUS_FILTER_ALREADY_ENABLED */
"The filter was already enabled",
+
+ /* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
"The trigger was already enabled",
+
+ /* DBG_STATUS_TRIGGER_NOT_ENABLED */
"The trigger wasn't enabled",
+
+ /* DBG_STATUS_CANT_ADD_CONSTRAINT */
"A constraint can be added only after a filter was enabled or a trigger state was added",
+
+ /* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
"Cannot add more than 3 trigger states",
+
+ /* DBG_STATUS_TOO_MANY_CONSTRAINTS */
"Cannot add more than 4 constraints per filter or trigger state",
+
+ /* DBG_STATUS_RECORDING_NOT_STARTED */
"The recording wasn't started",
+
+ /* DBG_STATUS_DATA_DIDNT_TRIGGER */
"A trigger was configured, but it didn't trigger",
+
+ /* DBG_STATUS_NO_DATA_RECORDED */
"No data was recorded",
+
+ /* DBG_STATUS_DUMP_BUF_TOO_SMALL */
"Dump buffer is too small",
+
+ /* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
"Dumped data is not aligned to chunks",
+
+ /* DBG_STATUS_UNKNOWN_CHIP */
"Unknown chip",
+
+ /* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
"Failed allocating virtual memory",
+
+ /* DBG_STATUS_BLOCK_IN_RESET */
"The input block is in reset",
+
+ /* DBG_STATUS_INVALID_TRACE_SIGNATURE */
"Invalid MCP trace signature found in NVRAM",
+
+ /* DBG_STATUS_INVALID_NVRAM_BUNDLE */
"Invalid bundle ID found in NVRAM",
+
+ /* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
"Failed getting NVRAM image",
+
+ /* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
"NVRAM image is not dword-aligned",
+
+ /* DBG_STATUS_NVRAM_READ_FAILED */
"Failed reading from NVRAM",
+
+ /* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
"Idle check parsing failed",
+
+ /* DBG_STATUS_MCP_TRACE_BAD_DATA */
"MCP Trace data is corrupt",
- "Dump doesn't contain meta data - it must be provided in an image file",
+
+ /* DBG_STATUS_MCP_TRACE_NO_META */
+ "Dump doesn't contain meta data - it must be provided in image file",
+
+ /* DBG_STATUS_MCP_COULD_NOT_HALT */
"Failed to halt MCP",
+
+ /* DBG_STATUS_MCP_COULD_NOT_RESUME */
"Failed to resume MCP after halt",
+
+ /* DBG_STATUS_DMAE_FAILED */
"DMAE transaction failed",
+
+ /* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
"Failed to empty SEMI sync FIFO",
+
+ /* DBG_STATUS_IGU_FIFO_BAD_DATA */
"IGU FIFO data is corrupt",
+
+ /* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
"MCP failed to mask parities",
+
+ /* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
"FW Asserts parsing failed",
+
+ /* DBG_STATUS_REG_FIFO_BAD_DATA */
"GRC FIFO data is corrupt",
+
+ /* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
"Protection Override data is corrupt",
+
+ /* DBG_STATUS_DBG_ARRAY_NOT_SET */
"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
- "When a block is filtered, no other blocks can be recorded unless inputs are unified (due to a HW bug)"
+
+ /* DBG_STATUS_FILTER_BUG */
+ "Debug Bus filtering requires the -unifyInputs option (due to a HW bug)",
+
+ /* DBG_STATUS_NON_MATCHING_LINES */
+ "Non-matching debug lines - all lines must be of the same type (either 128b or 256b)",
+
+ /* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */
+ "The selected trigger dword offset wasn't enabled in the recorded HW block",
+
+ /* DBG_STATUS_DBG_BUS_IN_USE */
+ "The debug bus is in use"
};
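+
+/* s_status_str is indexed directly by enum dbg_status, so the entries above
+ * must remain in the same order as the enum (hence the per-entry labels).
+ */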
/* Idle check severity names array */
@@ -5223,12 +5881,13 @@
"DEBUG"
};
-/* Parsing strings */
+/* Access type names array */
static const char * const s_access_strs[] = {
"read",
"write"
};
+/* Privilege type names array */
static const char * const s_privilege_strs[] = {
"VF",
"PDA",
@@ -5236,6 +5895,7 @@
"UA"
};
+/* Protection type names array */
static const char * const s_protection_strs[] = {
"(default)",
"(default)",
@@ -5247,6 +5907,7 @@
"override UA"
};
+/* Master type names array */
static const char * const s_master_strs[] = {
"???",
"pxp",
@@ -5266,6 +5927,7 @@
"???"
};
+/* REG FIFO error messages array */
static const char * const s_reg_fifo_error_strs[] = {
"grc timeout",
"address doesn't belong to any block",
@@ -5274,6 +5936,7 @@
"path isolation error"
};
+/* IGU FIFO sources array */
static const char * const s_igu_fifo_source_strs[] = {
"TSTORM",
"MSTORM",
@@ -5288,6 +5951,7 @@
"GRC",
};
+/* IGU FIFO error messages */
static const char * const s_igu_fifo_error_strs[] = {
"no error",
"length error",
@@ -5308,13 +5972,18 @@
/* IGU FIFO address data */
static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
- {0x0, 0x101, "MSI-X Memory", NULL, IGU_ADDR_TYPE_MSIX_MEM},
- {0x102, 0x1ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
- {0x200, 0x200, "Write PBA[0:63]", NULL, IGU_ADDR_TYPE_WRITE_PBA},
+ {0x0, 0x101, "MSI-X Memory", NULL,
+ IGU_ADDR_TYPE_MSIX_MEM},
+ {0x102, 0x1ff, "reserved", NULL,
+ IGU_ADDR_TYPE_RESERVED},
+ {0x200, 0x200, "Write PBA[0:63]", NULL,
+ IGU_ADDR_TYPE_WRITE_PBA},
{0x201, 0x201, "Write PBA[64:127]", "reserved",
IGU_ADDR_TYPE_WRITE_PBA},
- {0x202, 0x202, "Write PBA[128]", "reserved", IGU_ADDR_TYPE_WRITE_PBA},
- {0x203, 0x3ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
+ {0x202, 0x202, "Write PBA[128]", "reserved",
+ IGU_ADDR_TYPE_WRITE_PBA},
+ {0x203, 0x3ff, "reserved", NULL,
+ IGU_ADDR_TYPE_RESERVED},
{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
IGU_ADDR_TYPE_WRITE_INT_ACK},
{0x5f0, 0x5f0, "Attention bits update", NULL,
@@ -5331,8 +6000,10 @@
IGU_ADDR_TYPE_READ_INT},
{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
IGU_ADDR_TYPE_READ_INT},
- {0x5f7, 0x5ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
- {0x600, 0x7ff, "Producer update", NULL, IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
+ {0x5f7, 0x5ff, "reserved", NULL,
+ IGU_ADDR_TYPE_RESERVED},
+ {0x600, 0x7ff, "Producer update", NULL,
+ IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
};
/******************************** Variables **********************************/
@@ -5340,28 +6011,12 @@
/* MCP Trace meta data - used in case the dump doesn't contain the meta data
* (e.g. due to no NVRAM access).
*/
-static struct dbg_array s_mcp_trace_meta = { NULL, 0 };
+static struct user_dbg_array s_mcp_trace_meta = { NULL, 0 };
/* Temporary buffer, used for print size calculations */
static char s_temp_buf[MAX_MSG_LEN];
-/***************************** Public Functions *******************************/
-
-enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
-{
- /* Convert binary data to debug arrays */
- struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
- u8 buf_id;
-
- for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
- s_dbg_arrays[buf_id].ptr =
- (u32 *)(bin_ptr + buf_array[buf_id].offset);
- s_dbg_arrays[buf_id].size_in_dwords =
- BYTES_TO_DWORDS(buf_array[buf_id].length);
- }
-
- return DBG_STATUS_OK;
-}
+/**************************** Private Functions ******************************/
static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
{
@@ -5381,10 +6036,8 @@
u32 *offset,
u32 buf_size, u8 num_bytes_to_read)
{
- u8 *bytes_buf = (u8 *)buf;
- u8 *val_ptr;
+ u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
u32 val = 0;
- u8 i;
val_ptr = (u8 *)&val;
@@ -5412,6 +6065,7 @@
u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
*offset += 4;
+
return dword_val;
}
@@ -5445,7 +6099,7 @@
const char **param_str_val, u32 *param_num_val)
{
char *char_buf = (char *)dump_buf;
- u32 offset = 0; /* In bytes */
+ size_t offset = 0;
/* Extract param name */
*param_name = char_buf;
@@ -5493,37 +6147,31 @@
u32 i, dump_offset = 0, results_offset = 0;
for (i = 0; i < num_section_params; i++) {
- const char *param_name;
- const char *param_str_val;
+ const char *param_name, *param_str_val;
u32 param_num_val = 0;
dump_offset += qed_read_param(dump_buf + dump_offset,
&param_name,
&param_str_val, &param_num_val);
+
if (param_str_val)
- /* String param */
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
"%s: %s\n", param_name, param_str_val);
else if (strcmp(param_name, "fw-timestamp"))
- /* Numeric param */
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
"%s: %d\n", param_name, param_num_val);
}
- results_offset +=
- sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
- *num_chars_printed = results_offset;
- return dump_offset;
-}
+ results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
+ "\n");
-const char *qed_dbg_get_status_str(enum dbg_status status)
-{
- return (status <
- MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
+ *num_chars_printed = results_offset;
+
+ return dump_offset;
}
/* Parses the idle check rules and returns the number of characters printed.
@@ -5537,7 +6185,10 @@
char *results_buf,
u32 *num_errors, u32 *num_warnings)
{
- u32 rule_idx, results_offset = 0; /* Offset in results_buf in bytes */
+ /* Offset in results_buf in bytes */
+ u32 results_offset = 0;
+
+ u32 rule_idx;
u16 i, j;
*num_errors = 0;
@@ -5548,16 +6199,15 @@
rule_idx++) {
const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
struct dbg_idle_chk_result_hdr *hdr;
- const char *parsing_str;
+ const char *parsing_str, *lsi_msg;
u32 parsing_str_offset;
- const char *lsi_msg;
- u8 curr_reg_id = 0;
bool has_fw_msg;
+ u8 curr_reg_id;
hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
rule_parsing_data =
(const struct dbg_idle_chk_rule_parsing_data *)
- &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
+ &s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
ptr[hdr->rule_id];
parsing_str_offset =
GET_FIELD(rule_parsing_data->data,
@@ -5565,16 +6215,18 @@
has_fw_msg =
GET_FIELD(rule_parsing_data->data,
DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
- parsing_str = &((const char *)
- s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
- [parsing_str_offset];
+ parsing_str =
+ &((const char *)
+ s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
+ [parsing_str_offset];
lsi_msg = parsing_str;
+ curr_reg_id = 0;
if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
return 0;
/* Skip rule header */
- dump_buf += (sizeof(struct dbg_idle_chk_result_hdr) / 4);
+ dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
/* Update errors/warnings count */
if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
@@ -5606,19 +6258,19 @@
for (i = 0;
i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
i++) {
- struct dbg_idle_chk_result_reg_hdr *reg_hdr
- = (struct dbg_idle_chk_result_reg_hdr *)
- dump_buf;
- bool is_mem =
- GET_FIELD(reg_hdr->data,
- DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
- u8 reg_id =
- GET_FIELD(reg_hdr->data,
- DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr;
+ bool is_mem;
+ u8 reg_id;
+
+ reg_hdr =
+ (struct dbg_idle_chk_result_reg_hdr *)dump_buf;
+ is_mem = GET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
+ reg_id = GET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
/* Skip reg header */
- dump_buf +=
- (sizeof(struct dbg_idle_chk_result_reg_hdr) / 4);
+ dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));
/* Skip register names until the required reg_id is
* reached.
@@ -5660,6 +6312,7 @@
/* Check if end of dump buffer was exceeded */
if (dump_buf > dump_buf_end)
return 0;
+
return results_offset;
}
@@ -5680,13 +6333,16 @@
const char *section_name, *param_name, *param_str_val;
u32 *dump_buf_end = dump_buf + num_dumped_dwords;
u32 num_section_params = 0, num_rules;
- u32 results_offset = 0; /* Offset in results_buf in bytes */
+
+ /* Offset in results_buf in bytes */
+ u32 results_offset = 0;
*parsed_results_bytes = 0;
*num_errors = 0;
*num_warnings = 0;
- if (!s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
+
+ if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
+ !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
/* Read global_params section */
@@ -5705,10 +6361,9 @@
&section_name, &num_section_params);
if (strcmp(section_name, "idle_chk") || num_section_params != 1)
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
-
dump_buf += qed_read_param(dump_buf,
&param_name, &param_str_val, &num_rules);
- if (strcmp(param_name, "num_rules") != 0)
+ if (strcmp(param_name, "num_rules"))
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
if (num_rules) {
@@ -5728,7 +6383,7 @@
results_offset : NULL,
num_errors, num_warnings);
results_offset += rules_print_size;
- if (rules_print_size == 0)
+ if (!rules_print_size)
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
/* Print LSI output */
@@ -5745,66 +6400,35 @@
results_offset : NULL,
num_errors, num_warnings);
results_offset += rules_print_size;
- if (rules_print_size == 0)
+ if (!rules_print_size)
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
}
/* Print errors/warnings count */
- if (*num_errors) {
+ if (*num_errors)
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
"\nIdle Check failed!!! (with %d errors and %d warnings)\n",
*num_errors, *num_warnings);
- } else if (*num_warnings) {
+ else if (*num_warnings)
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
- "\nIdle Check completed successfuly (with %d warnings)\n",
+ "\nIdle Check completed successfully (with %d warnings)\n",
*num_warnings);
- } else {
+ else
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
- "\nIdle Check completed successfuly\n");
- }
+ "\nIdle Check completed successfully\n");
/* Add 1 for string NULL termination */
*parsed_results_bytes = results_offset + 1;
+
return DBG_STATUS_OK;
}
-enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size)
-{
- u32 num_errors, num_warnings;
-
- return qed_parse_idle_chk_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL,
- results_buf_size,
- &num_errors, &num_warnings);
-}
-
-enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf,
- u32 *num_errors, u32 *num_warnings)
-{
- u32 parsed_buf_size;
-
- return qed_parse_idle_chk_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- results_buf,
- &parsed_buf_size,
- num_errors, num_warnings);
-}
-
/* Frees the specified MCP Trace meta data */
static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn,
struct mcp_trace_meta *meta)
@@ -5841,12 +6465,10 @@
/* Read first signature */
signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
- if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ if (signature != NVM_MAGIC_VALUE)
return DBG_STATUS_INVALID_TRACE_SIGNATURE;
- /* Read number of modules and allocate memory for all the modules
- * pointers.
- */
+ /* Read no. of modules and allocate memory for their pointers */
meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL);
if (!meta->modules)
@@ -5871,7 +6493,7 @@
/* Read second signature */
signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
- if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ if (signature != NVM_MAGIC_VALUE)
return DBG_STATUS_INVALID_TRACE_SIGNATURE;
/* Read number of formats and allocate memory for all formats */
@@ -5919,10 +6541,10 @@
char *results_buf,
u32 *parsed_results_bytes)
{
- u32 results_offset = 0, param_mask, param_shift, param_num_val;
- u32 num_section_params, offset, end_offset, bytes_left;
+ u32 end_offset, bytes_left, trace_data_dwords, trace_meta_dwords;
+ u32 param_mask, param_shift, param_num_val, num_section_params;
const char *section_name, *param_name, *param_str_val;
- u32 trace_data_dwords, trace_meta_dwords;
+ u32 offset, results_offset = 0;
struct mcp_trace_meta meta;
struct mcp_trace *trace;
enum dbg_status status;
@@ -5955,7 +6577,7 @@
/* Prepare trace info */
trace = (struct mcp_trace *)dump_buf;
- trace_buf = (u8 *)dump_buf + sizeof(struct mcp_trace);
+ trace_buf = (u8 *)dump_buf + sizeof(*trace);
offset = trace->trace_oldest;
end_offset = trace->trace_prod;
bytes_left = qed_cyclic_sub(end_offset, offset, trace->size);
@@ -5968,7 +6590,7 @@
return DBG_STATUS_MCP_TRACE_BAD_DATA;
dump_buf += qed_read_param(dump_buf,
&param_name, &param_str_val, &param_num_val);
- if (strcmp(param_name, "size") != 0)
+ if (strcmp(param_name, "size"))
return DBG_STATUS_MCP_TRACE_BAD_DATA;
trace_meta_dwords = param_num_val;
@@ -6028,6 +6650,7 @@
}
format_ptr = &meta.formats[format_idx];
+
for (i = 0,
param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
@@ -6050,6 +6673,7 @@
*/
if (param_size == 3)
param_size = 4;
+
if (bytes_left < param_size) {
status = DBG_STATUS_MCP_TRACE_BAD_DATA;
goto free_mem;
@@ -6059,13 +6683,14 @@
&offset,
trace->size,
param_size);
+
bytes_left -= param_size;
}
format_level =
(u8)((format_ptr->data &
MCP_TRACE_FORMAT_LEVEL_MASK) >>
- MCP_TRACE_FORMAT_LEVEL_SHIFT);
+ MCP_TRACE_FORMAT_LEVEL_SHIFT);
format_module =
(u8)((format_ptr->data &
MCP_TRACE_FORMAT_MODULE_MASK) >>
@@ -6094,30 +6719,6 @@
return status;
}
-enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size)
-{
- return qed_parse_mcp_trace_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
-}
-
-enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf)
-{
- u32 parsed_buf_size;
-
- return qed_parse_mcp_trace_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- results_buf, &parsed_buf_size);
-}
-
/* Parses a Reg FIFO dump buffer.
* If result_buf is not NULL, the Reg FIFO results are printed to it.
* In any case, the required results buffer size is assigned to
@@ -6130,10 +6731,11 @@
char *results_buf,
u32 *parsed_results_bytes)
{
- u32 results_offset = 0, param_num_val, num_section_params, num_elements;
const char *section_name, *param_name, *param_str_val;
+ u32 param_num_val, num_section_params, num_elements;
struct reg_fifo_element *elements;
u8 i, j, err_val, vf_val;
+ u32 results_offset = 0;
char vf_str[4];
/* Read global_params section */
@@ -6179,17 +6781,17 @@
"raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
elements[i].data,
(u32)GET_FIELD(elements[i].data,
- REG_FIFO_ELEMENT_ADDRESS) *
- REG_FIFO_ELEMENT_ADDR_FACTOR,
- s_access_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_ADDRESS) *
+ REG_FIFO_ELEMENT_ADDR_FACTOR,
+ s_access_strs[GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_ACCESS)],
(u32)GET_FIELD(elements[i].data,
- REG_FIFO_ELEMENT_PF), vf_str,
+ REG_FIFO_ELEMENT_PF),
+ vf_str,
(u32)GET_FIELD(elements[i].data,
- REG_FIFO_ELEMENT_PORT),
- s_privilege_strs[GET_FIELD(elements[i].
- data,
- REG_FIFO_ELEMENT_PRIVILEGE)],
+ REG_FIFO_ELEMENT_PORT),
+ s_privilege_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PRIVILEGE)],
s_protection_strs[GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_PROTECTION)],
s_master_strs[GET_FIELD(elements[i].data,
@@ -6201,18 +6803,18 @@
REG_FIFO_ELEMENT_ERROR);
j < ARRAY_SIZE(s_reg_fifo_error_strs);
j++, err_val >>= 1) {
- if (!(err_val & 0x1))
- continue;
- if (err_printed)
+ if (err_val & 0x1) {
+ if (err_printed)
+ results_offset +=
+ sprintf(qed_get_buf_ptr
+ (results_buf,
+ results_offset), ", ");
results_offset +=
- sprintf(qed_get_buf_ptr(results_buf,
- results_offset),
- ", ");
- results_offset +=
- sprintf(qed_get_buf_ptr(results_buf,
- results_offset), "%s",
- s_reg_fifo_error_strs[j]);
- err_printed = true;
+ sprintf(qed_get_buf_ptr
+ (results_buf, results_offset), "%s",
+ s_reg_fifo_error_strs[j]);
+ err_printed = true;
+ }
}
results_offset +=
@@ -6225,31 +6827,140 @@
/* Add 1 for string NULL termination */
*parsed_results_bytes = results_offset + 1;
+
return DBG_STATUS_OK;
}
-enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size)
+static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element *element,
+ char *results_buf,
+ u32 *results_offset,
+ u32 *parsed_results_bytes)
{
- return qed_parse_reg_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
-}
+ const struct igu_fifo_addr_data *found_addr = NULL;
+ u8 source, err_type, i, is_cleanup;
+ char parsed_addr_data[32];
+ char parsed_wr_data[256];
+ u32 wr_data, prod_cons;
+ bool is_wr_cmd, is_pf;
+ u16 cmd_addr;
+ u64 dword12;
-enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf)
-{
- u32 parsed_buf_size;
+ /* Dword12 (dword index 1 and 2) contains bits 32..95 of the
+ * FIFO element.
+ */
+ dword12 = ((u64)element->dword2 << 32) | element->dword1;
+ is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
+ is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
+ cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
+ source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
+ err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
- return qed_parse_reg_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- results_buf, &parsed_buf_size);
+ if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+ /* Find address data */
+ for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
+ const struct igu_fifo_addr_data *curr_addr =
+ &s_igu_fifo_addr_data[i];
+
+ if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
+ curr_addr->end_addr)
+ found_addr = curr_addr;
+ }
+
+ if (!found_addr)
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+ /* Prepare parsed address data */
+ switch (found_addr->type) {
+ case IGU_ADDR_TYPE_MSIX_MEM:
+ sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
+ break;
+ case IGU_ADDR_TYPE_WRITE_INT_ACK:
+ case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
+ sprintf(parsed_addr_data,
+ " SB = 0x%x", cmd_addr - found_addr->start_addr);
+ break;
+ default:
+ parsed_addr_data[0] = '\0';
+ }
+
+ if (!is_wr_cmd) {
+ parsed_wr_data[0] = '\0';
+ goto out;
+ }
+
+ /* Prepare parsed write data */
+ wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
+ prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
+ is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);
+
+ if (source == IGU_SRC_ATTN) {
+ sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
+ } else {
+ if (is_cleanup) {
+ u8 cleanup_val, cleanup_type;
+
+ cleanup_val =
+ GET_FIELD(wr_data,
+ IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
+ cleanup_type =
+ GET_FIELD(wr_data,
+ IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
+
+ sprintf(parsed_wr_data,
+ "cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
+ cleanup_val ? "set" : "clear",
+ cleanup_type);
+ } else {
+ u8 update_flag, en_dis_int_for_sb, segment;
+ u8 timer_mask;
+
+ update_flag = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_UPDATE_FLAG);
+ en_dis_int_for_sb =
+ GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
+ segment = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_SEGMENT);
+ timer_mask = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_TIMER_MASK);
+
+ sprintf(parsed_wr_data,
+ "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
+ prod_cons,
+ update_flag ? "update" : "nop",
+ en_dis_int_for_sb
+ ? (en_dis_int_for_sb == 1 ? "disable" : "nop")
+ : "enable",
+ segment ? "attn" : "regular",
+ timer_mask);
+ }
+ }
+out:
+ /* Add parsed element to parsed buffer */
+ *results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ *results_offset),
+ "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
+ element->dword2, element->dword1,
+ element->dword0,
+ is_pf ? "pf" : "vf",
+ GET_FIELD(element->dword0,
+ IGU_FIFO_ELEMENT_DWORD0_FID),
+ s_igu_fifo_source_strs[source],
+ is_wr_cmd ? "wr" : "rd",
+ cmd_addr,
+ (!is_pf && found_addr->vf_desc)
+ ? found_addr->vf_desc
+ : found_addr->desc,
+ parsed_addr_data,
+ parsed_wr_data,
+ s_igu_fifo_error_strs[err_type]);
+
+ return DBG_STATUS_OK;
}
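
For illustration, here is a minimal self-contained sketch (not driver code) of the bit-recombination pattern used above: the upper two dwords of a 96-bit element are merged into one 64-bit value, and a field is extracted with a mask/shift pair in the style of GET_FIELD(). The EX_* layout values are hypothetical, not the real IGU definitions.

#include <stdint.h>
#include <stdio.h>

#define EX_FIELD_MASK  0x1ULL	/* hypothetical 1-bit field */
#define EX_FIELD_SHIFT 63	/* hypothetical position within dword12 */
#define EX_GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)

int main(void)
{
	uint32_t dword1 = 0x12345678, dword2 = 0x9abcdef0;

	/* dword12 carries bits 32..95 of the element */
	uint64_t dword12 = ((uint64_t)dword2 << 32) | dword1;

	printf("is_wr_cmd-like bit: %llu\n",
	       (unsigned long long)EX_GET_FIELD(dword12, EX_FIELD));
	return 0;
}
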
/* Parses an IGU FIFO dump buffer.
@@ -6264,12 +6975,12 @@
char *results_buf,
u32 *parsed_results_bytes)
{
- u32 results_offset = 0, param_num_val, num_section_params, num_elements;
const char *section_name, *param_name, *param_str_val;
+ u32 param_num_val, num_section_params, num_elements;
struct igu_fifo_element *elements;
- char parsed_addr_data[32];
- char parsed_wr_data[256];
- u8 i, j;
+ enum dbg_status status;
+ u32 results_offset = 0;
+ u8 i;
/* Read global_params section */
dump_buf += qed_read_section_hdr(dump_buf,
@@ -6298,118 +7009,12 @@
/* Decode elements */
for (i = 0; i < num_elements; i++) {
- /* dword12 (dword index 1 and 2) contains bits 32..95 of the
- * FIFO element.
- */
- u64 dword12 =
- ((u64)elements[i].dword2 << 32) | elements[i].dword1;
- bool is_wr_cmd = GET_FIELD(dword12,
- IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
- bool is_pf = GET_FIELD(elements[i].dword0,
- IGU_FIFO_ELEMENT_DWORD0_IS_PF);
- u16 cmd_addr = GET_FIELD(elements[i].dword0,
- IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
- u8 source = GET_FIELD(elements[i].dword0,
- IGU_FIFO_ELEMENT_DWORD0_SOURCE);
- u8 err_type = GET_FIELD(elements[i].dword0,
- IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
- const struct igu_fifo_addr_data *addr_data = NULL;
-
- if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
- return DBG_STATUS_IGU_FIFO_BAD_DATA;
- if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
- return DBG_STATUS_IGU_FIFO_BAD_DATA;
-
- /* Find address data */
- for (j = 0; j < ARRAY_SIZE(s_igu_fifo_addr_data) && !addr_data;
- j++)
- if (cmd_addr >= s_igu_fifo_addr_data[j].start_addr &&
- cmd_addr <= s_igu_fifo_addr_data[j].end_addr)
- addr_data = &s_igu_fifo_addr_data[j];
- if (!addr_data)
- return DBG_STATUS_IGU_FIFO_BAD_DATA;
-
- /* Prepare parsed address data */
- switch (addr_data->type) {
- case IGU_ADDR_TYPE_MSIX_MEM:
- sprintf(parsed_addr_data,
- " vector_num=0x%x", cmd_addr / 2);
- break;
- case IGU_ADDR_TYPE_WRITE_INT_ACK:
- case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
- sprintf(parsed_addr_data,
- " SB=0x%x", cmd_addr - addr_data->start_addr);
- break;
- default:
- parsed_addr_data[0] = '\0';
- }
-
- /* Prepare parsed write data */
- if (is_wr_cmd) {
- u32 wr_data = GET_FIELD(dword12,
- IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
- u32 prod_cons = GET_FIELD(wr_data,
- IGU_FIFO_WR_DATA_PROD_CONS);
- u8 is_cleanup = GET_FIELD(wr_data,
- IGU_FIFO_WR_DATA_CMD_TYPE);
-
- if (source == IGU_SRC_ATTN) {
- sprintf(parsed_wr_data,
- "prod: 0x%x, ", prod_cons);
- } else {
- if (is_cleanup) {
- u8 cleanup_val = GET_FIELD(wr_data,
- IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
- u8 cleanup_type = GET_FIELD(wr_data,
- IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
-
- sprintf(parsed_wr_data,
- "cmd_type: cleanup, cleanup_val: %s, cleanup_type: %d, ",
- cleanup_val ? "set" : "clear",
- cleanup_type);
- } else {
- u8 update_flag = GET_FIELD(wr_data,
- IGU_FIFO_WR_DATA_UPDATE_FLAG);
- u8 en_dis_int_for_sb =
- GET_FIELD(wr_data,
- IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
- u8 segment = GET_FIELD(wr_data,
- IGU_FIFO_WR_DATA_SEGMENT);
- u8 timer_mask = GET_FIELD(wr_data,
- IGU_FIFO_WR_DATA_TIMER_MASK);
-
- sprintf(parsed_wr_data,
- "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb: %s, segment: %s, timer_mask=%d, ",
- prod_cons,
- update_flag ? "update" : "nop",
- en_dis_int_for_sb
- ? (en_dis_int_for_sb ==
- 1 ? "disable" : "nop") :
- "enable",
- segment ? "attn" : "regular",
- timer_mask);
- }
- }
- } else {
- parsed_wr_data[0] = '\0';
- }
-
- /* Add parsed element to parsed buffer */
- results_offset +=
- sprintf(qed_get_buf_ptr(results_buf,
- results_offset),
- "raw: 0x%01x%08x%08x, %s: %d, source: %s, type: %s, cmd_addr: 0x%x (%s%s), %serror: %s\n",
- elements[i].dword2, elements[i].dword1,
- elements[i].dword0,
- is_pf ? "pf" : "vf",
- GET_FIELD(elements[i].dword0,
- IGU_FIFO_ELEMENT_DWORD0_FID),
- s_igu_fifo_source_strs[source],
- is_wr_cmd ? "wr" : "rd", cmd_addr,
- (!is_pf && addr_data->vf_desc)
- ? addr_data->vf_desc : addr_data->desc,
- parsed_addr_data, parsed_wr_data,
- s_igu_fifo_error_strs[err_type]);
+ status = qed_parse_igu_fifo_element(&elements[i],
+ results_buf,
+ &results_offset,
+ parsed_results_bytes);
+ if (status != DBG_STATUS_OK)
+ return status;
}
results_offset += sprintf(qed_get_buf_ptr(results_buf,
@@ -6418,33 +7023,10 @@
/* Add 1 for string NULL termination */
*parsed_results_bytes = results_offset + 1;
+
return DBG_STATUS_OK;
}
-enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size)
-{
- return qed_parse_igu_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
-}
-
-enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf)
-{
- u32 parsed_buf_size;
-
- return qed_parse_igu_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- results_buf, &parsed_buf_size);
-}
-
static enum dbg_status
qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
@@ -6452,9 +7034,10 @@
char *results_buf,
u32 *parsed_results_bytes)
{
- u32 results_offset = 0, param_num_val, num_section_params, num_elements;
const char *section_name, *param_name, *param_str_val;
+ u32 param_num_val, num_section_params, num_elements;
struct protection_override_element *elements;
+ u32 results_offset = 0;
u8 i;
/* Read global_params section */
@@ -6477,7 +7060,7 @@
					  &param_name, &param_str_val, &param_num_val);
if (strcmp(param_name, "size"))
return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
- if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS != 0)
+ if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
elements = (struct protection_override_element *)dump_buf;
@@ -6486,7 +7069,7 @@
for (i = 0; i < num_elements; i++) {
u32 address = GET_FIELD(elements[i].data,
PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
- PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
+ PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
@@ -6512,9 +7095,222 @@
/* Add 1 for string NULL termination */
*parsed_results_bytes = results_offset + 1;
+
return DBG_STATUS_OK;
}
+/* Parses a FW Asserts dump buffer.
+ * If results_buf is not NULL, the FW Asserts results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 num_section_params, param_num_val, i, results_offset = 0;
+ const char *param_name, *param_str_val, *section_name;
+ bool last_section_found = false;
+
+ *parsed_results_bytes = 0;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+					 &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ while (!last_section_found) {
+ dump_buf += qed_read_section_hdr(dump_buf,
+						 &section_name,
+ &num_section_params);
+ if (!strcmp(section_name, "fw_asserts")) {
+ /* Extract params */
+ const char *storm_letter = NULL;
+ u32 storm_dump_size = 0;
+
+ for (i = 0; i < num_section_params; i++) {
+ dump_buf += qed_read_param(dump_buf,
+						   &param_name,
+						   &param_str_val,
+						   &param_num_val);
+ if (!strcmp(param_name, "storm"))
+ storm_letter = param_str_val;
+ else if (!strcmp(param_name, "size"))
+ storm_dump_size = param_num_val;
+ else
+ return
+ DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+ }
+
+ if (!storm_letter || !storm_dump_size)
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+
+ /* Print data */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\n%sSTORM_ASSERT: size=%d\n",
+ storm_letter, storm_dump_size);
+ for (i = 0; i < storm_dump_size; i++, dump_buf++)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "%08x\n", *dump_buf);
+ } else if (!strcmp(section_name, "last")) {
+ last_section_found = true;
+ } else {
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+ }
+ }
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+
+ return DBG_STATUS_OK;
+}
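
A hedged sketch of the section-walk shape shared by the dump parsers above: read a section name, dispatch on it, and stop at "last". The inputs and helper here are illustrative stand-ins, not the qed dump format.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct section { const char *name; };

static const struct section dump[] = {
	{ "global_params" }, { "fw_asserts" }, { "last" },
};

int main(void)
{
	bool last_found = false;
	size_t i = 0;

	while (!last_found && i < sizeof(dump) / sizeof(dump[0])) {
		const char *name = dump[i++].name;

		if (!strcmp(name, "fw_asserts"))
			printf("parse storm params + data\n");
		else if (!strcmp(name, "last"))
			last_found = true;
		else if (strcmp(name, "global_params"))
			return 1;	/* unknown section: parse failure */
	}
	return last_found ? 0 : 1;
}
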
+
+/***************************** Public Functions *******************************/
+
+enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
+{
+ struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
+ u8 buf_id;
+
+ /* Convert binary data to debug arrays */
+ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
+ s_user_dbg_arrays[buf_id].ptr =
+ (u32 *)(bin_ptr + buf_array[buf_id].offset);
+ s_user_dbg_arrays[buf_id].size_in_dwords =
+ BYTES_TO_DWORDS(buf_array[buf_id].length);
+ }
+
+ return DBG_STATUS_OK;
+}
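
For illustration, a small sketch of the header-table slicing qed_dbg_user_set_bin_ptr() performs, assuming a single blob prefixed by {offset, length} descriptors; struct hdr is a stand-in, not the real bin_buffer_hdr layout.

#include <stdint.h>
#include <stdio.h>

struct hdr { uint32_t offset; uint32_t length; };

int main(void)
{
	static const uint8_t blob[32] = { 0 };	/* headers + payloads */
	const struct hdr hdrs[2] = { { 16, 8 }, { 24, 8 } };

	for (int i = 0; i < 2; i++) {
		const uint8_t *ptr = blob + hdrs[i].offset;
		uint32_t dwords = hdrs[i].length / 4;	/* BYTES_TO_DWORDS */

		printf("buf %d: %p, %u dwords\n", i,
		       (const void *)ptr, dwords);
	}
	return 0;
}
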
+
+const char *qed_dbg_get_status_str(enum dbg_status status)
+{
+ return (status <
+ MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
+}
+
+enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ u32 num_errors, num_warnings;
+
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL,
+ results_buf_size,
+ &num_errors, &num_warnings);
+}
+
+enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *num_errors, u32 *num_warnings)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf,
+ &parsed_buf_size,
+ num_errors, num_warnings);
+}
+
+void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size)
+{
+ s_mcp_trace_meta.ptr = data;
+ s_mcp_trace_meta.size_in_dwords = size;
+}
+
+enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_mcp_trace_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_mcp_trace_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_reg_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_reg_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_igu_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_igu_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
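
The *_results_buf_size()/*_print_*() pairs above all wrap one parser in a two-pass protocol: a sizing call with a NULL output buffer, then a filling call. A minimal user-space sketch of that pattern follows; parse_dump() is a hypothetical stand-in for any of the qed parsers.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_dump(char *out, unsigned int *bytes)
{
	const char *text = "parsed results\n";

	if (out)
		strcpy(out, text);
	*bytes = strlen(text) + 1;	/* +1 for NULL termination */
	return 0;
}

int main(void)
{
	unsigned int size;
	char *buf;

	parse_dump(NULL, &size);	/* sizing pass */
	buf = malloc(size);
	if (!buf)
		return 1;
	parse_dump(buf, &size);		/* printing pass */
	fputs(buf, stdout);
	free(buf);
	return 0;
}
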
+
enum dbg_status
qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
@@ -6541,82 +7337,6 @@
&parsed_buf_size);
}
-/* Parses a FW Asserts dump buffer.
- * If result_buf is not NULL, the FW Asserts results are printed to it.
- * In any case, the required results buffer size is assigned to
- * parsed_results_bytes.
- * The parsing status is returned.
- */
-static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf,
- u32 *parsed_results_bytes)
-{
- u32 results_offset = 0, num_section_params, param_num_val, i;
- const char *param_name, *param_str_val, *section_name;
- bool last_section_found = false;
-
- *parsed_results_bytes = 0;
-
- /* Read global_params section */
- dump_buf += qed_read_section_hdr(dump_buf,
-					 &section_name, &num_section_params);
- if (strcmp(section_name, "global_params"))
- return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
-
- /* Print global params */
- dump_buf += qed_print_section_params(dump_buf,
- num_section_params,
- results_buf, &results_offset);
- while (!last_section_found) {
- const char *storm_letter = NULL;
- u32 storm_dump_size = 0;
-
- dump_buf += qed_read_section_hdr(dump_buf,
-						 &section_name,
- &num_section_params);
- if (!strcmp(section_name, "last")) {
- last_section_found = true;
- continue;
- } else if (strcmp(section_name, "fw_asserts")) {
- return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
- }
-
- /* Extract params */
- for (i = 0; i < num_section_params; i++) {
- dump_buf += qed_read_param(dump_buf,
-						   &param_name,
-						   &param_str_val,
-						   &param_num_val);
- if (!strcmp(param_name, "storm"))
- storm_letter = param_str_val;
- else if (!strcmp(param_name, "size"))
- storm_dump_size = param_num_val;
- else
- return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
- }
-
- if (!storm_letter || !storm_dump_size)
- return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
-
- /* Print data */
- results_offset += sprintf(qed_get_buf_ptr(results_buf,
- results_offset),
- "\n%sSTORM_ASSERT: size=%d\n",
- storm_letter, storm_dump_size);
- for (i = 0; i < storm_dump_size; i++, dump_buf++)
- results_offset +=
- sprintf(qed_get_buf_ptr(results_buf,
- results_offset),
- "%08x\n", *dump_buf);
- }
-
- /* Add 1 for string NULL termination */
- *parsed_results_bytes = results_offset + 1;
- return DBG_STATUS_OK;
-}
-
enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
@@ -6641,6 +7361,88 @@
results_buf, &parsed_buf_size);
}
+enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results)
+{
+ struct user_dbg_array *block_attn, *pstrings;
+ const u32 *block_attn_name_offsets;
+ enum dbg_attn_type attn_type;
+ const char *block_name;
+ u8 num_regs, i, j;
+
+ num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
+ attn_type = (enum dbg_attn_type)
+ GET_FIELD(results->data,
+ DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
+ block_name = s_block_info_arr[results->block_id].name;
+
+ if (!s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
+ !s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
+ !s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
+ return DBG_STATUS_DBG_ARRAY_NOT_SET;
+
+ block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS];
+ block_attn_name_offsets = &block_attn->ptr[results->names_offset];
+
+ /* Go over registers with a non-zero attention status */
+ for (i = 0; i < num_regs; i++) {
+ struct dbg_attn_reg_result *reg_result;
+ struct dbg_attn_bit_mapping *mapping;
+ u8 num_reg_attn, bit_idx = 0;
+
+ reg_result = &results->reg_results[i];
+ num_reg_attn = GET_FIELD(reg_result->data,
+ DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
+ block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
+ mapping = &((struct dbg_attn_bit_mapping *)
+ block_attn->ptr)[reg_result->block_attn_offset];
+
+ pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
+
+ /* Go over attention status bits */
+ for (j = 0; j < num_reg_attn; j++) {
+ u16 attn_idx_val = GET_FIELD(mapping[j].data,
+ DBG_ATTN_BIT_MAPPING_VAL);
+ const char *attn_name, *attn_type_str, *masked_str;
+ u32 name_offset, sts_addr;
+
+ /* Check if bit mask should be advanced (due to unused
+ * bits).
+ */
+ if (GET_FIELD(mapping[j].data,
+ DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
+ bit_idx += (u8)attn_idx_val;
+ continue;
+ }
+
+ /* Check current bit index */
+ if (!(reg_result->sts_val & BIT(bit_idx))) {
+ bit_idx++;
+ continue;
+ }
+
+ /* Find attention name */
+ name_offset = block_attn_name_offsets[attn_idx_val];
+ attn_name = &((const char *)
+ pstrings->ptr)[name_offset];
+ attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
+ "Interrupt" : "Parity";
+ masked_str = reg_result->mask_val & BIT(bit_idx) ?
+ " [masked]" : "";
+ sts_addr = GET_FIELD(reg_result->data,
+ DBG_ATTN_REG_RESULT_STS_ADDRESS);
+ DP_NOTICE(p_hwfn,
+ "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
+ block_name, attn_type_str, attn_name,
+ sts_addr, bit_idx, masked_str);
+
+ bit_idx++;
+ }
+ }
+
+ return DBG_STATUS_OK;
+}
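
A hedged sketch of the compressed bit-mapping walk above: each entry either names the next status bit or advances the index past a run of unused bits. The mapping encoding here is illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct map_entry { bool is_skip; uint8_t val; const char *name; };

int main(void)
{
	const struct map_entry map[] = {
		{ false, 0, "attn_a" },
		{ true, 2, NULL },	/* two unused bits */
		{ false, 0, "attn_b" },
	};
	uint32_t sts_val = 0x9;	/* bits 0 and 3 set */
	uint8_t bit_idx = 0;

	for (size_t j = 0; j < sizeof(map) / sizeof(map[0]); j++) {
		if (map[j].is_skip) {
			bit_idx += map[j].val;	/* advance past unused bits */
			continue;
		}
		if (sts_val & (1u << bit_idx))
			printf("%s [bit %u]\n", map[j].name, bit_idx);
		bit_idx++;
	}
	return 0;
}
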
+
/* Wrapper for unifying the idle_chk and mcp_trace api */
static enum dbg_status
qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h
index f872d73..ea1cc8e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h
@@ -20,6 +20,9 @@
DBG_FEATURE_NUM
};
+/* Forward Declaration */
+struct qed_dev;
+
int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
int qed_dbg_grc_size(struct qed_dev *cdev);
int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 463927f..49667ad 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -62,19 +62,13 @@
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
-#include "qed_roce.h"
+#include "qed_rdma.h"
static DEFINE_SPINLOCK(qm_lock);
#define QED_MIN_DPIS (4)
#define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS)
-/* API common to all protocols */
-enum BAR_ID {
- BAR_ID_0, /* used for GRC */
- BAR_ID_1 /* Used for doorbells */
-};
-
static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, enum BAR_ID bar_id)
{
@@ -83,7 +77,7 @@
u32 val;
if (IS_VF(p_hwfn->cdev))
- return 1 << 17;
+ return qed_vf_hw_bar_size(p_hwfn, bar_id);
val = qed_rd(p_hwfn, p_ptt, bar_reg);
if (val)
@@ -154,13 +148,17 @@
{
int i;
- if (IS_VF(cdev))
+ if (IS_VF(cdev)) {
+ for_each_hwfn(cdev, i)
+ qed_l2_free(&cdev->hwfns[i]);
return;
+ }
kfree(cdev->fw_data);
cdev->fw_data = NULL;
kfree(cdev->reset_stats);
+ cdev->reset_stats = NULL;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -168,20 +166,21 @@
qed_cxt_mngr_free(p_hwfn);
qed_qm_info_free(p_hwfn);
qed_spq_free(p_hwfn);
- qed_eq_free(p_hwfn, p_hwfn->p_eq);
- qed_consq_free(p_hwfn, p_hwfn->p_consq);
+ qed_eq_free(p_hwfn);
+ qed_consq_free(p_hwfn);
qed_int_free(p_hwfn);
#ifdef CONFIG_QED_LL2
- qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
+ qed_ll2_free(p_hwfn);
#endif
if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
- qed_fcoe_free(p_hwfn, p_hwfn->p_fcoe_info);
+ qed_fcoe_free(p_hwfn);
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
- qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info);
- qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info);
+ qed_iscsi_free(p_hwfn);
+ qed_ooo_free(p_hwfn);
}
qed_iov_free(p_hwfn);
+ qed_l2_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn);
}
@@ -299,7 +298,7 @@
qm_info->vport_wfq_en = 1;
/* TC config is different for AH 4 port */
- four_port = p_hwfn->cdev->num_ports_in_engines == MAX_NUM_PORTS_K2;
+ four_port = p_hwfn->cdev->num_ports_in_engine == MAX_NUM_PORTS_K2;
/* in AH 4 port we have fewer TCs per port */
qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
@@ -328,7 +327,7 @@
static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
{
/* Initialize qm port parameters */
- u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engines;
+ u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine;
/* indicate how ooo and high pri traffic is dealt with */
active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
@@ -692,7 +691,7 @@
qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn));
/* port table */
- for (i = 0; i < p_hwfn->cdev->num_ports_in_engines; i++) {
+ for (i = 0; i < p_hwfn->cdev->num_ports_in_engine; i++) {
port = &(qm_info->qm_port_params[i]);
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
@@ -822,7 +821,7 @@
goto alloc_err;
qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
- p_hwfn->cdev->num_ports_in_engines,
+ p_hwfn->cdev->num_ports_in_engine,
GFP_KERNEL);
if (!qm_info->qm_port_params)
goto alloc_err;
@@ -843,20 +842,18 @@
int qed_resc_alloc(struct qed_dev *cdev)
{
- struct qed_iscsi_info *p_iscsi_info;
- struct qed_fcoe_info *p_fcoe_info;
- struct qed_ooo_info *p_ooo_info;
-#ifdef CONFIG_QED_LL2
- struct qed_ll2_info *p_ll2_info;
-#endif
u32 rdma_tasks, excess_tasks;
- struct qed_consq *p_consq;
- struct qed_eq *p_eq;
u32 line_count;
int i, rc = 0;
- if (IS_VF(cdev))
+ if (IS_VF(cdev)) {
+ for_each_hwfn(cdev, i) {
+ rc = qed_l2_alloc(&cdev->hwfns[i]);
+ if (rc)
+ return rc;
+ }
return rc;
+ }
cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
if (!cdev->fw_data)
@@ -956,45 +953,42 @@
DP_ERR(p_hwfn,
"Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
n_eqes, 0xFFFF);
- rc = -EINVAL;
- goto alloc_err;
+ goto alloc_no_mem;
}
- p_eq = qed_eq_alloc(p_hwfn, (u16) n_eqes);
- if (!p_eq)
- goto alloc_no_mem;
- p_hwfn->p_eq = p_eq;
+ rc = qed_eq_alloc(p_hwfn, (u16) n_eqes);
+ if (rc)
+ goto alloc_err;
- p_consq = qed_consq_alloc(p_hwfn);
- if (!p_consq)
- goto alloc_no_mem;
- p_hwfn->p_consq = p_consq;
+ rc = qed_consq_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ rc = qed_l2_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
#ifdef CONFIG_QED_LL2
if (p_hwfn->using_ll2) {
- p_ll2_info = qed_ll2_alloc(p_hwfn);
- if (!p_ll2_info)
- goto alloc_no_mem;
- p_hwfn->p_ll2_info = p_ll2_info;
+ rc = qed_ll2_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
}
#endif
if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
- p_fcoe_info = qed_fcoe_alloc(p_hwfn);
- if (!p_fcoe_info)
- goto alloc_no_mem;
- p_hwfn->p_fcoe_info = p_fcoe_info;
+ rc = qed_fcoe_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
}
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
- p_iscsi_info = qed_iscsi_alloc(p_hwfn);
- if (!p_iscsi_info)
- goto alloc_no_mem;
- p_hwfn->p_iscsi_info = p_iscsi_info;
- p_ooo_info = qed_ooo_alloc(p_hwfn);
- if (!p_ooo_info)
- goto alloc_no_mem;
- p_hwfn->p_ooo_info = p_ooo_info;
+ rc = qed_iscsi_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+ rc = qed_ooo_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
}
/* DMA info initialization */
@@ -1025,16 +1019,19 @@
{
int i;
- if (IS_VF(cdev))
+ if (IS_VF(cdev)) {
+ for_each_hwfn(cdev, i)
+ qed_l2_setup(&cdev->hwfns[i]);
return;
+ }
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
qed_cxt_mngr_setup(p_hwfn);
qed_spq_setup(p_hwfn);
- qed_eq_setup(p_hwfn, p_hwfn->p_eq);
- qed_consq_setup(p_hwfn, p_hwfn->p_consq);
+ qed_eq_setup(p_hwfn);
+ qed_consq_setup(p_hwfn);
/* Read shadow of current MFW mailbox */
qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
@@ -1044,17 +1041,18 @@
qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
- qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
+ qed_l2_setup(p_hwfn);
+ qed_iov_setup(p_hwfn);
#ifdef CONFIG_QED_LL2
if (p_hwfn->using_ll2)
- qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
+ qed_ll2_setup(p_hwfn);
#endif
if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
- qed_fcoe_setup(p_hwfn, p_hwfn->p_fcoe_info);
+ qed_fcoe_setup(p_hwfn);
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
- qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info);
- qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info);
+ qed_iscsi_setup(p_hwfn);
+ qed_ooo_setup(p_hwfn);
}
}
}
@@ -1122,7 +1120,7 @@
return -EINVAL;
}
- switch (p_hwfn->cdev->num_ports_in_engines) {
+ switch (p_hwfn->cdev->num_ports_in_engine) {
case 1:
hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
break;
@@ -1134,7 +1132,7 @@
break;
default:
DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
- p_hwfn->cdev->num_ports_in_engines);
+ p_hwfn->cdev->num_ports_in_engine);
return -EINVAL;
}
@@ -1169,7 +1167,7 @@
static void qed_init_cau_rt_data(struct qed_dev *cdev)
{
u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
- int i, sb_id;
+ int i, igu_sb_id;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -1179,15 +1177,17 @@
p_igu_info = p_hwfn->hw_info.p_igu_info;
- for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
- sb_id++) {
- p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
+ for (igu_sb_id = 0;
+ igu_sb_id < QED_MAPPING_MEMORY_SIZE(cdev); igu_sb_id++) {
+ p_block = &p_igu_info->entry[igu_sb_id];
+
if (!p_block->is_pf)
continue;
qed_init_cau_sb_entry(p_hwfn, &sb_entry,
p_block->function_id, 0, 0);
- STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry);
+ STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2,
+ sb_entry);
}
}
}
@@ -1241,6 +1241,10 @@
L1_CACHE_BYTES, wr_mbs);
STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
+ if (val > 0) {
+ STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val);
+ STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val);
+ }
}
static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
@@ -1267,7 +1271,7 @@
}
	memset(&params, 0, sizeof(params));
- params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
+ params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
params.pf_rl_en = qm_info->pf_rl_en;
params.pf_wfq_en = qm_info->pf_wfq_en;
@@ -1447,8 +1451,15 @@
static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int hw_mode)
{
- return qed_init_run(p_hwfn, p_ptt, PHASE_PORT,
- p_hwfn->port_id, hw_mode);
+ int rc = 0;
+
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
+ if (rc)
+ return rc;
+
+ qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0);
+
+ return 0;
}
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
@@ -1527,7 +1538,8 @@
qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
/* send function start command */
- rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
+ rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn,
+ p_hwfn->cdev->mf_mode,
allow_npar_tx_switch);
if (rc) {
DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
@@ -1711,6 +1723,11 @@
return mfw_rc;
}
+ /* Check if there is a DID mismatch between nvm-cfg/efuse */
+ if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
+ DP_NOTICE(p_hwfn,
+ "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
+
/* send DCBX attention request command */
DP_VERBOSE(p_hwfn,
QED_MSG_DCB,
@@ -1956,6 +1973,13 @@
if (!p_ptt)
return -EAGAIN;
+	/* If roce info is allocated, it means roce is initialized and
+	 * should be enabled in the searcher.
+ */
+ if (p_hwfn->p_rdma_info &&
+ p_hwfn->b_rdma_enabled_in_prs)
+ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1);
+
/* Re-open incoming traffic */
qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
qed_ptt_release(p_hwfn, p_ptt);
@@ -1968,6 +1992,7 @@
{
qed_ptt_pool_free(p_hwfn);
kfree(p_hwfn->hw_info.p_igu_info);
+ p_hwfn->hw_info.p_igu_info = NULL;
}
/* Setup bar access */
@@ -2025,9 +2050,12 @@
static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
u32 *feat_num = p_hwfn->hw_info.feat_num;
- struct qed_sb_cnt_info sb_cnt_info;
+ struct qed_sb_cnt_info sb_cnt;
u32 non_l2_sbs = 0;
+ memset(&sb_cnt, 0, sizeof(sb_cnt));
+ qed_int_get_num_sbs(p_hwfn, &sb_cnt);
+
if (IS_ENABLED(CONFIG_QED_RDMA) &&
p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
/* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
@@ -2035,7 +2063,7 @@
* consideration as to how many l2 queues / cnqs we have.
*/
feat_num[QED_RDMA_CNQ] =
- min_t(u32, RESC_NUM(p_hwfn, QED_SB) / 2,
+ min_t(u32, sb_cnt.cnt / 2,
RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
non_l2_sbs = feat_num[QED_RDMA_CNQ];
@@ -2044,32 +2072,35 @@
if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
p_hwfn->hw_info.personality == QED_PCI_ETH) {
/* Start by allocating VF queues, then PF's */
- memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
- qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
feat_num[QED_VF_L2_QUE] = min_t(u32,
RESC_NUM(p_hwfn, QED_L2_QUEUE),
- sb_cnt_info.sb_iov_cnt);
+ sb_cnt.iov_cnt);
feat_num[QED_PF_L2_QUE] = min_t(u32,
- RESC_NUM(p_hwfn, QED_SB) -
- non_l2_sbs,
+ sb_cnt.cnt - non_l2_sbs,
RESC_NUM(p_hwfn,
QED_L2_QUEUE) -
FEAT_NUM(p_hwfn,
QED_VF_L2_QUE));
}
+ if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
+ feat_num[QED_FCOE_CQ] = min_t(u32, sb_cnt.cnt,
+ RESC_NUM(p_hwfn,
+ QED_CMDQS_CQS));
+
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
- feat_num[QED_ISCSI_CQ] = min_t(u32, RESC_NUM(p_hwfn, QED_SB),
+ feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt,
RESC_NUM(p_hwfn,
QED_CMDQS_CQS));
DP_VERBOSE(p_hwfn,
NETIF_MSG_PROBE,
- "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d ISCSI_CQ=%d #SBS=%d\n",
+ "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d #SBS=%d\n",
(int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
+ (int)FEAT_NUM(p_hwfn, QED_FCOE_CQ),
(int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ),
- RESC_NUM(p_hwfn, QED_SB));
+ (int)sb_cnt.cnt);
}
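
A worked example, with made-up resource counts, of the min_t() partitioning above: CNQs are capped at half the status blocks, VF L2 queues at the IOV SB count, and PF L2 queues take whatever SBs and L2 queues remain.

#include <stdio.h>

#define MIN_T(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int sb_cnt = 64, iov_cnt = 16;
	unsigned int num_l2_queues = 128, num_cnq_ram = 8;

	unsigned int rdma_cnq = MIN_T(sb_cnt / 2, num_cnq_ram);	/* 8 */
	unsigned int non_l2_sbs = rdma_cnq;
	unsigned int vf_l2 = MIN_T(num_l2_queues, iov_cnt);	/* 16 */
	unsigned int pf_l2 = MIN_T(sb_cnt - non_l2_sbs,
				   num_l2_queues - vf_l2);	/* 56 */

	printf("CNQ=%u VF_L2=%u PF_L2=%u\n", rdma_cnq, vf_l2, pf_l2);
	return 0;
}
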
const char *qed_hw_get_resc_name(enum qed_resources res_id)
@@ -2188,7 +2219,6 @@
{
u8 num_funcs = p_hwfn->num_funcs_on_engine;
bool b_ah = QED_IS_AH(p_hwfn->cdev);
- struct qed_sb_cnt_info sb_cnt_info;
switch (res_id) {
case QED_L2_QUEUE:
@@ -2240,9 +2270,10 @@
*p_resc_num = 1;
break;
case QED_SB:
- memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
- qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
- *p_resc_num = sb_cnt_info.sb_cnt;
+	/* Since we want its value to reflect whether the MFW supports
+	 * the new scheme, default it to 0.
+ */
+ *p_resc_num = 0;
break;
default:
return -EINVAL;
@@ -2252,7 +2283,7 @@
case QED_BDQ:
if (!*p_resc_num)
*p_resc_start = 0;
- else if (p_hwfn->cdev->num_ports_in_engines == 4)
+ else if (p_hwfn->cdev->num_ports_in_engine == 4)
*p_resc_start = p_hwfn->port_id;
else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
*p_resc_start = p_hwfn->port_id;
@@ -2311,11 +2342,6 @@
goto out;
}
- /* Special handling for status blocks; Would be revised in future */
- if (res_id == QED_SB) {
- *p_resc_num -= 1;
- *p_resc_start -= p_hwfn->enabled_func_idx;
- }
out:
/* PQs have to divide by 8 [that's the HW granularity].
* Reduce number so it would fit.
@@ -2413,6 +2439,10 @@
return -EINVAL;
}
+ /* This will also learn the number of SBs from MFW */
+ if (qed_int_igu_reset_cam(p_hwfn, p_ptt))
+ return -EINVAL;
+
qed_hw_set_feat(p_hwfn);
for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
@@ -2669,15 +2699,15 @@
port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0);
if (port_mode < 3) {
- p_hwfn->cdev->num_ports_in_engines = 1;
+ p_hwfn->cdev->num_ports_in_engine = 1;
} else if (port_mode <= 5) {
- p_hwfn->cdev->num_ports_in_engines = 2;
+ p_hwfn->cdev->num_ports_in_engine = 2;
} else {
DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
- p_hwfn->cdev->num_ports_in_engines);
+ p_hwfn->cdev->num_ports_in_engine);
- /* Default num_ports_in_engines to something */
- p_hwfn->cdev->num_ports_in_engines = 1;
+ /* Default num_ports_in_engine to something */
+ p_hwfn->cdev->num_ports_in_engine = 1;
}
}
@@ -2687,20 +2717,20 @@
u32 port;
int i;
- p_hwfn->cdev->num_ports_in_engines = 0;
+ p_hwfn->cdev->num_ports_in_engine = 0;
for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
port = qed_rd(p_hwfn, p_ptt,
CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
if (port & 1)
- p_hwfn->cdev->num_ports_in_engines++;
+ p_hwfn->cdev->num_ports_in_engine++;
}
- if (!p_hwfn->cdev->num_ports_in_engines) {
+ if (!p_hwfn->cdev->num_ports_in_engine) {
DP_NOTICE(p_hwfn, "All NIG ports are inactive\n");
/* Default num_ports_in_engine to something */
- p_hwfn->cdev->num_ports_in_engines = 1;
+ p_hwfn->cdev->num_ports_in_engine = 1;
}
}
@@ -2819,12 +2849,6 @@
cdev->chip_num, cdev->chip_rev,
cdev->chip_bond_id, cdev->chip_metal);
- if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
- DP_NOTICE(cdev->hwfns,
- "The chip type/rev (BB A0) is not supported!\n");
- return -EINVAL;
- }
-
return 0;
}
@@ -3051,12 +3075,15 @@
}
pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
- dma_free_coherent(&cdev->pdev->dev,
- pbl_size,
- p_chain->pbl_sp.p_virt_table,
- p_chain->pbl_sp.p_phys_table);
+
+ if (!p_chain->b_external_pbl)
+ dma_free_coherent(&cdev->pdev->dev,
+ pbl_size,
+ p_chain->pbl_sp.p_virt_table,
+ p_chain->pbl_sp.p_phys_table);
out:
vfree(p_chain->pbl.pp_virt_addr_tbl);
+ p_chain->pbl.pp_virt_addr_tbl = NULL;
}
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
@@ -3150,7 +3177,10 @@
return 0;
}
-static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
+static int
+qed_chain_alloc_pbl(struct qed_dev *cdev,
+ struct qed_chain *p_chain,
+ struct qed_chain_ext_pbl *ext_pbl)
{
u32 page_cnt = p_chain->page_cnt, size, i;
dma_addr_t p_phys = 0, p_pbl_phys = 0;
@@ -3170,8 +3200,16 @@
* should be saved to allow its freeing during the error flow.
*/
size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
- p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
- size, &p_pbl_phys, GFP_KERNEL);
+
+ if (!ext_pbl) {
+ p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ size, &p_pbl_phys, GFP_KERNEL);
+ } else {
+ p_pbl_virt = ext_pbl->p_pbl_virt;
+ p_pbl_phys = ext_pbl->p_pbl_phys;
+ p_chain->b_external_pbl = true;
+ }
+
qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
pp_virt_addr_tbl);
if (!p_pbl_virt)
@@ -3204,7 +3242,10 @@
enum qed_chain_use_mode intended_use,
enum qed_chain_mode mode,
enum qed_chain_cnt_type cnt_type,
- u32 num_elems, size_t elem_size, struct qed_chain *p_chain)
+ u32 num_elems,
+ size_t elem_size,
+ struct qed_chain *p_chain,
+ struct qed_chain_ext_pbl *ext_pbl)
{
u32 page_cnt;
int rc = 0;
@@ -3235,7 +3276,7 @@
rc = qed_chain_alloc_single(cdev, p_chain);
break;
case QED_CHAIN_MODE_PBL:
- rc = qed_chain_alloc_pbl(cdev, p_chain);
+ rc = qed_chain_alloc_pbl(cdev, p_chain, ext_pbl);
break;
}
if (rc)
@@ -4074,7 +4115,7 @@
if (cdev->num_hwfns > 1)
return 1;
- return cdev->num_ports_in_engines * qed_device_num_engines(cdev);
+ return cdev->num_ports_in_engine * qed_device_num_engines(cdev);
}
int qed_device_get_port_id(struct qed_dev *cdev)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index 12d16c0..1f1df1b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -307,6 +307,7 @@
* @param num_elems
* @param elem_size
* @param p_chain
+ * @param ext_pbl - a possible external PBL
*
* @return int
*/
@@ -315,7 +316,9 @@
enum qed_chain_use_mode intended_use,
enum qed_chain_mode mode,
enum qed_chain_cnt_type cnt_type,
- u32 num_elems, size_t elem_size, struct qed_chain *p_chain);
+ u32 num_elems,
+ size_t elem_size,
+ struct qed_chain *p_chain, struct qed_chain_ext_pbl *ext_pbl);
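
A hedged sketch of the optional external-PBL idiom this new parameter introduces: a NULL descriptor means allocate internally, otherwise adopt caller-owned memory and flag it so the free path skips it. Types and names below are illustrative stand-ins, not the qed API.

#include <stdbool.h>
#include <stdlib.h>

struct ext_pbl { void *virt; };
struct chain { void *pbl_virt; bool external_pbl; };

static int chain_alloc_pbl(struct chain *c, size_t size,
			   struct ext_pbl *ext)
{
	if (!ext) {
		c->pbl_virt = malloc(size);	/* internal allocation */
		if (!c->pbl_virt)
			return -1;
	} else {
		c->pbl_virt = ext->virt;	/* adopt caller memory */
		c->external_pbl = true;
	}
	return 0;
}

static void chain_free_pbl(struct chain *c)
{
	if (!c->external_pbl)
		free(c->pbl_virt);	/* mirror of the dma_free guard */
	c->pbl_virt = NULL;
}

int main(void)
{
	struct chain c = { 0 };

	if (chain_alloc_pbl(&c, 64, NULL))	/* internal path */
		return 1;
	chain_free_pbl(&c);
	return 0;
}
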
/**
* @brief qed_chain_free - Free chain DMA memory
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index 21a58ff..df195c0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -43,7 +43,6 @@
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/list.h>
@@ -142,6 +141,15 @@
p_data = &p_ramrod->init_ramrod_data;
fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params;
+ /* Sanity */
+ if (fcoe_pf_params->num_cqs > p_hwfn->hw_info.feat_num[QED_FCOE_CQ]) {
+ DP_ERR(p_hwfn,
+ "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
+ fcoe_pf_params->num_cqs,
+ p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
+ return -EINVAL;
+ }
+
p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages);
p_data->sq_num_pages_in_pbl = tmp;
@@ -184,7 +192,10 @@
p_data->q_params.queue_relative_offset = (u8)tmp;
for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
- tmp = cpu_to_le16(p_hwfn->sbs_info[i]->igu_sb_id);
+ u16 igu_sb_id;
+
+ igu_sb_id = qed_get_igu_sb_id(p_hwfn, i);
+ tmp = cpu_to_le16(igu_sb_id);
p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
}
@@ -539,7 +550,7 @@
}
}
-struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
+int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_fcoe_info *p_fcoe_info;
@@ -547,19 +558,21 @@
p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL);
if (!p_fcoe_info) {
DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info'\n");
- return NULL;
+ return -ENOMEM;
}
INIT_LIST_HEAD(&p_fcoe_info->free_list);
- return p_fcoe_info;
+
+ p_hwfn->p_fcoe_info = p_fcoe_info;
+ return 0;
}
-void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
+void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
{
struct fcoe_task_context *p_task_ctx = NULL;
int rc;
u32 i;
- spin_lock_init(&p_fcoe_info->lock);
+ spin_lock_init(&p_hwfn->p_fcoe_info->lock);
for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
rc = qed_cxt_get_task_ctx(p_hwfn, i,
QED_CTX_WORKING_MEM,
@@ -577,15 +590,15 @@
}
}
-void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
+void qed_fcoe_free(struct qed_hwfn *p_hwfn)
{
struct qed_fcoe_conn *p_conn = NULL;
- if (!p_fcoe_info)
+ if (!p_hwfn->p_fcoe_info)
return;
- while (!list_empty(&p_fcoe_info->free_list)) {
- p_conn = list_first_entry(&p_fcoe_info->free_list,
+ while (!list_empty(&p_hwfn->p_fcoe_info->free_list)) {
+ p_conn = list_first_entry(&p_hwfn->p_fcoe_info->free_list,
struct qed_fcoe_conn, list_entry);
if (!p_conn)
break;
@@ -593,7 +606,8 @@
qed_fcoe_free_connection(p_hwfn, p_conn);
}
- kfree(p_fcoe_info);
+ kfree(p_hwfn->p_fcoe_info);
+ p_hwfn->p_fcoe_info = NULL;
}
static int
@@ -734,6 +748,11 @@
info->secondary_bdq_rq_addr =
qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);
+ info->wwpn = hwfn->mcp_info->func_info.wwn_port;
+ info->wwnn = hwfn->mcp_info->func_info.wwn_node;
+
+ info->num_cqs = FEAT_NUM(hwfn, QED_FCOE_CQ);
+
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h
index 472af34..027a76a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h
@@ -49,29 +49,21 @@
};
#if IS_ENABLED(CONFIG_QED_FCOE)
-struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn);
+int qed_fcoe_alloc(struct qed_hwfn *p_hwfn);
-void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info);
+void qed_fcoe_setup(struct qed_hwfn *p_hwfn);
-void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info);
+void qed_fcoe_free(struct qed_hwfn *p_hwfn);
void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
struct qed_mcp_fcoe_stats *stats);
#else /* CONFIG_QED_FCOE */
-static inline struct qed_fcoe_info *
-qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
+static inline int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
{
- return NULL;
+ return -EINVAL;
}
-static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn,
- struct qed_fcoe_info *p_fcoe_info)
-{
-}
-
-static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn,
- struct qed_fcoe_info *p_fcoe_info)
-{
-}
+static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn) {}
+static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn) {}
static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
struct qed_mcp_fcoe_stats *stats)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 858a57a..3bf3614 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -346,7 +346,7 @@
u8 byte13;
u8 byte14;
u8 byte15;
- u8 byte16;
+ u8 e5_reserved;
__le16 word11;
__le32 reg10;
__le32 reg11;
@@ -368,85 +368,85 @@
u8 byte0;
u8 byte1;
u8 flags0;
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -681,7 +681,9 @@
__le16 packet_length;
__le16 vlan;
struct core_rx_cqe_opaque_data opaque_data;
- __le32 reserved[4];
+ struct parsing_err_flags err_flags;
+ __le16 reserved0;
+ __le32 reserved1[3];
};
struct core_rx_gsi_offload_cqe {
@@ -692,7 +694,7 @@
__le16 vlan;
__le32 src_mac_addrhi;
__le16 src_mac_addrlo;
- u8 reserved1[2];
+ __le16 qp_id;
__le32 gid_dst[4];
};
@@ -774,15 +776,15 @@
__le16 bitfield1;
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
-#define CORE_TX_BD_TX_DST_MASK 0x1
-#define CORE_TX_BD_TX_DST_SHIFT 14
-#define CORE_TX_BD_RESERVED_MASK 0x1
-#define CORE_TX_BD_RESERVED_SHIFT 15
+#define CORE_TX_BD_TX_DST_MASK 0x3
+#define CORE_TX_BD_TX_DST_SHIFT 14
};
enum core_tx_dest {
CORE_TX_DEST_NW,
CORE_TX_DEST_LB,
+ CORE_TX_DEST_RESERVED,
+ CORE_TX_DEST_DROP,
MAX_CORE_TX_DEST
};
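
Widening TX_DST from one bit to two lets all four core_tx_dest values be encoded. A small sketch with generic SET_FIELD/GET_FIELD macros, illustrative but matching the mask/shift defines above:

#include <stdint.h>
#include <stdio.h>

#define TX_DST_MASK  0x3
#define TX_DST_SHIFT 14
#define SET_FIELD(var, name, val) \
	((var) = ((var) & ~(name##_MASK << name##_SHIFT)) | \
		 (((val) & name##_MASK) << name##_SHIFT))
#define GET_FIELD(var, name) (((var) >> name##_SHIFT) & name##_MASK)

enum tx_dest { DEST_NW, DEST_LB, DEST_RESERVED, DEST_DROP };

int main(void)
{
	uint16_t bitfield1 = 0;

	SET_FIELD(bitfield1, TX_DST, DEST_DROP);	/* value 3 needs 2 bits */
	printf("tx_dst=%u\n", GET_FIELD(bitfield1, TX_DST));
	return 0;
}
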
@@ -804,12 +806,12 @@
__le32 reserved0[2];
};
-enum dcb_dhcp_update_flag {
- DONT_UPDATE_DCB_DHCP,
+enum dcb_dscp_update_mode {
+ DONT_UPDATE_DCB_DSCP,
UPDATE_DCB,
UPDATE_DSCP,
UPDATE_DCB_DSCP,
- MAX_DCB_DHCP_UPDATE_FLAG
+ MAX_DCB_DSCP_UPDATE_MODE
};
struct eth_mstorm_per_pf_stat {
@@ -917,6 +919,14 @@
u8 major_ver_arr[2];
};
+enum iwarp_ll2_tx_queues {
+ IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
+ IWARP_LL2_ALIGNED_TX_QUEUE,
+ IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE,
+ IWARP_LL2_ERROR,
+ MAX_IWARP_LL2_TX_QUEUES
+};
+
/* Mstorm non-triggering VF zone */
enum malicious_vf_error_id {
MALICIOUS_VF_NO_ERROR,
@@ -960,7 +970,7 @@
PERSONALITY_ISCSI,
PERSONALITY_FCOE,
PERSONALITY_RDMA_AND_ETH,
- PERSONALITY_RESERVED3,
+ PERSONALITY_RDMA,
PERSONALITY_CORE,
PERSONALITY_ETH,
PERSONALITY_RESERVED4,
@@ -971,16 +981,12 @@
struct pf_start_tunnel_config {
u8 set_vxlan_udp_port_flg;
u8 set_geneve_udp_port_flg;
- u8 tx_enable_vxlan;
- u8 tx_enable_l2geneve;
- u8 tx_enable_ipgeneve;
- u8 tx_enable_l2gre;
- u8 tx_enable_ipgre;
u8 tunnel_clss_vxlan;
u8 tunnel_clss_l2geneve;
u8 tunnel_clss_ipgeneve;
u8 tunnel_clss_l2gre;
u8 tunnel_clss_ipgre;
+ u8 reserved;
__le16 vxlan_udp_port;
__le16 geneve_udp_port;
};
@@ -990,6 +996,7 @@
struct regpair event_ring_pbl_addr;
struct regpair consolid_q_pbl_addr;
struct pf_start_tunnel_config tunnel_config;
+ __le32 reserved;
__le16 event_ring_sb_id;
u8 base_vf_id;
u8 num_vfs;
@@ -1007,7 +1014,6 @@
u8 pri_map_valid;
__le32 outer_tag;
struct hsi_fp_ver_struct hsi_fp_ver;
-
};
struct protocol_dcb_data {
@@ -1023,14 +1029,8 @@
u8 update_rx_pf_clss;
u8 update_rx_def_ucast_clss;
u8 update_rx_def_non_ucast_clss;
- u8 update_tx_pf_clss;
u8 set_vxlan_udp_port_flg;
u8 set_geneve_udp_port_flg;
- u8 tx_enable_vxlan;
- u8 tx_enable_l2geneve;
- u8 tx_enable_ipgeneve;
- u8 tx_enable_l2gre;
- u8 tx_enable_ipgre;
u8 tunnel_clss_vxlan;
u8 tunnel_clss_l2geneve;
u8 tunnel_clss_ipgeneve;
@@ -1038,17 +1038,17 @@
u8 tunnel_clss_ipgre;
__le16 vxlan_udp_port;
__le16 geneve_udp_port;
- __le16 reserved[2];
+ __le16 reserved;
};
struct pf_update_ramrod_data {
u8 pf_id;
- u8 update_eth_dcb_data_flag;
- u8 update_fcoe_dcb_data_flag;
- u8 update_iscsi_dcb_data_flag;
- u8 update_roce_dcb_data_flag;
- u8 update_rroce_dcb_data_flag;
- u8 update_iwarp_dcb_data_flag;
+ u8 update_eth_dcb_data_mode;
+ u8 update_fcoe_dcb_data_mode;
+ u8 update_iscsi_dcb_data_mode;
+ u8 update_roce_dcb_data_mode;
+ u8 update_rroce_dcb_data_mode;
+ u8 update_iwarp_dcb_data_mode;
u8 update_mf_vlan_flag;
struct protocol_dcb_data eth_dcb_data;
struct protocol_dcb_data fcoe_dcb_data;
@@ -1127,7 +1127,7 @@
struct regpair iscsi_irregular_pkt;
struct regpair fcoe_irregular_pkt;
struct regpair roce_irregular_pkt;
- struct regpair reserved;
+ struct regpair iwarp_irregular_pkt;
struct regpair eth_irregular_pkt;
struct regpair reserved1;
struct regpair preroce_irregular_pkt;
@@ -1326,6 +1326,87 @@
MAX_DMAE_CMD_SRC_ENUM
};
+struct mstorm_core_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct ystorm_core_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
/* IGU cleanup command */
struct igu_cleanup {
__le32 sb_id_and_flags;
@@ -1389,44 +1470,6 @@
#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
};
-
-struct mstorm_core_conn_ag_ctx {
- u8 byte0;
- u8 byte1;
- u8 flags0;
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
- __le16 word0;
- __le16 word1;
- __le32 reg0;
- __le32 reg1;
-};
-
/* per encapsulation type enabling flags */
struct prs_reg_encapsulation_type_en {
u8 flags;
@@ -1541,50 +1584,6 @@
#define SDM_OP_GEN_RESERVED_SHIFT 20
};
-struct ystorm_core_conn_ag_ctx {
- u8 byte0;
- u8 byte1;
- u8 flags0;
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
- u8 byte2;
- u8 byte3;
- __le16 word0;
- __le32 reg0;
- __le32 reg1;
- __le16 word1;
- __le16 word2;
- __le16 word3;
- __le16 word4;
- __le32 reg2;
- __le32 reg3;
-};
-
/****************************************/
/* Debug Tools HSI constants and macros */
/****************************************/
@@ -1643,6 +1642,8 @@
GRCBASE_MULD = 0x4e0000,
GRCBASE_YULD = 0x4c8000,
GRCBASE_XYLD = 0x4c0000,
+ GRCBASE_PTLD = 0x590000,
+ GRCBASE_YPLD = 0x5b0000,
GRCBASE_PRM = 0x230000,
GRCBASE_PBF_PB1 = 0xda0000,
GRCBASE_PBF_PB2 = 0xda4000,
@@ -1656,6 +1657,10 @@
GRCBASE_TCFC = 0x2d0000,
GRCBASE_IGU = 0x180000,
GRCBASE_CAU = 0x1c0000,
+ GRCBASE_RGFS = 0xf00000,
+ GRCBASE_RGSRC = 0x320000,
+ GRCBASE_TGFS = 0xd00000,
+ GRCBASE_TGSRC = 0x322000,
GRCBASE_UMAC = 0x51000,
GRCBASE_XMAC = 0x210000,
GRCBASE_DBG = 0x10000,
@@ -1669,10 +1674,6 @@
GRCBASE_PHY_PCIE = 0x620000,
GRCBASE_LED = 0x6b8000,
GRCBASE_AVS_WRAP = 0x6b0000,
- GRCBASE_RGFS = 0x19d0000,
- GRCBASE_TGFS = 0x19e0000,
- GRCBASE_PTLD = 0x19f0000,
- GRCBASE_YPLD = 0x1a10000,
GRCBASE_MISC_AEU = 0x8000,
GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
@@ -1732,6 +1733,8 @@
BLOCK_MULD,
BLOCK_YULD,
BLOCK_XYLD,
+ BLOCK_PTLD,
+ BLOCK_YPLD,
BLOCK_PRM,
BLOCK_PBF_PB1,
BLOCK_PBF_PB2,
@@ -1745,6 +1748,10 @@
BLOCK_TCFC,
BLOCK_IGU,
BLOCK_CAU,
+ BLOCK_RGFS,
+ BLOCK_RGSRC,
+ BLOCK_TGFS,
+ BLOCK_TGSRC,
BLOCK_UMAC,
BLOCK_XMAC,
BLOCK_DBG,
@@ -1758,10 +1765,6 @@
BLOCK_PHY_PCIE,
BLOCK_LED,
BLOCK_AVS_WRAP,
- BLOCK_RGFS,
- BLOCK_TGFS,
- BLOCK_PTLD,
- BLOCK_YPLD,
BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
MAX_BLOCK_ID
@@ -1780,6 +1783,10 @@
BIN_BUF_DBG_ATTN_REGS,
BIN_BUF_DBG_ATTN_INDEXES,
BIN_BUF_DBG_ATTN_NAME_OFFSETS,
+ BIN_BUF_DBG_BUS_BLOCKS,
+ BIN_BUF_DBG_BUS_LINES,
+ BIN_BUF_DBG_BUS_BLOCKS_USER_DATA,
+ BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS,
BIN_BUF_DBG_PARSING_STRINGS,
MAX_BIN_DBG_BUFFER_TYPE
};
@@ -1862,6 +1869,29 @@
MAX_DBG_ATTN_TYPE
};
+struct dbg_bus_block {
+ u8 num_of_lines;
+ u8 has_latency_events;
+ __le16 lines_offset;
+};
+
+struct dbg_bus_block_user_data {
+ u8 num_of_lines;
+ u8 has_latency_events;
+ __le16 names_offset;
+};
+
+struct dbg_bus_line {
+ u8 data;
+#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
+#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
+#define DBG_BUS_LINE_IS_256B_MASK 0x1
+#define DBG_BUS_LINE_IS_256B_SHIFT 4
+#define DBG_BUS_LINE_RESERVED_MASK 0x7
+#define DBG_BUS_LINE_RESERVED_SHIFT 5
+ u8 group_sizes;
+};
+
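The data byte in struct dbg_bus_line packs three sub-fields behind MASK/SHIFT macro pairs, the bitfield convention used throughout this header. A minimal decoding sketch follows; the GET_FIELD helper is restated locally as an assumption (qed carries an equivalent macro) so the snippet stands alone:

#include <linux/types.h>

/* Sketch: pull the sub-fields out of a packed dbg_bus_line data byte.
 * GET_FIELD mirrors the ((val) >> NAME_SHIFT) & NAME_MASK idiom implied
 * by the macro pairs above; restated here so the example is standalone.
 */
#define GET_FIELD(value, name) (((value) >> name##_SHIFT) & name##_MASK)

static u8 dbg_bus_line_num_groups(const struct dbg_bus_line *line)
{
	return GET_FIELD(line->data, DBG_BUS_LINE_NUM_OF_GROUPS);
}

static bool dbg_bus_line_is_256b(const struct dbg_bus_line *line)
{
	return GET_FIELD(line->data, DBG_BUS_LINE_IS_256B);
}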
/* condition header for registers dump */
struct dbg_dump_cond_hdr {
struct dbg_mode_hdr mode; /* Mode header */
@@ -1879,17 +1909,21 @@
__le32 dword1;
#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
#define DBG_DUMP_MEM_LENGTH_SHIFT 0
-#define DBG_DUMP_MEM_RESERVED_MASK 0xFF
-#define DBG_DUMP_MEM_RESERVED_SHIFT 24
+#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
+#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
+#define DBG_DUMP_MEM_RESERVED_SHIFT 25
};
/* register data for registers dump */
struct dbg_dump_reg {
__le32 data;
-#define DBG_DUMP_REG_ADDRESS_MASK 0xFFFFFF /* register address (in dwords) */
+#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */
#define DBG_DUMP_REG_ADDRESS_SHIFT 0
-#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
-#define DBG_DUMP_REG_LENGTH_SHIFT 24
+#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 /* indicates register is wide-bus */
+#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
+#define DBG_DUMP_REG_LENGTH_SHIFT 24
};
/* split header for registers dump */
@@ -1910,20 +1944,24 @@
/* Idle Check condition register */
struct dbg_idle_chk_cond_reg {
__le32 data;
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
- __le16 num_entries; /* number of registers entries to check */
- u8 entry_size; /* size of registers entry (in dwords) */
- u8 start_entry; /* index of the first entry to check */
+ __le16 num_entries;
+ u8 entry_size;
+ u8 start_entry;
};
/* Idle Check info register */
struct dbg_idle_chk_info_reg {
__le32 data;
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
__le16 size; /* register size in dwords */
@@ -1996,15 +2034,17 @@
/* Debug Bus block data */
struct dbg_bus_block_data {
- u8 enabled; /* Indicates if the block is enabled for recording (0/1) */
- u8 hw_id; /* HW ID associated with the block */
- u8 line_num; /* Debug line number to select */
- u8 right_shift; /* Number of units to right the debug data (0-3) */
- u8 cycle_en; /* 4-bit value: bit i set -> unit i is enabled. */
- u8 force_valid; /* 4-bit value: bit i set -> unit i is forced valid. */
- u8 force_frame; /* 4-bit value: bit i set -> unit i frame bit is forced.
- */
- u8 reserved;
+ __le16 data;
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12
+ u8 line_num;
+ u8 hw_id;
};
/* Debug Bus Clients */
@@ -2045,6 +2085,14 @@
MAX_DBG_BUS_CONSTRAINT_OPS
};
+struct dbg_bus_trigger_state_data {
+ u8 data;
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4
+};
+
/* Debug Bus memory address */
struct dbg_bus_mem_addr {
__le32 lo;
@@ -2078,66 +2126,42 @@
/* Debug Bus Storm data */
struct dbg_bus_storm_data {
- u8 fast_enabled;
- u8 fast_mode;
- u8 slow_enabled;
- u8 slow_mode;
+ u8 enabled;
+ u8 mode;
u8 hw_id;
u8 eid_filter_en;
u8 eid_range_not_mask;
u8 cid_filter_en;
union dbg_bus_storm_eid_params eid_filter_params;
- __le16 reserved;
__le32 cid;
};
/* Debug Bus data */
struct dbg_bus_data {
- __le32 app_version; /* The tools version number of the application */
- u8 state; /* The current debug bus state */
- u8 hw_dwords; /* HW dwords per cycle */
- u8 next_hw_id; /* Next HW ID to be associated with an input */
- u8 num_enabled_blocks; /* Number of blocks enabled for recording */
- u8 num_enabled_storms; /* Number of Storms enabled for recording */
- u8 target; /* Output target */
- u8 next_trigger_state; /* ID of next trigger state to be added */
- u8 next_constraint_id; /* ID of next filter/trigger constraint to be
- * added.
- */
- u8 one_shot_en; /* Indicates if one-shot mode is enabled (0/1) */
- u8 grc_input_en; /* Indicates if GRC recording is enabled (0/1) */
- u8 timestamp_input_en; /* Indicates if timestamp recording is enabled
- * (0/1).
- */
- u8 filter_en; /* Indicates if the recording filter is enabled (0/1) */
- u8 trigger_en; /* Indicates if the recording trigger is enabled (0/1) */
- u8 adding_filter; /* If true, the next added constraint belong to the
- * filter. Otherwise, it belongs to the last added
- * trigger state. Valid only if either filter or
- * triggers are enabled.
- */
- u8 filter_pre_trigger; /* Indicates if the recording filter should be
- * applied before the trigger. Valid only if both
- * filter and trigger are enabled (0/1).
- */
- u8 filter_post_trigger; /* Indicates if the recording filter should be
- * applied after the trigger. Valid only if both
- * filter and trigger are enabled (0/1).
- */
- u8 unify_inputs; /* If true, all inputs are associated with HW ID 0.
- * Otherwise, each input is assigned a different HW ID
- * (0/1).
- */
- u8 rcv_from_other_engine; /* Indicates if the other engine sends it NW
- * recording to this engine (0/1).
- */
- struct dbg_bus_pci_buf_data pci_buf; /* Debug Bus PCI buffer data. Valid
- * only when the target is
- * DBG_BUS_TARGET_ID_PCI.
- */
+ __le32 app_version;
+ u8 state;
+ u8 hw_dwords;
+ __le16 hw_id_mask;
+ u8 num_enabled_blocks;
+ u8 num_enabled_storms;
+ u8 target;
+ u8 one_shot_en;
+ u8 grc_input_en;
+ u8 timestamp_input_en;
+ u8 filter_en;
+ u8 adding_filter;
+ u8 filter_pre_trigger;
+ u8 filter_post_trigger;
__le16 reserved;
- struct dbg_bus_block_data blocks[88];/* Debug Bus data for each block */
- struct dbg_bus_storm_data storms[6]; /* Debug Bus data for each block */
+ u8 trigger_en;
+ struct dbg_bus_trigger_state_data trigger_states[3];
+ u8 next_trigger_state;
+ u8 next_constraint_id;
+ u8 unify_inputs;
+ u8 rcv_from_other_engine;
+ struct dbg_bus_pci_buf_data pci_buf;
+ struct dbg_bus_block_data blocks[88];
+ struct dbg_bus_storm_data storms[6];
};
enum dbg_bus_filter_types {
@@ -2156,12 +2180,6 @@
MAX_DBG_BUS_FRAME_MODES
};
-enum dbg_bus_input_types {
- DBG_BUS_INPUT_TYPE_STORM,
- DBG_BUS_INPUT_TYPE_BLOCK,
- MAX_DBG_BUS_INPUT_TYPES
-};
-
enum dbg_bus_other_engine_modes {
DBG_BUS_OTHER_ENGINE_MODE_NONE,
DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
@@ -2185,19 +2203,19 @@
};
enum dbg_bus_semi_frame_modes {
	DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
	DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
MAX_DBG_BUS_SEMI_FRAME_MODES
};
/* Debug bus states */
enum dbg_bus_states {
- DBG_BUS_STATE_IDLE, /* debug bus idle state (not recording) */
- DBG_BUS_STATE_READY, /* debug bus is ready for configuration and
- * recording.
- */
- DBG_BUS_STATE_RECORDING, /* debug bus is currently recording */
- DBG_BUS_STATE_STOPPED, /* debug bus recording has stopped */
+ DBG_BUS_STATE_IDLE,
+ DBG_BUS_STATE_READY,
+ DBG_BUS_STATE_RECORDING,
+ DBG_BUS_STATE_STOPPED,
MAX_DBG_BUS_STATES
};
@@ -2216,11 +2234,8 @@
/* Debug bus target IDs */
enum dbg_bus_targets {
- /* records debug bus to DBG block internal buffer */
DBG_BUS_TARGET_ID_INT_BUF,
- /* records debug bus to the NW */
DBG_BUS_TARGET_ID_NIG,
- /* records debug bus to a PCI buffer */
DBG_BUS_TARGET_ID_PCI,
MAX_DBG_BUS_TARGETS
};
@@ -2235,48 +2250,45 @@
/* Debug GRC params */
enum dbg_grc_params {
- DBG_GRC_PARAM_DUMP_TSTORM, /* dump Tstorm memories (0/1) */
- DBG_GRC_PARAM_DUMP_MSTORM, /* dump Mstorm memories (0/1) */
- DBG_GRC_PARAM_DUMP_USTORM, /* dump Ustorm memories (0/1) */
- DBG_GRC_PARAM_DUMP_XSTORM, /* dump Xstorm memories (0/1) */
- DBG_GRC_PARAM_DUMP_YSTORM, /* dump Ystorm memories (0/1) */
- DBG_GRC_PARAM_DUMP_PSTORM, /* dump Pstorm memories (0/1) */
- DBG_GRC_PARAM_DUMP_REGS, /* dump non-memory registers (0/1) */
- DBG_GRC_PARAM_DUMP_RAM, /* dump Storm internal RAMs (0/1) */
- DBG_GRC_PARAM_DUMP_PBUF, /* dump Storm passive buffer (0/1) */
- DBG_GRC_PARAM_DUMP_IOR, /* dump Storm IORs (0/1) */
- DBG_GRC_PARAM_DUMP_VFC, /* dump VFC memories (0/1) */
- DBG_GRC_PARAM_DUMP_CM_CTX, /* dump CM contexts (0/1) */
- DBG_GRC_PARAM_DUMP_PXP, /* dump PXP memories (0/1) */
- DBG_GRC_PARAM_DUMP_RSS, /* dump RSS memories (0/1) */
- DBG_GRC_PARAM_DUMP_CAU, /* dump CAU memories (0/1) */
- DBG_GRC_PARAM_DUMP_QM, /* dump QM memories (0/1) */
- DBG_GRC_PARAM_DUMP_MCP, /* dump MCP memories (0/1) */
- DBG_GRC_PARAM_RESERVED, /* reserved */
- DBG_GRC_PARAM_DUMP_CFC, /* dump CFC memories (0/1) */
- DBG_GRC_PARAM_DUMP_IGU, /* dump IGU memories (0/1) */
- DBG_GRC_PARAM_DUMP_BRB, /* dump BRB memories (0/1) */
- DBG_GRC_PARAM_DUMP_BTB, /* dump BTB memories (0/1) */
- DBG_GRC_PARAM_DUMP_BMB, /* dump BMB memories (0/1) */
- DBG_GRC_PARAM_DUMP_NIG, /* dump NIG memories (0/1) */
- DBG_GRC_PARAM_DUMP_MULD, /* dump MULD memories (0/1) */
- DBG_GRC_PARAM_DUMP_PRS, /* dump PRS memories (0/1) */
- DBG_GRC_PARAM_DUMP_DMAE, /* dump PRS memories (0/1) */
- DBG_GRC_PARAM_DUMP_TM, /* dump TM (timers) memories (0/1) */
- DBG_GRC_PARAM_DUMP_SDM, /* dump SDM memories (0/1) */
- DBG_GRC_PARAM_DUMP_DIF, /* dump DIF memories (0/1) */
- DBG_GRC_PARAM_DUMP_STATIC, /* dump static debug data (0/1) */
- DBG_GRC_PARAM_UNSTALL, /* un-stall Storms after dump (0/1) */
- DBG_GRC_PARAM_NUM_LCIDS, /* number of LCIDs (0..320) */
- DBG_GRC_PARAM_NUM_LTIDS, /* number of LTIDs (0..320) */
- /* preset: exclude all memories from dump (1 only) */
+ DBG_GRC_PARAM_DUMP_TSTORM,
+ DBG_GRC_PARAM_DUMP_MSTORM,
+ DBG_GRC_PARAM_DUMP_USTORM,
+ DBG_GRC_PARAM_DUMP_XSTORM,
+ DBG_GRC_PARAM_DUMP_YSTORM,
+ DBG_GRC_PARAM_DUMP_PSTORM,
+ DBG_GRC_PARAM_DUMP_REGS,
+ DBG_GRC_PARAM_DUMP_RAM,
+ DBG_GRC_PARAM_DUMP_PBUF,
+ DBG_GRC_PARAM_DUMP_IOR,
+ DBG_GRC_PARAM_DUMP_VFC,
+ DBG_GRC_PARAM_DUMP_CM_CTX,
+ DBG_GRC_PARAM_DUMP_PXP,
+ DBG_GRC_PARAM_DUMP_RSS,
+ DBG_GRC_PARAM_DUMP_CAU,
+ DBG_GRC_PARAM_DUMP_QM,
+ DBG_GRC_PARAM_DUMP_MCP,
+ DBG_GRC_PARAM_RESERVED,
+ DBG_GRC_PARAM_DUMP_CFC,
+ DBG_GRC_PARAM_DUMP_IGU,
+ DBG_GRC_PARAM_DUMP_BRB,
+ DBG_GRC_PARAM_DUMP_BTB,
+ DBG_GRC_PARAM_DUMP_BMB,
+ DBG_GRC_PARAM_DUMP_NIG,
+ DBG_GRC_PARAM_DUMP_MULD,
+ DBG_GRC_PARAM_DUMP_PRS,
+ DBG_GRC_PARAM_DUMP_DMAE,
+ DBG_GRC_PARAM_DUMP_TM,
+ DBG_GRC_PARAM_DUMP_SDM,
+ DBG_GRC_PARAM_DUMP_DIF,
+ DBG_GRC_PARAM_DUMP_STATIC,
+ DBG_GRC_PARAM_UNSTALL,
+ DBG_GRC_PARAM_NUM_LCIDS,
+ DBG_GRC_PARAM_NUM_LTIDS,
DBG_GRC_PARAM_EXCLUDE_ALL,
- /* preset: include memories for crash dump (1 only) */
DBG_GRC_PARAM_CRASH,
- /* perform dump only if MFW is responding (0/1) */
DBG_GRC_PARAM_PARITY_SAFE,
- DBG_GRC_PARAM_DUMP_CM, /* dump CM memories (0/1) */
- DBG_GRC_PARAM_DUMP_PHY, /* dump PHY memories (0/1) */
+ DBG_GRC_PARAM_DUMP_CM,
+ DBG_GRC_PARAM_DUMP_PHY,
DBG_GRC_PARAM_NO_MCP,
DBG_GRC_PARAM_NO_FW_VER,
MAX_DBG_GRC_PARAMS
@@ -2347,7 +2359,10 @@
DBG_STATUS_REG_FIFO_BAD_DATA,
DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
DBG_STATUS_DBG_ARRAY_NOT_SET,
- DBG_STATUS_MULTI_BLOCKS_WITH_FILTER,
+ DBG_STATUS_FILTER_BUG,
+ DBG_STATUS_NON_MATCHING_LINES,
+ DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET,
+ DBG_STATUS_DBG_BUS_IN_USE,
MAX_DBG_STATUS
};
@@ -2364,25 +2379,22 @@
/* Idle Check data */
struct idle_chk_data {
- __le32 buf_size; /* Idle check buffer size in dwords */
- u8 buf_size_set; /* Indicates if the idle check buffer size was set
- * (0/1).
- */
+ __le32 buf_size;
+ u8 buf_size_set;
u8 reserved1;
__le16 reserved2;
};
/* Debug Tools data (per HW function) */
struct dbg_tools_data {
- struct dbg_grc_data grc; /* GRC Dump data */
- struct dbg_bus_data bus; /* Debug Bus data */
- struct idle_chk_data idle_chk; /* Idle Check data */
- u8 mode_enable[40]; /* Indicates if a mode is enabled (0/1) */
- u8 block_in_reset[88]; /* Indicates if a block is in reset state (0/1).
- */
- u8 chip_id; /* Chip ID (from enum chip_ids) */
- u8 platform_id; /* Platform ID (from enum platform_ids) */
- u8 initialized; /* Indicates if the data was initialized */
+ struct dbg_grc_data grc;
+ struct dbg_bus_data bus;
+ struct idle_chk_data idle_chk;
+ u8 mode_enable[40];
+ u8 block_in_reset[88];
+ u8 chip_id;
+ u8 platform_id;
+ u8 initialized;
u8 reserved;
};
@@ -2464,6 +2476,12 @@
/* Max size in dwords of a zipped array */
#define MAX_ZIPPED_SIZE 8192
+enum chip_ids {
+ CHIP_BB,
+ CHIP_K2,
+ CHIP_RESERVED,
+ MAX_CHIP_IDS
+};
struct fw_asserts_ram_section {
__le16 section_ram_line_offset;
@@ -2475,18 +2493,18 @@
};
struct fw_ver_num {
- u8 major; /* Firmware major version number */
- u8 minor; /* Firmware minor version number */
- u8 rev; /* Firmware revision version number */
- u8 eng; /* Firmware engineering version number (for bootleg versions) */
+ u8 major;
+ u8 minor;
+ u8 rev;
+ u8 eng;
};
struct fw_ver_info {
- __le16 tools_ver; /* Tools version number */
- u8 image_id; /* FW image ID (e.g. main) */
+ __le16 tools_ver;
+ u8 image_id;
u8 reserved1;
- struct fw_ver_num num; /* FW version number */
- __le32 timestamp; /* FW Timestamp in unix time (sec. since 1970) */
+ struct fw_ver_num num;
+ __le32 timestamp;
__le32 reserved2;
};
@@ -2722,7 +2740,6 @@
#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
#define INIT_READ_OP_ADDRESS_SHIFT 9
__le32 expected_val;
-
};
/* Init operations union */
@@ -2782,6 +2799,7 @@
* @param bin_ptr - a pointer to the binary data with debug arrays.
*/
enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
+
/**
* @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
* default value.
@@ -2805,6 +2823,7 @@
enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
+
/**
* @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer.
*
@@ -2824,6 +2843,7 @@
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
+
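All of the dump features declared here share the same two-phase protocol: query the required buffer size in dwords, allocate, then perform the dump. A hedged caller sketch for the GRC pair; the allocation strategy, errno mapping, and the DBG_STATUS_OK success value are illustrative assumptions, not taken from the driver:

#include <linux/slab.h>

/* Sketch of the two-phase dump protocol (size query, then dump). */
static int example_grc_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 size_dwords, dumped_dwords;
	u32 *buf;
	int ret = 0;

	if (qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt,
					  &size_dwords) != DBG_STATUS_OK)
		return -EINVAL;

	buf = kmalloc_array(size_dwords, sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (qed_dbg_grc_dump(p_hwfn, p_ptt, buf, size_dwords,
			     &dumped_dwords) != DBG_STATUS_OK)
		ret = -EINVAL;

	kfree(buf);	/* a real caller would consume buf first */
	return ret;
}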
/**
* @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size
* for idle check results.
@@ -2840,6 +2860,7 @@
enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
+
/**
* @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results
* into the specified buffer.
@@ -2860,6 +2881,7 @@
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
+
/**
* @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size
* for mcp trace results.
@@ -2878,6 +2900,7 @@
enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
+
/**
* @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results
* into the specified buffer.
@@ -2902,6 +2925,7 @@
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
+
/**
* @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size
* for grc trace fifo results.
@@ -2917,6 +2941,7 @@
enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
+
/**
* @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into
* the specified buffer.
@@ -2938,6 +2963,7 @@
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
+
/**
* @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size
* for the IGU fifo results.
@@ -2954,6 +2980,7 @@
enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
+
/**
* @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into
* the specified buffer.
@@ -2975,6 +3002,7 @@
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
+
/**
* @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required
* buffer size for protection override window results.
@@ -3048,6 +3076,29 @@
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
+
+/**
+ * @brief qed_dbg_read_attn - Reads the attention registers of the specified
+ * block and type, and writes the results into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param block - Block ID.
+ * @param attn_type - Attention type.
+ * @param clear_status - Indicates if the attention status should be cleared.
+ * @param results - OUT: Pointer to write the read results into
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum block_id block,
+ enum dbg_attn_type attn_type,
+ bool clear_status,
+ struct dbg_attn_block_result *results);
+
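A hypothetical caller for qed_dbg_read_attn(); BLOCK_IGU and the ATTN_TYPE_INTERRUPT value name are picked arbitrarily for illustration, and the p_hwfn/p_ptt pair is assumed to come from the usual qed acquire paths:

/* Sketch: read (without clearing) the interrupt attentions of one block. */
static void example_read_igu_attn(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	struct dbg_attn_block_result results;

	if (qed_dbg_read_attn(p_hwfn, p_ptt, BLOCK_IGU, ATTN_TYPE_INTERRUPT,
			      false, &results) != DBG_STATUS_OK)
		return;

	/* results now holds the raw attention register values; see
	 * qed_dbg_parse_attn() below for turning them into text.
	 */
}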
/**
* @brief qed_dbg_print_attn - Prints attention registers values in the
* specified results struct.
@@ -3074,6 +3125,7 @@
* @param bin_ptr - a pointer to the binary data with debug arrays.
*/
enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr);
+
/**
* @brief qed_dbg_get_status_str - Returns a string for the specified status.
*
@@ -3082,6 +3134,7 @@
* @return a string for the specified status
*/
const char *qed_dbg_get_status_str(enum dbg_status status);
+
/**
* @brief qed_get_idle_chk_results_buf_size - Returns the required buffer size
* for idle check results (in bytes).
@@ -3116,6 +3169,7 @@
char *results_buf,
u32 *num_errors,
u32 *num_warnings);
+
/**
* @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
* for MCP Trace results (in bytes).
@@ -3132,6 +3186,7 @@
u32 *dump_buf,
u32 num_dumped_dwords,
u32 *results_buf_size);
+
/**
* @brief qed_print_mcp_trace_results - Prints MCP Trace results
*
@@ -3146,6 +3201,7 @@
u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf);
+
/**
* @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size
* for reg_fifo results (in bytes).
@@ -3162,6 +3218,7 @@
u32 *dump_buf,
u32 num_dumped_dwords,
u32 *results_buf_size);
+
/**
* @brief qed_print_reg_fifo_results - Prints reg fifo results
*
@@ -3176,6 +3233,7 @@
u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf);
+
/**
* @brief qed_get_igu_fifo_results_buf_size - Returns the required buffer size
* for igu_fifo results (in bytes).
@@ -3192,6 +3250,7 @@
u32 *dump_buf,
u32 num_dumped_dwords,
u32 *results_buf_size);
+
/**
* @brief qed_print_igu_fifo_results - Prints IGU fifo results
*
@@ -3206,6 +3265,7 @@
u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf);
+
/**
* @brief qed_get_protection_override_results_buf_size - Returns the required
* buffer size for protection override results (in bytes).
@@ -3223,6 +3283,7 @@
u32 *dump_buf,
u32 num_dumped_dwords,
u32 *results_buf_size);
+
/**
* @brief qed_print_protection_override_results - Prints protection override
* results.
@@ -3238,6 +3299,7 @@
u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf);
+
/**
* @brief qed_get_fw_asserts_results_buf_size - Returns the required buffer size
* for FW Asserts results (in bytes).
@@ -3254,6 +3316,7 @@
u32 *dump_buf,
u32 num_dumped_dwords,
u32 *results_buf_size);
+
/**
* @brief qed_print_fw_asserts_results - Prints FW Asserts results
*
@@ -3268,6 +3331,283 @@
u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf);
+
+/**
+ * @brief qed_dbg_parse_attn - Parses and prints the attention register
+ * values in the specified results struct.
+ *
+ * @param p_hwfn - HW device data
+ * @param results - Pointer to the attention read results
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results);
+
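Continuing the read sketch above, the raw results are handed to the parser; the error handling is again illustrative:

/* Sketch: parse and print previously read attention results. */
static void example_parse_attn(struct qed_hwfn *p_hwfn,
			       struct dbg_attn_block_result *results)
{
	if (qed_dbg_parse_attn(p_hwfn, results) != DBG_STATUS_OK)
		return;	/* e.g. the debug arrays were never set */
}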
+/* Debug Bus blocks */
+static const u32 dbg_bus_blocks[] = {
+ 0x0000000f, /* grc, bb, 15 lines */
+ 0x0000000f, /* grc, k2, 15 lines */
+ 0x00000000,
+ 0x00000000, /* miscs, bb, 0 lines */
+ 0x00000000, /* miscs, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* misc, bb, 0 lines */
+ 0x00000000, /* misc, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* dbu, bb, 0 lines */
+ 0x00000000, /* dbu, k2, 0 lines */
+ 0x00000000,
+ 0x000f0127, /* pglue_b, bb, 39 lines */
+ 0x0036012a, /* pglue_b, k2, 42 lines */
+ 0x00000000,
+ 0x00000000, /* cnig, bb, 0 lines */
+ 0x00120102, /* cnig, k2, 2 lines */
+ 0x00000000,
+ 0x00000000, /* cpmu, bb, 0 lines */
+ 0x00000000, /* cpmu, k2, 0 lines */
+ 0x00000000,
+ 0x00000001, /* ncsi, bb, 1 lines */
+ 0x00000001, /* ncsi, k2, 1 lines */
+ 0x00000000,
+ 0x00000000, /* opte, bb, 0 lines */
+ 0x00000000, /* opte, k2, 0 lines */
+ 0x00000000,
+ 0x00600085, /* bmb, bb, 133 lines */
+ 0x00600085, /* bmb, k2, 133 lines */
+ 0x00000000,
+ 0x00000000, /* pcie, bb, 0 lines */
+ 0x00e50033, /* pcie, k2, 51 lines */
+ 0x00000000,
+ 0x00000000, /* mcp, bb, 0 lines */
+ 0x00000000, /* mcp, k2, 0 lines */
+ 0x00000000,
+ 0x01180009, /* mcp2, bb, 9 lines */
+ 0x01180009, /* mcp2, k2, 9 lines */
+ 0x00000000,
+ 0x01210104, /* pswhst, bb, 4 lines */
+ 0x01210104, /* pswhst, k2, 4 lines */
+ 0x00000000,
+ 0x01250103, /* pswhst2, bb, 3 lines */
+ 0x01250103, /* pswhst2, k2, 3 lines */
+ 0x00000000,
+ 0x00340101, /* pswrd, bb, 1 lines */
+ 0x00340101, /* pswrd, k2, 1 lines */
+ 0x00000000,
+ 0x01280119, /* pswrd2, bb, 25 lines */
+ 0x01280119, /* pswrd2, k2, 25 lines */
+ 0x00000000,
+ 0x01410109, /* pswwr, bb, 9 lines */
+ 0x01410109, /* pswwr, k2, 9 lines */
+ 0x00000000,
+ 0x00000000, /* pswwr2, bb, 0 lines */
+ 0x00000000, /* pswwr2, k2, 0 lines */
+ 0x00000000,
+ 0x001c0001, /* pswrq, bb, 1 lines */
+ 0x001c0001, /* pswrq, k2, 1 lines */
+ 0x00000000,
+ 0x014a0015, /* pswrq2, bb, 21 lines */
+ 0x014a0015, /* pswrq2, k2, 21 lines */
+ 0x00000000,
+ 0x00000000, /* pglcs, bb, 0 lines */
+ 0x00120006, /* pglcs, k2, 6 lines */
+ 0x00000000,
+ 0x00100001, /* dmae, bb, 1 lines */
+ 0x00100001, /* dmae, k2, 1 lines */
+ 0x00000000,
+ 0x015f0105, /* ptu, bb, 5 lines */
+ 0x015f0105, /* ptu, k2, 5 lines */
+ 0x00000000,
+ 0x01640120, /* tcm, bb, 32 lines */
+ 0x01640120, /* tcm, k2, 32 lines */
+ 0x00000000,
+ 0x01640120, /* mcm, bb, 32 lines */
+ 0x01640120, /* mcm, k2, 32 lines */
+ 0x00000000,
+ 0x01640120, /* ucm, bb, 32 lines */
+ 0x01640120, /* ucm, k2, 32 lines */
+ 0x00000000,
+ 0x01640120, /* xcm, bb, 32 lines */
+ 0x01640120, /* xcm, k2, 32 lines */
+ 0x00000000,
+ 0x01640120, /* ycm, bb, 32 lines */
+ 0x01640120, /* ycm, k2, 32 lines */
+ 0x00000000,
+ 0x01640120, /* pcm, bb, 32 lines */
+ 0x01640120, /* pcm, k2, 32 lines */
+ 0x00000000,
+ 0x01840062, /* qm, bb, 98 lines */
+ 0x01840062, /* qm, k2, 98 lines */
+ 0x00000000,
+ 0x01e60021, /* tm, bb, 33 lines */
+ 0x01e60021, /* tm, k2, 33 lines */
+ 0x00000000,
+ 0x02070107, /* dorq, bb, 7 lines */
+ 0x02070107, /* dorq, k2, 7 lines */
+ 0x00000000,
+ 0x00600185, /* brb, bb, 133 lines */
+ 0x00600185, /* brb, k2, 133 lines */
+ 0x00000000,
+ 0x020e0019, /* src, bb, 25 lines */
+ 0x020c001a, /* src, k2, 26 lines */
+ 0x00000000,
+ 0x02270104, /* prs, bb, 4 lines */
+ 0x02270104, /* prs, k2, 4 lines */
+ 0x00000000,
+ 0x022b0133, /* tsdm, bb, 51 lines */
+ 0x022b0133, /* tsdm, k2, 51 lines */
+ 0x00000000,
+ 0x022b0133, /* msdm, bb, 51 lines */
+ 0x022b0133, /* msdm, k2, 51 lines */
+ 0x00000000,
+ 0x022b0133, /* usdm, bb, 51 lines */
+ 0x022b0133, /* usdm, k2, 51 lines */
+ 0x00000000,
+ 0x022b0133, /* xsdm, bb, 51 lines */
+ 0x022b0133, /* xsdm, k2, 51 lines */
+ 0x00000000,
+ 0x022b0133, /* ysdm, bb, 51 lines */
+ 0x022b0133, /* ysdm, k2, 51 lines */
+ 0x00000000,
+ 0x022b0133, /* psdm, bb, 51 lines */
+ 0x022b0133, /* psdm, k2, 51 lines */
+ 0x00000000,
+ 0x025e010c, /* tsem, bb, 12 lines */
+ 0x025e010c, /* tsem, k2, 12 lines */
+ 0x00000000,
+ 0x025e010c, /* msem, bb, 12 lines */
+ 0x025e010c, /* msem, k2, 12 lines */
+ 0x00000000,
+ 0x025e010c, /* usem, bb, 12 lines */
+ 0x025e010c, /* usem, k2, 12 lines */
+ 0x00000000,
+ 0x025e010c, /* xsem, bb, 12 lines */
+ 0x025e010c, /* xsem, k2, 12 lines */
+ 0x00000000,
+ 0x025e010c, /* ysem, bb, 12 lines */
+ 0x025e010c, /* ysem, k2, 12 lines */
+ 0x00000000,
+ 0x025e010c, /* psem, bb, 12 lines */
+ 0x025e010c, /* psem, k2, 12 lines */
+ 0x00000000,
+ 0x026a000d, /* rss, bb, 13 lines */
+ 0x026a000d, /* rss, k2, 13 lines */
+ 0x00000000,
+ 0x02770106, /* tmld, bb, 6 lines */
+ 0x02770106, /* tmld, k2, 6 lines */
+ 0x00000000,
+ 0x027d0106, /* muld, bb, 6 lines */
+ 0x027d0106, /* muld, k2, 6 lines */
+ 0x00000000,
+ 0x02770005, /* yuld, bb, 5 lines */
+ 0x02770005, /* yuld, k2, 5 lines */
+ 0x00000000,
+ 0x02830107, /* xyld, bb, 7 lines */
+ 0x027d0107, /* xyld, k2, 7 lines */
+ 0x00000000,
+ 0x00000000, /* ptld, bb, 0 lines */
+ 0x00000000, /* ptld, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* ypld, bb, 0 lines */
+ 0x00000000, /* ypld, k2, 0 lines */
+ 0x00000000,
+ 0x028a010e, /* prm, bb, 14 lines */
+ 0x02980110, /* prm, k2, 16 lines */
+ 0x00000000,
+ 0x02a8000d, /* pbf_pb1, bb, 13 lines */
+ 0x02a8000d, /* pbf_pb1, k2, 13 lines */
+ 0x00000000,
+ 0x02a8000d, /* pbf_pb2, bb, 13 lines */
+ 0x02a8000d, /* pbf_pb2, k2, 13 lines */
+ 0x00000000,
+ 0x02a8000d, /* rpb, bb, 13 lines */
+ 0x02a8000d, /* rpb, k2, 13 lines */
+ 0x00000000,
+ 0x00600185, /* btb, bb, 133 lines */
+ 0x00600185, /* btb, k2, 133 lines */
+ 0x00000000,
+ 0x02b50117, /* pbf, bb, 23 lines */
+ 0x02b50117, /* pbf, k2, 23 lines */
+ 0x00000000,
+ 0x02cc0006, /* rdif, bb, 6 lines */
+ 0x02cc0006, /* rdif, k2, 6 lines */
+ 0x00000000,
+ 0x02d20006, /* tdif, bb, 6 lines */
+ 0x02d20006, /* tdif, k2, 6 lines */
+ 0x00000000,
+ 0x02d80003, /* cdu, bb, 3 lines */
+ 0x02db000e, /* cdu, k2, 14 lines */
+ 0x00000000,
+ 0x02e9010d, /* ccfc, bb, 13 lines */
+ 0x02f60117, /* ccfc, k2, 23 lines */
+ 0x00000000,
+ 0x02e9010d, /* tcfc, bb, 13 lines */
+ 0x02f60117, /* tcfc, k2, 23 lines */
+ 0x00000000,
+ 0x030d0133, /* igu, bb, 51 lines */
+ 0x030d0133, /* igu, k2, 51 lines */
+ 0x00000000,
+ 0x03400106, /* cau, bb, 6 lines */
+ 0x03400106, /* cau, k2, 6 lines */
+ 0x00000000,
+ 0x00000000, /* rgfs, bb, 0 lines */
+ 0x00000000, /* rgfs, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* rgsrc, bb, 0 lines */
+ 0x00000000, /* rgsrc, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* tgfs, bb, 0 lines */
+ 0x00000000, /* tgfs, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* tgsrc, bb, 0 lines */
+ 0x00000000, /* tgsrc, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* umac, bb, 0 lines */
+ 0x00120006, /* umac, k2, 6 lines */
+ 0x00000000,
+ 0x00000000, /* xmac, bb, 0 lines */
+ 0x00000000, /* xmac, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* dbg, bb, 0 lines */
+ 0x00000000, /* dbg, k2, 0 lines */
+ 0x00000000,
+ 0x0346012b, /* nig, bb, 43 lines */
+ 0x0346011d, /* nig, k2, 29 lines */
+ 0x00000000,
+ 0x00000000, /* wol, bb, 0 lines */
+ 0x001c0002, /* wol, k2, 2 lines */
+ 0x00000000,
+ 0x00000000, /* bmbn, bb, 0 lines */
+ 0x00210008, /* bmbn, k2, 8 lines */
+ 0x00000000,
+ 0x00000000, /* ipc, bb, 0 lines */
+ 0x00000000, /* ipc, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* nwm, bb, 0 lines */
+ 0x0371000b, /* nwm, k2, 11 lines */
+ 0x00000000,
+ 0x00000000, /* nws, bb, 0 lines */
+ 0x037c0009, /* nws, k2, 9 lines */
+ 0x00000000,
+ 0x00000000, /* ms, bb, 0 lines */
+ 0x00120004, /* ms, k2, 4 lines */
+ 0x00000000,
+ 0x00000000, /* phy_pcie, bb, 0 lines */
+ 0x00e5001a, /* phy_pcie, k2, 26 lines */
+ 0x00000000,
+ 0x00000000, /* led, bb, 0 lines */
+ 0x00000000, /* led, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* avs_wrap, bb, 0 lines */
+ 0x00000000, /* avs_wrap, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* bar0_map, bb, 0 lines */
+ 0x00000000, /* bar0_map, k2, 0 lines */
+ 0x00000000,
+};
+
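Each block contributes three consecutive words to dbg_bus_blocks[] (bb, k2, and an unused third slot). Judging from entries such as pglue_b's 0x000f0127 (39 lines starting at offset 15, directly after grc's 15 lines), each word appears to be a packed struct dbg_bus_block: bits 0..7 num_of_lines, bit 8 has_latency_events, bits 16..31 lines_offset. That reading is an inference from the table values, not a documented format; a decoding sketch:

/* Inferred layout only: unpack one dbg_bus_blocks[] word into the
 * struct dbg_bus_block shape defined earlier in this header.
 */
static struct dbg_bus_block dbg_bus_block_from_word(u32 word)
{
	struct dbg_bus_block blk;

	blk.num_of_lines = word & 0xff;		    /* 0x27 -> 39 lines */
	blk.has_latency_events = (word >> 8) & 0x1;
	blk.lines_offset = cpu_to_le16(word >> 16); /* e.g. 0x000f = 15 */

	return blk;
}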
/* Win 2 */
#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
@@ -3589,37 +3929,37 @@
#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
(IRO[44].base + ((pf_id) * IRO[44].m1))
-static const struct iro iro_arr[47] = {
+static const struct iro iro_arr[49] = {
{0x0, 0x0, 0x0, 0x0, 0x8},
{0x4cb0, 0x80, 0x0, 0x0, 0x80},
- {0x6318, 0x20, 0x0, 0x0, 0x20},
+ {0x6518, 0x20, 0x0, 0x0, 0x20},
{0xb00, 0x8, 0x0, 0x0, 0x4},
{0xa80, 0x8, 0x0, 0x0, 0x4},
{0x0, 0x8, 0x0, 0x0, 0x2},
{0x80, 0x8, 0x0, 0x0, 0x4},
{0x84, 0x8, 0x0, 0x0, 0x2},
- {0x4bc0, 0x0, 0x0, 0x0, 0x78},
+ {0x4c40, 0x0, 0x0, 0x0, 0x78},
{0x3df0, 0x0, 0x0, 0x0, 0x78},
{0x29b0, 0x0, 0x0, 0x0, 0x78},
{0x4c38, 0x0, 0x0, 0x0, 0x78},
{0x4990, 0x0, 0x0, 0x0, 0x78},
- {0x7e48, 0x0, 0x0, 0x0, 0x78},
+ {0x7f48, 0x0, 0x0, 0x0, 0x78},
{0xa28, 0x8, 0x0, 0x0, 0x8},
- {0x60f8, 0x10, 0x0, 0x0, 0x10},
- {0xb820, 0x30, 0x0, 0x0, 0x30},
+ {0x61f8, 0x10, 0x0, 0x0, 0x10},
+ {0xbd20, 0x30, 0x0, 0x0, 0x30},
{0x95b8, 0x30, 0x0, 0x0, 0x30},
{0x4b60, 0x80, 0x0, 0x0, 0x40},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
{0x53a0, 0x80, 0x4, 0x0, 0x4},
- {0xc8f0, 0x0, 0x0, 0x0, 0x4},
+ {0xc7c8, 0x0, 0x0, 0x0, 0x4},
{0x4ba0, 0x80, 0x0, 0x0, 0x20},
- {0x8050, 0x40, 0x0, 0x0, 0x30},
- {0xe770, 0x60, 0x0, 0x0, 0x60},
+ {0x8150, 0x40, 0x0, 0x0, 0x30},
+ {0xec70, 0x60, 0x0, 0x0, 0x60},
{0x2b48, 0x80, 0x0, 0x0, 0x38},
- {0xf188, 0x78, 0x0, 0x0, 0x78},
+ {0xf1b0, 0x78, 0x0, 0x0, 0x78},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
- {0xacf0, 0x0, 0x0, 0x0, 0xf0},
- {0xade0, 0x8, 0x0, 0x0, 0x8},
+ {0xaef8, 0x0, 0x0, 0x0, 0xf0},
+ {0xafe8, 0x8, 0x0, 0x0, 0x8},
{0x1f8, 0x8, 0x0, 0x0, 0x8},
{0xac0, 0x8, 0x0, 0x0, 0x8},
{0x2578, 0x8, 0x0, 0x0, 0x8},
@@ -3627,16 +3967,18 @@
{0x0, 0x8, 0x0, 0x0, 0x8},
{0x200, 0x10, 0x8, 0x0, 0x8},
{0xb78, 0x10, 0x8, 0x0, 0x2},
- {0xd888, 0x38, 0x0, 0x0, 0x24},
- {0x12c38, 0x10, 0x0, 0x0, 0x8},
- {0x11aa0, 0x38, 0x0, 0x0, 0x18},
- {0xa8c0, 0x38, 0x0, 0x0, 0x10},
+ {0xd9a8, 0x38, 0x0, 0x0, 0x24},
+ {0x12988, 0x10, 0x0, 0x0, 0x8},
+ {0x11fa0, 0x38, 0x0, 0x0, 0x18},
+ {0xa580, 0x38, 0x0, 0x0, 0x10},
{0x86f8, 0x30, 0x0, 0x0, 0x18},
{0x101f8, 0x10, 0x0, 0x0, 0x10},
- {0xdd08, 0x48, 0x0, 0x0, 0x38},
+ {0xde28, 0x48, 0x0, 0x0, 0x38},
{0x10660, 0x20, 0x0, 0x0, 0x20},
{0x2b80, 0x80, 0x0, 0x0, 0x10},
{0x5020, 0x10, 0x0, 0x0, 0x10},
+ {0xc9b0, 0x30, 0x0, 0x0, 0x10},
+ {0xeec0, 0x10, 0x0, 0x0, 0x10},
};
/* Runtime array offsets */
@@ -3724,361 +4066,359 @@
#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
-#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
-#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6700
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6701
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6702
#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
-#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28705
-#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28706
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28707
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28708
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28709
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28710
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28711
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28712
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28713
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28714
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28715
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28716
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28702
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28703
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28704
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29132
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29644
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29645
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29646
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29647
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29648
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29649
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29650
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29651
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29652
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29653
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29654
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29655
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29656
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29657
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29658
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29659
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29660
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29661
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29662
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29663
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29664
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29665
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29666
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29667
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29668
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29669
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29670
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29671
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29672
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29673
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29674
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29675
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29676
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29677
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29678
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29679
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29680
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29681
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29682
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29683
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29684
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29685
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29686
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29687
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29688
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29689
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29690
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29691
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29692
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29693
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29694
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29695
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29696
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29697
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29698
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29699
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29700
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29701
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29702
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29703
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29704
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29705
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29706
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29707
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29708
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29709
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29710
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29711
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29738
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29739
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29740
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29741
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29742
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29743
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29744
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29745
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29746
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29747
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29748
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29749
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29750
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29751
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29752
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29753
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29754
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29755
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29756
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29757
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29758
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29759
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29760
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29761
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29762
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29763
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29764
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29765
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29766
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29767
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29768
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29769
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29770
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29771
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29772
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29773
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29774
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29775
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29776
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29777
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29778
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29779
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29780
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29781
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29782
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29783
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29784
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29785
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29786
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29787
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29788
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29789
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29790
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29791
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29792
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29793
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29794
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29795
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29796
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29797
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29798
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29799
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29800
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29801
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29802
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29803
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29804
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29805
#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_VOQCRDLINE_RT_OFFSET 29839
-#define QM_REG_VOQCRDLINE_RT_SIZE 20
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29859
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29879
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29880
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29881
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29882
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29883
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29884
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29885
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29886
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29887
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29888
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29889
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29890
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29891
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29892
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29893
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29894
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29895
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29896
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29897
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29898
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29899
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29900
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29901
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29902
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29903
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29904
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29905
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29906
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29907
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29908
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29909
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29910
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29911
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29912
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29913
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29914
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29915
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29916
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29917
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29918
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29919
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29920
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29921
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29922
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29923
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29924
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29925
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29926
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29927
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29928
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29929
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29930
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29931
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29932
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29933
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29934
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29935
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29936
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29937
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29938
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29939
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29940
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29941
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29942
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29943
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29944
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29945
-#define QM_REG_PQTX2PF_40_RT_OFFSET 29946
-#define QM_REG_PQTX2PF_41_RT_OFFSET 29947
-#define QM_REG_PQTX2PF_42_RT_OFFSET 29948
-#define QM_REG_PQTX2PF_43_RT_OFFSET 29949
-#define QM_REG_PQTX2PF_44_RT_OFFSET 29950
-#define QM_REG_PQTX2PF_45_RT_OFFSET 29951
-#define QM_REG_PQTX2PF_46_RT_OFFSET 29952
-#define QM_REG_PQTX2PF_47_RT_OFFSET 29953
-#define QM_REG_PQTX2PF_48_RT_OFFSET 29954
-#define QM_REG_PQTX2PF_49_RT_OFFSET 29955
-#define QM_REG_PQTX2PF_50_RT_OFFSET 29956
-#define QM_REG_PQTX2PF_51_RT_OFFSET 29957
-#define QM_REG_PQTX2PF_52_RT_OFFSET 29958
-#define QM_REG_PQTX2PF_53_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_54_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_55_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_56_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_57_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_58_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_59_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_60_RT_OFFSET 29966
-#define QM_REG_PQTX2PF_61_RT_OFFSET 29967
-#define QM_REG_PQTX2PF_62_RT_OFFSET 29968
-#define QM_REG_PQTX2PF_63_RT_OFFSET 29969
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29970
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29971
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29972
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29973
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29974
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29975
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29976
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29977
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29978
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29979
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29980
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29981
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29982
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29983
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29984
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29985
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29986
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29987
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29988
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29989
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29990
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29991
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29992
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29993
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29994
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29995
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29996
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29997
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29998
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29933
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29934
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29935
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29936
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29937
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29938
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29939
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29940
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29941
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29942
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29943
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29944
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29945
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29946
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29947
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29948
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29949
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29950
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29951
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29952
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29953
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29954
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29955
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29956
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29957
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29958
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29966
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29967
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29968
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29969
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29970
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29971
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29972
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29973
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29974
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29975
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29976
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29977
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29978
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29979
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29980
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29981
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29982
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29983
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29984
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29985
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29986
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29987
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29988
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29989
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29990
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29991
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29992
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29993
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29994
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29995
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29996
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29997
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29998
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29999
+#define QM_REG_PQTX2PF_40_RT_OFFSET 30000
+#define QM_REG_PQTX2PF_41_RT_OFFSET 30001
+#define QM_REG_PQTX2PF_42_RT_OFFSET 30002
+#define QM_REG_PQTX2PF_43_RT_OFFSET 30003
+#define QM_REG_PQTX2PF_44_RT_OFFSET 30004
+#define QM_REG_PQTX2PF_45_RT_OFFSET 30005
+#define QM_REG_PQTX2PF_46_RT_OFFSET 30006
+#define QM_REG_PQTX2PF_47_RT_OFFSET 30007
+#define QM_REG_PQTX2PF_48_RT_OFFSET 30008
+#define QM_REG_PQTX2PF_49_RT_OFFSET 30009
+#define QM_REG_PQTX2PF_50_RT_OFFSET 30010
+#define QM_REG_PQTX2PF_51_RT_OFFSET 30011
+#define QM_REG_PQTX2PF_52_RT_OFFSET 30012
+#define QM_REG_PQTX2PF_53_RT_OFFSET 30013
+#define QM_REG_PQTX2PF_54_RT_OFFSET 30014
+#define QM_REG_PQTX2PF_55_RT_OFFSET 30015
+#define QM_REG_PQTX2PF_56_RT_OFFSET 30016
+#define QM_REG_PQTX2PF_57_RT_OFFSET 30017
+#define QM_REG_PQTX2PF_58_RT_OFFSET 30018
+#define QM_REG_PQTX2PF_59_RT_OFFSET 30019
+#define QM_REG_PQTX2PF_60_RT_OFFSET 30020
+#define QM_REG_PQTX2PF_61_RT_OFFSET 30021
+#define QM_REG_PQTX2PF_62_RT_OFFSET 30022
+#define QM_REG_PQTX2PF_63_RT_OFFSET 30023
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30024
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30025
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30026
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30027
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30028
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30029
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30030
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30031
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30032
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30033
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30034
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30035
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30036
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30037
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30038
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30039
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30040
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30041
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30042
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30043
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30044
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30045
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30046
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30047
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30048
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30049
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30050
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30051
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30052
#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30254
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30308
#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30510
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30564
#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30766
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30767
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30768
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30769
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30820
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30821
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30822
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30823
#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30785
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30839
#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30801
+#define QM_REG_RLPFCRD_RT_OFFSET 30855
#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30817
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30818
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30819
+#define QM_REG_RLPFENABLE_RT_OFFSET 30871
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30872
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30873
#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30835
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30889
#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30851
-#define QM_REG_WFQPFCRD_RT_SIZE 160
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31011
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31012
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31013
+#define QM_REG_WFQPFCRD_RT_OFFSET 30905
+#define QM_REG_WFQPFCRD_RT_SIZE 256
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31161
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31162
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31163
#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31525
+#define QM_REG_TXPQMAP_RT_OFFSET 31675
#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32037
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32187
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32549
+#define QM_REG_WFQVPCRD_RT_OFFSET 32699
#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33061
+#define QM_REG_WFQVPMAP_RT_OFFSET 33211
#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33573
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33733
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33734
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33735
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33736
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33737
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33738
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33739
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33740
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33723
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
+#define QM_REG_VOQCRDLINE_RT_OFFSET 34043
+#define QM_REG_VOQCRDLINE_RT_SIZE 36
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34079
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34115
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34116
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34117
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34118
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34119
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34120
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34121
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34122
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33744
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34126
#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33748
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34130
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33752
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33753
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34134
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34135
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33785
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34167
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33801
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34183
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33817
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34199
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33833
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34215
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33849
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33850
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33851
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33852
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33853
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33854
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33855
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33856
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33857
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33858
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33859
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33860
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33861
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33862
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33863
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33864
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33865
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33866
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33867
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33868
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33869
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33870
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33871
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33872
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33873
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33874
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33875
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33876
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33877
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33878
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33879
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33880
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33881
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33882
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33883
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33884
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33885
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33886
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33887
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33888
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33889
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33890
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33891
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33892
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33893
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33894
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33895
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33896
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33897
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33898
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33899
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33900
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33901
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33902
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33903
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33904
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33905
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33906
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33907
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33908
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33909
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33910
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33911
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33912
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33913
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33914
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33915
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33916
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33917
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33918
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33919
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33920
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33921
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33922
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33923
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33924
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33925
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33926
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34231
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34232
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34233
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34234
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34235
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34236
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34237
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34238
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34239
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34240
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34241
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34242
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34243
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34244
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34245
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34246
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34247
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34248
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34249
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34250
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34251
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34252
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34253
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34254
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34255
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34256
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34257
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34258
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34259
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34260
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34261
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34262
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34263
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34264
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34265
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34266
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34267
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34268
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34269
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34270
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34271
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34272
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34273
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34274
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34275
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34276
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34277
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34278
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34279
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34280
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34281
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34282
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34283
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34284
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34285
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34286
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34287
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34288
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34289
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34290
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34291
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34292
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34293
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34294
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34295
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34296
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34297
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34298
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34299
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34300
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34301
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34302
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34303
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34304
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34305
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34306
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34307
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34308
-#define RUNTIME_ARRAY_SIZE 33927
+#define RUNTIME_ARRAY_SIZE 34309
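For orientation: each *_RT_OFFSET above is an element index into the driver's
runtime-init array (RUNTIME_ARRAY_SIZE u32 entries), and the *_RT_SIZE
constants give the length of multi-element blocks — which is why growing one
block, as in this hunk, renumbers every offset after it. A minimal host-side
sketch of that layout, assuming only the constants from this hunk (rt_shadow
and rt_store are illustrative names, not the driver's actual symbols):

	#include <stdint.h>

	#define QM_REG_RLGLBLINCVAL_RT_OFFSET	30052
	#define QM_REG_RLGLBLINCVAL_RT_SIZE	256
	#define RUNTIME_ARRAY_SIZE		34309

	/* One u32 slot per RT offset; a block occupies
	 * OFFSET .. OFFSET + SIZE - 1. */
	static uint32_t rt_shadow[RUNTIME_ARRAY_SIZE];

	static void rt_store(uint32_t rt_offset, uint32_t val)
	{
		rt_shadow[rt_offset] = val;
	}

	int main(void)
	{
		uint32_t i;

		/* Program all 256 global rate-limiter increment slots. */
		for (i = 0; i < QM_REG_RLGLBLINCVAL_RT_SIZE; i++)
			rt_store(QM_REG_RLGLBLINCVAL_RT_OFFSET + i, 0);
		return 0;
	}
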
/* The eth storm context for the Tstorm */
struct tstorm_eth_conn_st_ctx {
@@ -4307,7 +4647,7 @@
#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
- __le16 quota;
+ __le16 ereserved1;
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
@@ -4340,7 +4680,7 @@
u8 byte13;
u8 byte14;
u8 byte15;
- u8 byte16;
+ u8 ereserved;
__le16 word11;
__le32 reg10;
__le32 reg11;
@@ -4627,6 +4967,7 @@
ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC,
ETH_FILTERS_VNI_ADD_FAIL_FULL,
ETH_FILTERS_VNI_ADD_FAIL_DUP,
+ ETH_FILTERS_GFT_UPDATE_FAIL,
MAX_ETH_ERROR_CODE
};
@@ -4879,6 +5220,39 @@
MAX_GFT_LOGIC_FILTER_TYPE
};
+struct rx_add_openflow_filter_data {
+ __le16 action_icid;
+ u8 priority;
+ u8 reserved0;
+ __le32 tenant_id;
+ __le16 dst_mac_hi;
+ __le16 dst_mac_mid;
+ __le16 dst_mac_lo;
+ __le16 src_mac_hi;
+ __le16 src_mac_mid;
+ __le16 src_mac_lo;
+ __le16 vlan_id;
+ __le16 l2_eth_type;
+ u8 ipv4_dscp;
+ u8 ipv4_frag_type;
+ u8 ipv4_over_ip;
+ u8 tenant_id_exists;
+ __le32 ipv4_dst_addr;
+ __le32 ipv4_src_addr;
+ __le16 l4_dst_port;
+ __le16 l4_src_port;
+};
+
+struct rx_create_gft_action_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
+struct rx_create_openflow_action_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
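The rx_add_openflow_filter_data layout above splits each 6-byte MAC address
into three 16-bit words (dst_mac_hi/mid/lo, likewise for the source MAC). A
sketch of that packing — mac_to_fw_words is a hypothetical helper, and the
msb-first byte order within each word is an assumption about the firmware
convention, not something this patch states:

	#include <stdint.h>

	/* Hypothetical helper: split mac[0..5] into the hi/mid/lo words of
	 * rx_add_openflow_filter_data. Byte order per word is assumed; in
	 * the kernel the result would go through cpu_to_le16(). */
	static void mac_to_fw_words(const uint8_t mac[6],
				    uint16_t *hi, uint16_t *mid, uint16_t *lo)
	{
		*hi  = (uint16_t)((mac[0] << 8) | mac[1]);
		*mid = (uint16_t)((mac[2] << 8) | mac[3]);
		*lo  = (uint16_t)((mac[4] << 8) | mac[5]);
	}
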
/* Ramrod data for rx queue start ramrod */
struct rx_queue_start_ramrod_data {
__le16 rx_queue_id;
@@ -4956,7 +5330,7 @@
u8 vport_id;
u8 filter_type;
u8 filter_action;
- u8 reserved;
+ u8 assert_on_error;
};
/* Ramrod data for rx queue start ramrod */
@@ -5102,203 +5476,6 @@
struct eth_vport_rss_config rss_config;
};
-struct gft_cam_line {
- __le32 camline;
-#define GFT_CAM_LINE_VALID_MASK 0x1
-#define GFT_CAM_LINE_VALID_SHIFT 0
-#define GFT_CAM_LINE_DATA_MASK 0x3FFF
-#define GFT_CAM_LINE_DATA_SHIFT 1
-#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF
-#define GFT_CAM_LINE_MASK_BITS_SHIFT 15
-#define GFT_CAM_LINE_RESERVED1_MASK 0x7
-#define GFT_CAM_LINE_RESERVED1_SHIFT 29
-};
-
-struct gft_cam_line_mapped {
- __le32 camline;
-#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1
-#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0
-#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1
-#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1
-#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1
-#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2
-#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF
-#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3
-#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF
-#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7
-#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF
-#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11
-#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1
-#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15
-#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1
-#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16
-#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF
-#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17
-#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF
-#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21
-#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF
-#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25
-#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK 0x7
-#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
-};
-
-union gft_cam_line_union {
- struct gft_cam_line cam_line;
- struct gft_cam_line_mapped cam_line_mapped;
-};
-
-enum gft_profile_ip_version {
- GFT_PROFILE_IPV4 = 0,
- GFT_PROFILE_IPV6 = 1,
- MAX_GFT_PROFILE_IP_VERSION
-};
-
-enum gft_profile_upper_protocol_type {
- GFT_PROFILE_ROCE_PROTOCOL = 0,
- GFT_PROFILE_RROCE_PROTOCOL = 1,
- GFT_PROFILE_FCOE_PROTOCOL = 2,
- GFT_PROFILE_ICMP_PROTOCOL = 3,
- GFT_PROFILE_ARP_PROTOCOL = 4,
- GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5,
- GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6,
- GFT_PROFILE_TCP_PROTOCOL = 7,
- GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8,
- GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9,
- GFT_PROFILE_UDP_PROTOCOL = 10,
- GFT_PROFILE_USER_IP_1_INNER = 11,
- GFT_PROFILE_USER_IP_2_OUTER = 12,
- GFT_PROFILE_USER_ETH_1_INNER = 13,
- GFT_PROFILE_USER_ETH_2_OUTER = 14,
- GFT_PROFILE_RAW = 15,
- MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
-};
-
-struct gft_ram_line {
- __le32 low32bits;
-#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
-#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0
-#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT 2
-#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT 3
-#define GFT_RAM_LINE_TUNNEL_TTL_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT 4
-#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT 5
-#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT 6
-#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT 7
-#define GFT_RAM_LINE_TUNNEL_DSCP_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT 8
-#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9
-#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT 10
-#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT 11
-#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT 12
-#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT 13
-#define GFT_RAM_LINE_TUNNEL_VLAN_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT 14
-#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT 15
-#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT 16
-#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK 0x1
-#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT 17
-#define GFT_RAM_LINE_TTL_MASK 0x1
-#define GFT_RAM_LINE_TTL_SHIFT 18
-#define GFT_RAM_LINE_ETHERTYPE_MASK 0x1
-#define GFT_RAM_LINE_ETHERTYPE_SHIFT 19
-#define GFT_RAM_LINE_RESERVED0_MASK 0x1
-#define GFT_RAM_LINE_RESERVED0_SHIFT 20
-#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT 21
-#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT 22
-#define GFT_RAM_LINE_TCP_FLAG_RST_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT 23
-#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT 24
-#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT 25
-#define GFT_RAM_LINE_TCP_FLAG_URG_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT 26
-#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT 27
-#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT 28
-#define GFT_RAM_LINE_TCP_FLAG_NS_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29
-#define GFT_RAM_LINE_DST_PORT_MASK 0x1
-#define GFT_RAM_LINE_DST_PORT_SHIFT 30
-#define GFT_RAM_LINE_SRC_PORT_MASK 0x1
-#define GFT_RAM_LINE_SRC_PORT_SHIFT 31
- __le32 high32bits;
-#define GFT_RAM_LINE_DSCP_MASK 0x1
-#define GFT_RAM_LINE_DSCP_SHIFT 0
-#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1
-#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT 1
-#define GFT_RAM_LINE_DST_IP_MASK 0x1
-#define GFT_RAM_LINE_DST_IP_SHIFT 2
-#define GFT_RAM_LINE_SRC_IP_MASK 0x1
-#define GFT_RAM_LINE_SRC_IP_SHIFT 3
-#define GFT_RAM_LINE_PRIORITY_MASK 0x1
-#define GFT_RAM_LINE_PRIORITY_SHIFT 4
-#define GFT_RAM_LINE_PROVIDER_VLAN_MASK 0x1
-#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT 5
-#define GFT_RAM_LINE_VLAN_MASK 0x1
-#define GFT_RAM_LINE_VLAN_SHIFT 6
-#define GFT_RAM_LINE_DST_MAC_MASK 0x1
-#define GFT_RAM_LINE_DST_MAC_SHIFT 7
-#define GFT_RAM_LINE_SRC_MAC_MASK 0x1
-#define GFT_RAM_LINE_SRC_MAC_SHIFT 8
-#define GFT_RAM_LINE_TENANT_ID_MASK 0x1
-#define GFT_RAM_LINE_TENANT_ID_SHIFT 9
-#define GFT_RAM_LINE_RESERVED1_MASK 0x3FFFFF
-#define GFT_RAM_LINE_RESERVED1_SHIFT 10
-};
-
-struct mstorm_eth_conn_ag_ctx {
- u8 byte0;
- u8 byte1;
- u8 flags0;
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
- __le16 word0;
- __le16 word1;
- __le32 reg0;
- __le32 reg1;
-};
-
struct xstorm_eth_conn_agctxdq_ext_ldpart {
u8 reserved0;
u8 eth_state;
@@ -5511,7 +5688,7 @@
#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
- __le16 quota;
+ __le16 ereserved1;
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
@@ -5528,6 +5705,43 @@
__le32 reg4;
};
+struct mstorm_eth_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
struct xstorm_eth_hw_conn_ag_ctx {
u8 reserved0;
u8 eth_state;
@@ -5740,7 +5954,7 @@
#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
- __le16 quota;
+ __le16 ereserved1;
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
@@ -5748,6 +5962,200 @@
__le16 conn_dpi;
};
+struct gft_cam_line {
+ __le32 camline;
+#define GFT_CAM_LINE_VALID_MASK 0x1
+#define GFT_CAM_LINE_VALID_SHIFT 0
+#define GFT_CAM_LINE_DATA_MASK 0x3FFF
+#define GFT_CAM_LINE_DATA_SHIFT 1
+#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF
+#define GFT_CAM_LINE_MASK_BITS_SHIFT 15
+#define GFT_CAM_LINE_RESERVED1_MASK 0x7
+#define GFT_CAM_LINE_RESERVED1_SHIFT 29
+};
+
+struct gft_cam_line_mapped {
+ __le32 camline;
+#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25
+#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK 0x7
+#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
+};
+
+union gft_cam_line_union {
+ struct gft_cam_line cam_line;
+ struct gft_cam_line_mapped cam_line_mapped;
+};
+
+enum gft_profile_ip_version {
+ GFT_PROFILE_IPV4 = 0,
+ GFT_PROFILE_IPV6 = 1,
+ MAX_GFT_PROFILE_IP_VERSION
+};
+
+struct gft_profile_key {
+ __le16 profile_key;
+#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6
+#define GFT_PROFILE_KEY_PF_ID_MASK 0xF
+#define GFT_PROFILE_KEY_PF_ID_SHIFT 10
+#define GFT_PROFILE_KEY_RESERVED0_MASK 0x3
+#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14
+};
+
+enum gft_profile_tunnel_type {
+ GFT_PROFILE_NO_TUNNEL = 0,
+ GFT_PROFILE_VXLAN_TUNNEL = 1,
+ GFT_PROFILE_GRE_MAC_OR_NVGRE_TUNNEL = 2,
+ GFT_PROFILE_GRE_IP_TUNNEL = 3,
+ GFT_PROFILE_GENEVE_MAC_TUNNEL = 4,
+ GFT_PROFILE_GENEVE_IP_TUNNEL = 5,
+ MAX_GFT_PROFILE_TUNNEL_TYPE
+};
+
+enum gft_profile_upper_protocol_type {
+ GFT_PROFILE_ROCE_PROTOCOL = 0,
+ GFT_PROFILE_RROCE_PROTOCOL = 1,
+ GFT_PROFILE_FCOE_PROTOCOL = 2,
+ GFT_PROFILE_ICMP_PROTOCOL = 3,
+ GFT_PROFILE_ARP_PROTOCOL = 4,
+ GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5,
+ GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6,
+ GFT_PROFILE_TCP_PROTOCOL = 7,
+ GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8,
+ GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9,
+ GFT_PROFILE_UDP_PROTOCOL = 10,
+ GFT_PROFILE_USER_IP_1_INNER = 11,
+ GFT_PROFILE_USER_IP_2_OUTER = 12,
+ GFT_PROFILE_USER_ETH_1_INNER = 13,
+ GFT_PROFILE_USER_ETH_2_OUTER = 14,
+ GFT_PROFILE_RAW = 15,
+ MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
+};
+
+struct gft_ram_line {
+ __le32 lo;
+#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
+#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT 2
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT 3
+#define GFT_RAM_LINE_TUNNEL_TTL_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT 4
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT 5
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT 6
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT 7
+#define GFT_RAM_LINE_TUNNEL_DSCP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT 8
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9
+#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT 10
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT 11
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT 12
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT 13
+#define GFT_RAM_LINE_TUNNEL_VLAN_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT 14
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT 15
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT 16
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK 0x1
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT 17
+#define GFT_RAM_LINE_TTL_MASK 0x1
+#define GFT_RAM_LINE_TTL_SHIFT 18
+#define GFT_RAM_LINE_ETHERTYPE_MASK 0x1
+#define GFT_RAM_LINE_ETHERTYPE_SHIFT 19
+#define GFT_RAM_LINE_RESERVED0_MASK 0x1
+#define GFT_RAM_LINE_RESERVED0_SHIFT 20
+#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT 21
+#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT 22
+#define GFT_RAM_LINE_TCP_FLAG_RST_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT 23
+#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT 24
+#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT 25
+#define GFT_RAM_LINE_TCP_FLAG_URG_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT 26
+#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT 27
+#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT 28
+#define GFT_RAM_LINE_TCP_FLAG_NS_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29
+#define GFT_RAM_LINE_DST_PORT_MASK 0x1
+#define GFT_RAM_LINE_DST_PORT_SHIFT 30
+#define GFT_RAM_LINE_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_SRC_PORT_SHIFT 31
+ __le32 hi;
+#define GFT_RAM_LINE_DSCP_MASK 0x1
+#define GFT_RAM_LINE_DSCP_SHIFT 0
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT 1
+#define GFT_RAM_LINE_DST_IP_MASK 0x1
+#define GFT_RAM_LINE_DST_IP_SHIFT 2
+#define GFT_RAM_LINE_SRC_IP_MASK 0x1
+#define GFT_RAM_LINE_SRC_IP_SHIFT 3
+#define GFT_RAM_LINE_PRIORITY_MASK 0x1
+#define GFT_RAM_LINE_PRIORITY_SHIFT 4
+#define GFT_RAM_LINE_PROVIDER_VLAN_MASK 0x1
+#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT 5
+#define GFT_RAM_LINE_VLAN_MASK 0x1
+#define GFT_RAM_LINE_VLAN_SHIFT 6
+#define GFT_RAM_LINE_DST_MAC_MASK 0x1
+#define GFT_RAM_LINE_DST_MAC_SHIFT 7
+#define GFT_RAM_LINE_SRC_MAC_MASK 0x1
+#define GFT_RAM_LINE_SRC_MAC_SHIFT 8
+#define GFT_RAM_LINE_TENANT_ID_MASK 0x1
+#define GFT_RAM_LINE_TENANT_ID_SHIFT 9
+#define GFT_RAM_LINE_RESERVED1_MASK 0x3FFFFF
+#define GFT_RAM_LINE_RESERVED1_SHIFT 10
+};
+
+enum gft_vlan_select {
+ INNER_PROVIDER_VLAN = 0,
+ INNER_VLAN = 1,
+ OUTER_PROVIDER_VLAN = 2,
+ OUTER_VLAN = 3,
+ MAX_GFT_VLAN_SELECT
+};
+
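The _MASK/_SHIFT pairs in gft_ram_line (and the other context structures in
this file) are accessed through the driver's GET_FIELD/SET_FIELD idiom. A
self-contained sketch of that idiom using two constants from the hunk above —
the macros below are a local mirror for illustration, and the word is kept in
host order until it would be converted (cpu_to_le32) for the device:

	#include <stdint.h>

	/* Read a field out of a packed word. */
	#define GET_FIELD(value, name) \
		(((value) >> name ## _SHIFT) & name ## _MASK)

	/* Clear then set a field in a packed word. */
	#define SET_FIELD(value, name, flag)					 \
		do {								 \
			(value) &= ~((uint32_t)name ## _MASK << name ## _SHIFT); \
			(value) |= ((uint32_t)(flag) & name ## _MASK)		 \
				   << name ## _SHIFT;				 \
		} while (0)

	#define GFT_RAM_LINE_VLAN_SELECT_MASK	0x3
	#define GFT_RAM_LINE_VLAN_SELECT_SHIFT	0
	#define GFT_RAM_LINE_DST_PORT_MASK	0x1
	#define GFT_RAM_LINE_DST_PORT_SHIFT	30

	int main(void)
	{
		uint32_t lo = 0;

		SET_FIELD(lo, GFT_RAM_LINE_VLAN_SELECT, 2); /* OUTER_PROVIDER_VLAN */
		SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1);    /* match on L4 dst port */
		return GET_FIELD(lo, GFT_RAM_LINE_DST_PORT) == 1 ? 0 : 1;
	}
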
struct mstorm_rdma_task_st_ctx {
struct regpair temp[4];
};
@@ -5827,12 +6235,9 @@
u8 cnq_start_offset;
u8 num_cnqs;
u8 cq_ring_mode;
- u8 cnp_vlan_priority;
- __le32 cnp_send_timeout;
- u8 cnp_dscp;
u8 vf_id;
u8 vf_valid;
- u8 reserved[5];
+ u8 reserved[3];
};
struct rdma_init_func_ramrod_data {
@@ -5856,54 +6261,55 @@
};
struct rdma_register_tid_ramrod_data {
- __le32 flags;
-#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_MASK 0x3FFFF
-#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_SHIFT 0
-#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F
-#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 18
-#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 23
-#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 24
-#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 25
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 26
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 27
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 28
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 29
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 30
-#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 31
+ __le16 flags;
+#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 5
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 6
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 7
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 8
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 9
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 10
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 11
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 12
+#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 13
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK 0x3
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT 14
u8 flags1;
-#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F
#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0
-#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7
-#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5
u8 flags2;
-#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0
-#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2
u8 key;
u8 length_hi;
u8 vf_id;
u8 vf_valid;
__le16 pd;
+ __le16 reserved2;
__le32 length_lo;
__le32 itid;
- __le32 reserved2;
+ __le32 reserved3;
struct regpair va;
struct regpair pbl_base;
struct regpair dif_error_addr;
struct regpair dif_runt_addr;
- __le32 reserved3[2];
+ __le32 reserved4[2];
};
struct rdma_resize_cq_output_params {
@@ -6149,6 +6555,233 @@
MAX_RDMA_TID_TYPE
};
+struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
+ u8 flags2;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
+ u8 flags4;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
+ u8 flags7;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
+ u8 flags11;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 snd_nxt_psn;
+ __le32 reg4;
+};
+
struct mstorm_rdma_conn_ag_ctx {
u8 byte0;
u8 byte1;
@@ -6438,233 +7071,6 @@
__le16 word3;
};
-struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
- u8 reserved0;
- u8 state;
- u8 flags0;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
- u8 flags1;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
- u8 flags2;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
- u8 flags3;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
- u8 flags4;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
- u8 flags5;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
- u8 flags6;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
- u8 flags7;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
- u8 flags8;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
- u8 flags9;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
- u8 flags10;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
- u8 flags11;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
- u8 flags12;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
- u8 flags13;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
- u8 flags14;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
- u8 byte2;
- __le16 physical_q0;
- __le16 word1;
- __le16 word2;
- __le16 word3;
- __le16 word4;
- __le16 word5;
- __le16 conn_dpi;
- u8 byte3;
- u8 byte4;
- u8 byte5;
- u8 byte6;
- __le32 reg0;
- __le32 reg1;
- __le32 reg2;
- __le32 snd_nxt_psn;
- __le32 reg4;
-};
-
struct xstorm_rdma_conn_ag_ctx {
u8 reserved0;
u8 state;
@@ -6696,8 +7102,8 @@
#define XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3
#define XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1
#define XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 5
#define XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1
#define XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6
#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
@@ -7093,16 +7499,35 @@
struct regpair output_params_addr;
};
+struct roce_events_stats {
+ __le16 silent_drops;
+ __le16 rnr_naks_sent;
+ __le32 retransmit_count;
+ __le32 icrc_error_count;
+ __le32 reserved;
+};
+
enum roce_event_opcode {
ROCE_EVENT_CREATE_QP = 11,
ROCE_EVENT_MODIFY_QP,
ROCE_EVENT_QUERY_QP,
ROCE_EVENT_DESTROY_QP,
+ ROCE_EVENT_CREATE_UD_QP,
+ ROCE_EVENT_DESTROY_UD_QP,
MAX_ROCE_EVENT_OPCODE
};
+struct roce_init_func_params {
+ u8 ll2_queue_id;
+ u8 cnp_vlan_priority;
+ u8 cnp_dscp;
+ u8 reserved;
+ __le32 cnp_send_timeout;
+};
+
struct roce_init_func_ramrod_data {
struct rdma_init_func_ramrod_data rdma;
+ struct roce_init_func_params roce;
};
struct roce_modify_qp_req_ramrod_data {
@@ -7222,6 +7647,8 @@
ROCE_RAMROD_MODIFY_QP,
ROCE_RAMROD_QUERY_QP,
ROCE_RAMROD_DESTROY_QP,
+ ROCE_RAMROD_CREATE_UD_QP,
+ ROCE_RAMROD_DESTROY_UD_QP,
MAX_ROCE_RAMROD_CMD_ID
};
@@ -7299,13 +7726,6 @@
__le32 reg1;
};
-enum roce_flavor {
- PLAIN_ROCE /* RoCE v1 */ ,
- RROCE_IPV4 /* RoCE v2 (Routable RoCE) over ipv4 */ ,
- RROCE_IPV6 /* RoCE v2 (Routable RoCE) over ipv6 */ ,
- MAX_ROCE_FLAVOR
-};
-
struct tstorm_roce_req_conn_ag_ctx {
u8 reserved0;
u8 state;
@@ -7416,8 +7836,8 @@
u8 flags0;
#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
@@ -8097,7 +8517,7 @@
__le16 irq_prod;
__le16 word3;
__le16 word4;
- __le16 word5;
+ __le16 ereserved1;
__le16 irq_cons;
u8 rxmit_opcode;
u8 byte4;
@@ -8200,6 +8620,812 @@
__le32 reg3;
};
+enum roce_flavor {
+ PLAIN_ROCE,
+ RROCE_IPV4,
+ RROCE_IPV6,
+ MAX_ROCE_FLAVOR
+};
+
+struct ystorm_iwarp_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+struct pstorm_iwarp_conn_st_ctx {
+ __le32 reserved[36];
+};
+
+struct xstorm_iwarp_conn_st_ctx {
+ __le32 reserved[44];
+};
+
+struct xstorm_iwarp_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
+ u8 flags2;
+#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+ u8 flags3;
+#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+ u8 flags7;
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ __le16 sq_comp_cons;
+ __le16 sq_tx_cons;
+ __le16 sq_prod;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 more_to_send_seq;
+ __le32 reg4;
+ __le32 rewinded_snd_max;
+ __le32 rd_msn;
+ __le16 irq_prod_via_msdm;
+ __le16 irq_cons;
+ __le16 hq_cons_th_or_mpa_data;
+ __le16 hq_cons;
+ __le32 atom_msn;
+ __le32 orq_cons;
+ __le32 orq_cons_th;
+ u8 byte7;
+ u8 max_ord;
+ u8 wqe_data_pad_bytes;
+ u8 former_hq_prod;
+ u8 irq_prod_via_msem;
+ u8 byte12;
+ u8 max_pkt_pdu_size_lo;
+ u8 max_pkt_pdu_size_hi;
+ u8 byte15;
+ u8 e5_reserved;
+ __le16 e5_reserved4;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 shared_queue_page_addr_lo;
+ __le32 shared_queue_page_addr_hi;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+};
+
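(Note on the convention used throughout these context structures: each flagsN byte packs several sub-fields described by paired _MASK/_SHIFT defines rather than C bitfields, since the layout must match the firmware exactly. A minimal sketch of the accessor idiom these defines assume follows; the qed driver ships its own SET_FIELD/GET_FIELD macros, so treat this as illustrative, not the canonical definition.)

/* Illustrative accessors for the _MASK/_SHIFT pairs above; the driver
 * defines its own equivalents, this is only a sketch.
 */
#define GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & (name ## _MASK))

#define SET_FIELD(value, name, flag)					\
	do {								\
		(value) &= ~((name ## _MASK) << (name ## _SHIFT));	\
		(value) |= ((u32)(flag)) << (name ## _SHIFT);		\
	} while (0)

/* Example: enable the SQ-flush completion flag, which lives in flags9. */
static inline void example_enable_sq_flush(struct xstorm_iwarp_conn_ag_ctx *c)
{
	SET_FIELD(c->flags9, XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN, 1);
}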
+struct tstorm_iwarp_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 unaligned_nxt_seq;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 orq_cache_idx;
+ u8 hq_prod;
+ __le16 sq_tx_cons_th;
+ u8 orq_prod;
+ u8 irq_cons;
+ __le16 sq_tx_cons;
+ __le16 conn_dpi;
+ __le16 rq_prod;
+ __le32 snd_seq;
+ __le32 last_hq_sequence;
+};
+
+struct tstorm_iwarp_conn_st_ctx {
+ __le32 reserved[60];
+};
+
+struct mstorm_iwarp_conn_st_ctx {
+ __le32 reserved[32];
+};
+
+struct ustorm_iwarp_conn_st_ctx {
+ __le32 reserved[24];
+};
+
+struct iwarp_conn_context {
+ struct ystorm_iwarp_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
+ struct pstorm_iwarp_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_iwarp_conn_st_ctx xstorm_st_context;
+ struct regpair xstorm_st_padding[2];
+ struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
+ struct timers_context timer_context;
+ struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct tstorm_iwarp_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
+ struct mstorm_iwarp_conn_st_ctx mstorm_st_context;
+ struct ustorm_iwarp_conn_st_ctx ustorm_st_context;
+};
+
+struct iwarp_create_qp_ramrod_data {
+ u8 flags;
+#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0
+#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT 1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
+#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
+#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT 5
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x3
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 6
+ u8 reserved1;
+ __le16 pd;
+ __le16 sq_num_pages;
+ __le16 rq_num_pages;
+ __le32 reserved3[2];
+ struct regpair qp_handle_for_cqe;
+ struct rdma_srq_id srq_id;
+ __le32 cq_cid_for_sq;
+ __le32 cq_cid_for_rq;
+ __le16 dpi;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ u8 reserved2[6];
+};
+
+enum iwarp_eqe_async_opcode {
+ IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE,
+ IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED,
+ IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE,
+ IWARP_EVENT_TYPE_ASYNC_CID_CLEANED,
+ IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED,
+ IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE,
+ IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW,
+ MAX_IWARP_EQE_ASYNC_OPCODE
+};
+
+struct iwarp_eqe_data_mpa_async_completion {
+ __le16 ulp_data_len;
+ u8 reserved[6];
+};
+
+struct iwarp_eqe_data_tcp_async_completion {
+ __le16 ulp_data_len;
+ u8 mpa_handshake_mode;
+ u8 reserved[5];
+};
+
+enum iwarp_eqe_sync_opcode {
+	IWARP_EVENT_TYPE_TCP_OFFLOAD = 11,
+ IWARP_EVENT_TYPE_MPA_OFFLOAD,
+ IWARP_EVENT_TYPE_MPA_OFFLOAD_SEND_RTR,
+ IWARP_EVENT_TYPE_CREATE_QP,
+ IWARP_EVENT_TYPE_QUERY_QP,
+ IWARP_EVENT_TYPE_MODIFY_QP,
+ IWARP_EVENT_TYPE_DESTROY_QP,
+ MAX_IWARP_EQE_SYNC_OPCODE
+};
+
+enum iwarp_fw_return_code {
+ IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 5,
+ IWARP_CONN_ERROR_TCP_CONNECTION_RST,
+ IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT,
+ IWARP_CONN_ERROR_MPA_ERROR_REJECT,
+ IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER,
+ IWARP_CONN_ERROR_MPA_RST,
+ IWARP_CONN_ERROR_MPA_FIN,
+ IWARP_CONN_ERROR_MPA_RTR_MISMATCH,
+ IWARP_CONN_ERROR_MPA_INSUF_IRD,
+ IWARP_CONN_ERROR_MPA_INVALID_PACKET,
+ IWARP_CONN_ERROR_MPA_LOCAL_ERROR,
+ IWARP_CONN_ERROR_MPA_TIMEOUT,
+ IWARP_CONN_ERROR_MPA_TERMINATE,
+ IWARP_QP_IN_ERROR_GOOD_CLOSE,
+ IWARP_QP_IN_ERROR_BAD_CLOSE,
+ IWARP_EXCEPTION_DETECTED_LLP_CLOSED,
+ IWARP_EXCEPTION_DETECTED_LLP_RESET,
+ IWARP_EXCEPTION_DETECTED_IRQ_FULL,
+ IWARP_EXCEPTION_DETECTED_RQ_EMPTY,
+ IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT,
+ IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR,
+ IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW,
+ IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC,
+ IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR,
+ IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR,
+ IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED,
+ MAX_IWARP_FW_RETURN_CODE
+};
+
+struct iwarp_init_func_params {
+ u8 ll2_ooo_q_index;
+ u8 reserved1[7];
+};
+
+struct iwarp_init_func_ramrod_data {
+ struct rdma_init_func_ramrod_data rdma;
+ struct tcp_init_params tcp;
+ struct iwarp_init_func_params iwarp;
+};
+
+enum iwarp_modify_qp_new_state_type {
+ IWARP_MODIFY_QP_STATE_CLOSING = 1,
+	IWARP_MODIFY_QP_STATE_ERROR = 2,
+ MAX_IWARP_MODIFY_QP_NEW_STATE_TYPE
+};
+
+struct iwarp_modify_qp_ramrod_data {
+ __le16 transition_to_state;
+ __le16 flags;
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 0
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 1
+#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 2
+#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_SHIFT 3
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x7FF
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 5
+ __le32 reserved3[3];
+ __le32 reserved4[8];
+};
+
+struct mpa_rq_params {
+ __le32 ird;
+ __le32 ord;
+};
+
+struct mpa_ulp_buffer {
+ struct regpair addr;
+ __le16 len;
+ __le16 reserved[3];
+};
+
+struct mpa_outgoing_params {
+ u8 crc_needed;
+ u8 reject;
+ u8 reserved[6];
+ struct mpa_rq_params out_rq;
+ struct mpa_ulp_buffer outgoing_ulp_buffer;
+};
+
+struct iwarp_mpa_offload_ramrod_data {
+ struct mpa_outgoing_params common;
+ __le32 tcp_cid;
+ u8 mode;
+ u8 tcp_connect_side;
+ u8 rtr_pref;
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_MASK 0x7
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT 0
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK 0x1F
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT 3
+ u8 reserved2;
+ struct mpa_ulp_buffer incoming_ulp_buffer;
+ struct regpair async_eqe_output_buf;
+ struct regpair handle_for_async;
+ struct regpair shared_queue_addr;
+ u8 stats_counter_id;
+ u8 reserved3[15];
+};
+
+struct iwarp_offload_params {
+ struct mpa_ulp_buffer incoming_ulp_buffer;
+ struct regpair async_eqe_output_buf;
+ struct regpair handle_for_async;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ u8 stats_counter_id;
+ u8 mpa_mode;
+ u8 reserved[10];
+};
+
+struct iwarp_query_qp_output_params {
+ __le32 flags;
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
+ u8 reserved1[4];
+};
+
+struct iwarp_query_qp_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+enum iwarp_ramrod_cmd_id {
+	IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 11,
+ IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
+ IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
+ IWARP_RAMROD_CMD_ID_CREATE_QP,
+ IWARP_RAMROD_CMD_ID_QUERY_QP,
+ IWARP_RAMROD_CMD_ID_MODIFY_QP,
+ IWARP_RAMROD_CMD_ID_DESTROY_QP,
+ MAX_IWARP_RAMROD_CMD_ID
+};
+
+struct iwarp_rxmit_stats_drv {
+ struct regpair tx_go_to_slow_start_event_cnt;
+ struct regpair tx_fast_retransmit_event_cnt;
+};
+
+struct iwarp_tcp_offload_ramrod_data {
+ struct iwarp_offload_params iwarp;
+ struct tcp_offload_params_opt2 tcp;
+};
+
+enum mpa_negotiation_mode {
+ MPA_NEGOTIATION_TYPE_BASIC = 1,
+ MPA_NEGOTIATION_TYPE_ENHANCED = 2,
+ MAX_MPA_NEGOTIATION_MODE
+};
+
+enum mpa_rtr_type {
+ MPA_RTR_TYPE_NONE = 0,
+ MPA_RTR_TYPE_ZERO_SEND = 1,
+ MPA_RTR_TYPE_ZERO_WRITE = 2,
+ MPA_RTR_TYPE_ZERO_SEND_AND_WRITE = 3,
+ MPA_RTR_TYPE_ZERO_READ = 4,
+ MPA_RTR_TYPE_ZERO_SEND_AND_READ = 5,
+ MPA_RTR_TYPE_ZERO_WRITE_AND_READ = 6,
+ MPA_RTR_TYPE_ZERO_SEND_AND_WRITE_AND_READ = 7,
+ MAX_MPA_RTR_TYPE
+};
+
+struct unaligned_opaque_data {
+ __le16 first_mpa_offset;
+ u8 tcp_payload_offset;
+ u8 flags;
+#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_MASK 0x1
+#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0
+#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK 0x1
+#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT 1
+#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK 0x3F
+#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT 2
+ __le32 cid;
+};
+
+struct mstorm_iwarp_conn_ag_ctx {
+ u8 reserved;
+ u8 state;
+ u8 flags0;
+#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2
+#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0
+#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6
+#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 rcq_cons;
+ __le16 rcq_cons_th;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct ustorm_iwarp_conn_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+ u8 flags3;
+#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 cq_cons;
+ __le32 cq_se_prod;
+ __le32 cq_prod;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+struct ystorm_iwarp_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
struct ystorm_fcoe_conn_st_ctx {
u8 func_mode;
u8 cos;
@@ -9222,7 +10448,7 @@
u8 byte13;
u8 byte14;
u8 byte15;
- u8 byte16;
+ u8 ereserved;
__le16 word11;
__le32 reg10;
__le32 reg11;
@@ -10285,9 +11511,11 @@
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
+#define DRV_MSG_CODE_S_TAG_UPDATE_ACK 0x3b000000
#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
+#define DRV_MSG_CODE_CFG_PF_VFS_MSIX 0xc0020000
#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
#define DRV_MSG_CODE_MCP_RESET 0x00090000
@@ -10444,6 +11672,7 @@
#define FW_MSG_CODE_RESOURCE_ALLOC_OK 0x34000000
#define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN 0x35000000
#define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED 0x36000000
+#define FW_MSG_CODE_S_TAG_UPDATE_ACK_DONE 0x3b000000
#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
#define FW_MSG_CODE_NVM_OK 0x00010000
@@ -10451,7 +11680,7 @@
#define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000
#define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000
-
+#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE 0x00870000
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 fw_mb_param;
@@ -10466,6 +11695,8 @@
#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2
#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3
+#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)
+
u32 drv_pulse_mb;
#define DRV_PULSE_SEQ_MASK 0x00007fff
#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
@@ -10489,7 +11720,7 @@
MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
MFW_DRV_MSG_RESERVED4,
MFW_DRV_MSG_BW_UPDATE,
- MFW_DRV_MSG_BW_UPDATE5,
+ MFW_DRV_MSG_S_TAG_UPDATE,
MFW_DRV_MSG_GET_LAN_STATS,
MFW_DRV_MSG_GET_FCOE_STATS,
MFW_DRV_MSG_GET_ISCSI_STATS,
@@ -10591,6 +11822,12 @@
u32 led_global_settings;
u32 generic_cont1;
u32 mbi_version;
+#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
+#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
+#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
+#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
u32 mbi_date;
u32 misc_sig;
u32 device_capabilities;
@@ -10758,6 +11995,8 @@
u32 rsrv_persist[5]; /* Persist reserved for MFW upgrades */
};
+#define NVM_MAGIC_VALUE 0x669955aa
+
enum nvm_image_type {
NVM_TYPE_TIM1 = 0x01,
NVM_TYPE_TIM2 = 0x02,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 0a8fde6..b069ad0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -40,31 +40,17 @@
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
-enum cminterface {
- MCM_SEC,
- MCM_PRI,
- UCM_SEC,
- UCM_PRI,
- TCM_SEC,
- TCM_PRI,
- YCM_SEC,
- YCM_PRI,
- XCM_SEC,
- XCM_PRI,
- NUM_OF_CM_INTERFACES
-};
-
-/* general constants */
+/* General constants */
#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
QM_PQ_ELEMENT_SIZE, \
0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \
0x100) - 1 : 0)
#define QM_INVALID_PQ_ID 0xffff
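(A quick worked check of the two rounding macros above, assuming QM_PQ_ELEMENT_SIZE is 4 bytes; that value is taken from the current headers and is an assumption here, not part of this hunk.)

/* Hypothetical examples, not driver code:
 * QM_PQ_MEM_4KB(1000)   = DIV_ROUND_UP((1000 + 1) * 4, 0x1000) = 1 page
 * QM_PQ_SIZE_256B(1000) = DIV_ROUND_UP(1000, 0x100) - 1        = 3
 * Both macros return 0 for a zero-sized PQ, by design.
 */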
-/* feature enable */
+/* Feature enable */
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1
-/* other PQ constants */
+/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4
/* WFQ constants */
#define QM_WFQ_UPPER_BOUND 62500000
@@ -106,20 +92,21 @@
#define BTB_PURE_LB_FACTOR 10
#define BTB_PURE_LB_RATIO 7
/* QM stop command constants */
-#define QM_STOP_PQ_MASK_WIDTH 32
-#define QM_STOP_CMD_ADDR 0x2
-#define QM_STOP_CMD_STRUCT_SIZE 2
+#define QM_STOP_PQ_MASK_WIDTH 32
+#define QM_STOP_CMD_ADDR 2
+#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
-#define QM_STOP_CMD_PAUSE_MASK_MASK -1
-#define QM_STOP_CMD_GROUP_ID_OFFSET 1
-#define QM_STOP_CMD_GROUP_ID_SHIFT 16
-#define QM_STOP_CMD_GROUP_ID_MASK 15
-#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
-#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
-#define QM_STOP_CMD_PQ_TYPE_MASK 1
-#define QM_STOP_CMD_MAX_POLL_COUNT 100
-#define QM_STOP_CMD_POLL_PERIOD_US 500
+#define QM_STOP_CMD_PAUSE_MASK_MASK -1
+#define QM_STOP_CMD_GROUP_ID_OFFSET 1
+#define QM_STOP_CMD_GROUP_ID_SHIFT 16
+#define QM_STOP_CMD_GROUP_ID_MASK 15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
+#define QM_STOP_CMD_PQ_TYPE_MASK 1
+#define QM_STOP_CMD_MAX_POLL_COUNT 100
+#define QM_STOP_CMD_POLL_PERIOD_US 500
+
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd) cmd ## _STRUCT_SIZE
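(The OFFSET/SHIFT/MASK triples defined for QM_STOP_CMD above are consumed by QM_CMD_SET_FIELD() further down in this file; a plausible shape for that macro is sketched here, but the driver's exact definition may differ.)

/* Sketch: OFFSET picks the u32 word inside cmd_arr[], while the
 * field's SHIFT/MASK place the value within that word.
 */
#define QM_CMD_SET_FIELD(var, cmd, field, value)	\
	SET_FIELD(var[cmd ## _ ## field ## _OFFSET],	\
		  cmd ## _ ## field, value)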
@@ -146,16 +133,17 @@
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
- /* enable RLs for all VOQs */
+ /* Enable RLs for all VOQs */
STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
(1 << MAX_NUM_VOQS) - 1);
- /* write RL period */
+ /* Write RL period */
STORE_RT_REG(p_hwfn,
QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
STORE_RT_REG(p_hwfn,
QM_REG_RLPFPERIODTIMER_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
@@ -167,7 +155,8 @@
static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (pf_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
@@ -180,14 +169,15 @@
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
vport_rl_en ? 1 : 0);
if (vport_rl_en) {
- /* write RL period (use timer 0 only) */
+ /* Write RL period (use timer 0 only) */
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
@@ -200,7 +190,8 @@
{
STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
vport_wfq_en ? 1 : 0);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (vport_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
@@ -208,7 +199,7 @@
}
/* Prepare runtime init values to allocate PBF command queue lines for
- * the specified VOQ
+ * the specified VOQ.
*/
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
u8 voq, u16 cmdq_lines)
@@ -232,7 +223,7 @@
{
u8 tc, voq, port_id, num_tcs_in_port;
- /* clear PBF lines for all VOQs */
+ /* Clear PBF lines for all VOQs */
for (voq = 0; voq < MAX_NUM_VOQS; voq++)
STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
@@ -285,7 +276,7 @@
if (!port_params[port_id].active)
continue;
- /* subtract headroom blocks */
+ /* Subtract headroom blocks */
usable_blocks = port_params[port_id].num_btb_blocks -
BTB_HEADROOM_BLOCKS;
@@ -305,7 +296,7 @@
phys_blocks = (usable_blocks - pure_lb_blocks) /
num_tcs_in_port;
- /* init physical TCs */
+ /* Init physical TCs */
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
if (((port_params[port_id].active_phys_tcs >>
tc) & 0x1) != 1)
@@ -317,7 +308,7 @@
phys_blocks);
}
- /* init pure LB TC */
+ /* Init pure LB TC */
temp = LB_VOQ(port_id);
STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
pure_lb_blocks);
@@ -338,24 +329,24 @@
QM_PF_QUEUE_GROUP_SIZE;
u16 i, pq_id, pq_group;
- /* a bit per Tx PQ indicating if the PQ is associated with a VF */
+ /* A bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
u32 mem_addr_4kb = base_mem_addr_4kb;
- /* set mapping from PQ group to PF */
+ /* Set mapping from PQ group to PF */
for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
(u32)(p_params->pf_id));
- /* set PQ sizes */
+ /* Set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
QM_PQ_SIZE_256B(p_params->num_pf_cids));
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
QM_PQ_SIZE_256B(p_params->num_vf_cids));
- /* go over all Tx PQs */
+ /* Go over all Tx PQs */
for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
p_params->max_phys_tcs_per_port);
@@ -366,17 +357,18 @@
(p_params->pq_params[i].vport_id <
MAX_QM_GLOBAL_RLS);
- /* update first Tx PQ of VPORT/TC */
+ /* Update first Tx PQ of VPORT/TC */
u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
p_params->start_vport;
u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];
if (first_tx_pq_id == QM_INVALID_PQ_ID) {
- /* create new VP PQ */
+ /* Create new VP PQ */
pq_ids[p_params->pq_params[i].tc_id] = pq_id;
first_tx_pq_id = pq_id;
- /* map VP PQ to VOQ and PF */
+
+ /* Map VP PQ to VOQ and PF */
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPMAP_RT_OFFSET +
first_tx_pq_id,
@@ -388,7 +380,7 @@
if (p_params->pq_params[i].rl_valid && !rl_valid)
DP_NOTICE(p_hwfn,
"Invalid VPORT ID for rate limiter configuration");
- /* fill PQ map entry */
+ /* Fill PQ map entry */
memset(&tx_pq_map, 0, sizeof(tx_pq_map));
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
SET_FIELD(tx_pq_map.reg,
@@ -400,18 +392,16 @@
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
p_params->pq_params[i].wrr_group);
- /* write PQ map entry to CAM */
+ /* Write PQ map entry to CAM */
STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
*((u32 *)&tx_pq_map));
- /* set base address */
+ /* Set base address */
STORE_RT_REG(p_hwfn,
QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
- /* check if VF PQ */
+
+ /* If VF PQ, add indication to PQ VF mask */
if (is_vf_pq) {
- /* if PQ is associated with a VF, add indication
- * to PQ VF mask
- */
tx_pq_vf_mask[pq_id /
QM_PF_QUEUE_GROUP_SIZE] |=
BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
@@ -421,16 +411,12 @@
}
}
- /* store Tx PQ VF mask to size select register */
- for (i = 0; i < num_tx_pq_vf_masks; i++) {
- if (tx_pq_vf_mask[i]) {
- u32 addr;
-
- addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
- STORE_RT_REG(p_hwfn, addr,
+ /* Store Tx PQ VF mask to size select register */
+ for (i = 0; i < num_tx_pq_vf_masks; i++)
+ if (tx_pq_vf_mask[i])
+ STORE_RT_REG(p_hwfn,
+ QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i,
tx_pq_vf_mask[i]);
- }
- }
}
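(The mask bookkeeping above keeps one "is VF" bit per Tx PQ, grouped QM_PF_QUEUE_GROUP_SIZE bits to a u32, which is how tx_pq_vf_mask[] is dimensioned. A standalone sketch of the indexing, under the assumption that the group size does not exceed 32:)

/* Illustrative only: mirror of the tx_pq_vf_mask[] bit placement. */
static void mark_vf_pq(u32 *tx_pq_vf_mask, u16 pq_id, u16 group_size)
{
	tx_pq_vf_mask[pq_id / group_size] |= BIT(pq_id % group_size);
}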
/* Prepare Other PQ mapping runtime init values for the specified PF */
@@ -440,23 +426,25 @@
u32 num_pf_cids,
u32 num_tids, u32 base_mem_addr_4kb)
{
- u16 i, pq_id;
+ u32 pq_size, pq_mem_4kb, mem_addr_4kb;
+ u16 i, pq_id, pq_group;
/* a single other PQ group is used in each PF,
* where PQ group i is used in PF i.
*/
- u16 pq_group = pf_id;
- u32 pq_size = num_pf_cids + num_tids;
- u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
- u32 mem_addr_4kb = base_mem_addr_4kb;
+ pq_group = pf_id;
+ pq_size = num_pf_cids + num_tids;
+ pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
+ mem_addr_4kb = base_mem_addr_4kb;
- /* map PQ group to PF */
+ /* Map PQ group to PF */
STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
(u32)(pf_id));
- /* set PQ sizes */
+ /* Set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
QM_PQ_SIZE_256B(pq_size));
- /* set base address */
+
+ /* Set base address */
for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
STORE_RT_REG(p_hwfn,
@@ -485,7 +473,7 @@
inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
+ DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
return -1;
}
@@ -514,7 +502,7 @@
u32 inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
+ DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
return -1;
}
STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
@@ -535,7 +523,7 @@
u32 inc_val;
u8 tc, i;
- /* go over all PF VPORTs */
+ /* Go over all PF VPORTs */
for (i = 0; i < num_vports; i++) {
if (!vport_params[i].vport_wfq)
@@ -544,7 +532,7 @@
inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
if (inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn,
- "Invalid VPORT WFQ weight configuration");
+ "Invalid VPORT WFQ weight configuration\n");
return -1;
}
@@ -578,17 +566,17 @@
if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
DP_NOTICE(p_hwfn,
- "Invalid VPORT ID for rate limiter configuration");
+ "Invalid VPORT ID for rate limiter configuration\n");
return -1;
}
- /* go over all PF VPORTs */
+ /* Go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn,
- "Invalid VPORT rate-limit configuration");
+ "Invalid VPORT rate-limit configuration\n");
return -1;
}
@@ -617,7 +605,7 @@
reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
}
- /* check if timeout while waiting for SDM command ready */
+ /* Check if timeout while waiting for SDM command ready */
if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"Timeout when waiting for QM SDM command ready signal\n");
@@ -701,16 +689,16 @@
QM_OTHER_PQS_PER_PF;
u8 tc, i;
- /* clear first Tx PQ ID array for each VPORT */
+ /* Clear first Tx PQ ID array for each VPORT */
for (i = 0; i < p_params->num_vports; i++)
for (tc = 0; tc < NUM_OF_TCS; tc++)
vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
- /* map Other PQs (if any) */
+ /* Map Other PQs (if any) */
qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
p_params->num_pf_cids, p_params->num_tids, 0);
- /* map Tx PQs */
+ /* Map Tx PQs */
qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
if (p_params->pf_wfq)
@@ -736,7 +724,7 @@
u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
+ DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
return -1;
}
@@ -750,7 +738,7 @@
u32 inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
+ DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
return -1;
}
@@ -766,17 +754,18 @@
struct qed_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
- u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
+ u16 vport_pq_id;
+ u32 inc_val;
u8 tc;
+ inc_val = QM_WFQ_INC_VAL(vport_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration");
+ DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration\n");
return -1;
}
for (tc = 0; tc < NUM_OF_TCS; tc++) {
- u16 vport_pq_id = first_tx_pq_id[tc];
-
+ vport_pq_id = first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID)
qed_wr(p_hwfn, p_ptt,
QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
@@ -793,12 +782,12 @@
if (vport_id >= MAX_QM_GLOBAL_RLS) {
DP_NOTICE(p_hwfn,
- "Invalid VPORT ID for rate limiter configuration");
+ "Invalid VPORT ID for rate limiter configuration\n");
return -1;
}
if (inc_val > QM_RL_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
+ DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
return -1;
}
@@ -818,15 +807,15 @@
u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
- /* set command's PQ type */
+ /* Set command's PQ type */
QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
- /* set PQ bit in mask (stop command only) */
+ /* Set PQ bit in mask (stop command only) */
if (!is_release_cmd)
pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
- /* if last PQ or end of PQ mask, write command */
+ /* If last PQ or end of PQ mask, write command */
if ((pq_id == last_pq) ||
(pq_id % QM_STOP_PQ_MASK_WIDTH ==
(QM_STOP_PQ_MASK_WIDTH - 1))) {
@@ -962,8 +951,10 @@
ip_geneve_enable ? 1 : 0);
}
+#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
+#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
-#define PARSER_ETH_CONN_CM_HDR (0x0)
+#define PARSER_ETH_CONN_CM_HDR 0
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
@@ -971,40 +962,26 @@
void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 pf_id)
{
- union gft_cam_line_union camline;
- struct gft_ram_line ramline;
- u32 *p_ramline, i;
-
- p_ramline = (u32 *)&ramline;
+ u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM +
+ pf_id * RAM_LINE_SIZE;
/* Stop using GFT logic */
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
- memset(&camline, 0, sizeof(union gft_cam_line_union));
- qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
- camline.cam_line_mapped.camline);
- memset(&ramline, 0, sizeof(ramline));
-
- for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) {
- u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM;
-
- hw_addr += (RAM_LINE_SIZE * pf_id + i * REG_SIZE);
-
- qed_wr(p_hwfn, p_ptt, hw_addr, *(p_ramline + i));
- }
+ qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
+ qed_wr(p_hwfn, p_ptt, hw_addr, 0);
+ qed_wr(p_hwfn, p_ptt, hw_addr + 4, 0);
}
void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 pf_id, bool tcp, bool udp,
bool ipv4, bool ipv6)
{
- u32 rfs_cm_hdr_event_id, *p_ramline;
union gft_cam_line_union camline;
struct gft_ram_line ramline;
- int i;
+ u32 rfs_cm_hdr_event_id;
rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
- p_ramline = (u32 *)&ramline;
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn,
@@ -1024,18 +1001,20 @@
qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
camline.cam_line_mapped.camline = 0;
- /* cam line is now valid!! */
+ /* Cam line is now valid!! */
SET_FIELD(camline.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_VALID, 1);
/* filters are per PF!! */
SET_FIELD(camline.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1);
+ GFT_CAM_LINE_MAPPED_PF_ID_MASK,
+ GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
SET_FIELD(camline.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
if (!(tcp && udp)) {
SET_FIELD(camline.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 1);
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
if (tcp)
SET_FIELD(camline.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
@@ -1059,34 +1038,38 @@
GFT_PROFILE_IPV6);
}
- /* write characteristics to cam */
+ /* Write characteristics to cam */
qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
camline.cam_line_mapped.camline);
camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt,
PRS_REG_GFT_CAM +
CAM_LINE_SIZE * pf_id);
- /* write line to RAM - compare to filter 4 tuple */
- ramline.low32bits = 0;
- ramline.high32bits = 0;
- SET_FIELD(ramline.high32bits, GFT_RAM_LINE_DST_IP, 1);
- SET_FIELD(ramline.high32bits, GFT_RAM_LINE_SRC_IP, 1);
- SET_FIELD(ramline.low32bits, GFT_RAM_LINE_SRC_PORT, 1);
- SET_FIELD(ramline.low32bits, GFT_RAM_LINE_DST_PORT, 1);
+ /* Write line to RAM - compare to filter 4 tuple */
+ ramline.lo = 0;
+ ramline.hi = 0;
+ SET_FIELD(ramline.hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ramline.hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ramline.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ramline.lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ramline.lo, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ramline.lo, GFT_RAM_LINE_DST_PORT, 1);
- /* each iteration write to reg */
- for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
- qed_wr(p_hwfn, p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
- i * REG_SIZE, *(p_ramline + i));
+ /* Each iteration write to reg */
+ qed_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+ ramline.lo);
+ qed_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + 4,
+ ramline.hi);
- /* set default profile so that no filter match will happen */
- ramline.low32bits = 0xffff;
- ramline.high32bits = 0xffff;
-
- for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
- qed_wr(p_hwfn, p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
- PRS_GFT_CAM_LINES_NO_MATCH + i * REG_SIZE,
- *(p_ramline + i));
+ /* Set default profile so that no filter match will happen */
+ qed_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH,
+ ramline.lo);
+ qed_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH + 4,
+ ramline.hi);
}
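(The rework above drops the per-u32 copy loop: a GFT profile RAM line is a u64, i.e. RAM_LINE_SIZE, so each line is simply two 32-bit register writes, lo then hi at +4. A sketch of the resulting access pattern; the wrapper itself is hypothetical, while the register and size names match the code above.)

/* Hypothetical wrapper around the two-write pattern used above. */
static void gft_write_ram_line(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			       u32 line_idx, u32 lo, u32 hi)
{
	u32 addr = PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * line_idx;

	qed_wr(p_hwfn, p_ptt, addr, lo);
	qed_wr(p_hwfn, p_ptt, addr + 4, hi);
}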
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 4a2e7be..e3f3688 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -158,6 +158,7 @@
GFP_KERNEL);
if (!rt_data->init_val) {
kfree(rt_data->b_valid);
+ rt_data->b_valid = NULL;
return -ENOMEM;
}
@@ -167,7 +168,9 @@
void qed_init_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->rt_data.init_val);
+ p_hwfn->rt_data.init_val = NULL;
kfree(p_hwfn->rt_data.b_valid);
+ p_hwfn->rt_data.b_valid = NULL;
}
static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
@@ -525,6 +528,7 @@
}
kfree(p_hwfn->unzip_buf);
+ p_hwfn->unzip_buf = NULL;
return rc;
}
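(All three hunks in this file pair kfree() with a NULL assignment. Since kfree(NULL) is a no-op, this makes the teardown paths idempotent: calling qed_init_free() twice, or retrying after a failed allocation, can no longer double-free. The pattern in isolation, as a generic sketch rather than driver code:)

/* Free-and-poison idiom used above (illustrative, generic form). */
static void free_and_poison(void **p)
{
	kfree(*p);
	*p = NULL;	/* kfree(NULL) is a no-op, so repeat frees are safe */
}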
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 40f057e..719cdbf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -90,6 +90,12 @@
/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK (0x000ff000)
#define ATTENTION_OFFSET_SHIFT (12)
+
+#define ATTENTION_BB_MASK (0x00700000)
+#define ATTENTION_BB_SHIFT (20)
+#define ATTENTION_BB(value) (value << ATTENTION_BB_SHIFT)
+#define ATTENTION_BB_DIFFERENT BIT(23)
+
unsigned int flags;
/* Callback to call if attention will be triggered */
@@ -105,1215 +111,6 @@
#define MAX_ATTN_GRPS (8)
#define NUM_ATTN_REGS (9)
-/* HW Attention register */
-struct attn_hw_reg {
- u16 reg_idx; /* Index of this register in its block */
- u16 num_of_bits; /* number of valid attention bits */
- u32 sts_addr; /* Address of the STS register */
- u32 sts_clr_addr; /* Address of the STS_CLR register */
- u32 sts_wr_addr; /* Address of the STS_WR register */
- u32 mask_addr; /* Address of the MASK register */
-};
-
-/* HW block attention registers */
-struct attn_hw_regs {
- u16 num_of_int_regs; /* Number of interrupt regs */
- u16 num_of_prty_regs; /* Number of parity regs */
- struct attn_hw_reg **int_regs; /* interrupt regs */
- struct attn_hw_reg **prty_regs; /* parity regs */
-};
-
-/* HW block attention registers */
-struct attn_hw_block {
- const char *name; /* Block name */
- struct attn_hw_regs chip_regs[1];
-};
-
-static struct attn_hw_reg grc_int0_bb_b0 = {
- 0, 4, 0x50180, 0x5018c, 0x50188, 0x50184};
-
-static struct attn_hw_reg *grc_int_bb_b0_regs[1] = {
- &grc_int0_bb_b0};
-
-static struct attn_hw_reg grc_prty1_bb_b0 = {
- 0, 2, 0x50200, 0x5020c, 0x50208, 0x50204};
-
-static struct attn_hw_reg *grc_prty_bb_b0_regs[1] = {
- &grc_prty1_bb_b0};
-
-static struct attn_hw_reg miscs_int0_bb_b0 = {
- 0, 3, 0x9180, 0x918c, 0x9188, 0x9184};
-
-static struct attn_hw_reg miscs_int1_bb_b0 = {
- 1, 11, 0x9190, 0x919c, 0x9198, 0x9194};
-
-static struct attn_hw_reg *miscs_int_bb_b0_regs[2] = {
- &miscs_int0_bb_b0, &miscs_int1_bb_b0};
-
-static struct attn_hw_reg miscs_prty0_bb_b0 = {
- 0, 1, 0x91a0, 0x91ac, 0x91a8, 0x91a4};
-
-static struct attn_hw_reg *miscs_prty_bb_b0_regs[1] = {
- &miscs_prty0_bb_b0};
-
-static struct attn_hw_reg misc_int0_bb_b0 = {
- 0, 1, 0x8180, 0x818c, 0x8188, 0x8184};
-
-static struct attn_hw_reg *misc_int_bb_b0_regs[1] = {
- &misc_int0_bb_b0};
-
-static struct attn_hw_reg pglue_b_int0_bb_b0 = {
- 0, 23, 0x2a8180, 0x2a818c, 0x2a8188, 0x2a8184};
-
-static struct attn_hw_reg *pglue_b_int_bb_b0_regs[1] = {
- &pglue_b_int0_bb_b0};
-
-static struct attn_hw_reg pglue_b_prty0_bb_b0 = {
- 0, 1, 0x2a8190, 0x2a819c, 0x2a8198, 0x2a8194};
-
-static struct attn_hw_reg pglue_b_prty1_bb_b0 = {
- 1, 22, 0x2a8200, 0x2a820c, 0x2a8208, 0x2a8204};
-
-static struct attn_hw_reg *pglue_b_prty_bb_b0_regs[2] = {
- &pglue_b_prty0_bb_b0, &pglue_b_prty1_bb_b0};
-
-static struct attn_hw_reg cnig_int0_bb_b0 = {
- 0, 6, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec};
-
-static struct attn_hw_reg *cnig_int_bb_b0_regs[1] = {
- &cnig_int0_bb_b0};
-
-static struct attn_hw_reg cnig_prty0_bb_b0 = {
- 0, 2, 0x218348, 0x218354, 0x218350, 0x21834c};
-
-static struct attn_hw_reg *cnig_prty_bb_b0_regs[1] = {
- &cnig_prty0_bb_b0};
-
-static struct attn_hw_reg cpmu_int0_bb_b0 = {
- 0, 1, 0x303e0, 0x303ec, 0x303e8, 0x303e4};
-
-static struct attn_hw_reg *cpmu_int_bb_b0_regs[1] = {
- &cpmu_int0_bb_b0};
-
-static struct attn_hw_reg ncsi_int0_bb_b0 = {
- 0, 1, 0x404cc, 0x404d8, 0x404d4, 0x404d0};
-
-static struct attn_hw_reg *ncsi_int_bb_b0_regs[1] = {
- &ncsi_int0_bb_b0};
-
-static struct attn_hw_reg ncsi_prty1_bb_b0 = {
- 0, 1, 0x40000, 0x4000c, 0x40008, 0x40004};
-
-static struct attn_hw_reg *ncsi_prty_bb_b0_regs[1] = {
- &ncsi_prty1_bb_b0};
-
-static struct attn_hw_reg opte_prty1_bb_b0 = {
- 0, 11, 0x53000, 0x5300c, 0x53008, 0x53004};
-
-static struct attn_hw_reg opte_prty0_bb_b0 = {
- 1, 1, 0x53208, 0x53214, 0x53210, 0x5320c};
-
-static struct attn_hw_reg *opte_prty_bb_b0_regs[2] = {
- &opte_prty1_bb_b0, &opte_prty0_bb_b0};
-
-static struct attn_hw_reg bmb_int0_bb_b0 = {
- 0, 16, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4};
-
-static struct attn_hw_reg bmb_int1_bb_b0 = {
- 1, 28, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc};
-
-static struct attn_hw_reg bmb_int2_bb_b0 = {
- 2, 26, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4};
-
-static struct attn_hw_reg bmb_int3_bb_b0 = {
- 3, 31, 0x540108, 0x540114, 0x540110, 0x54010c};
-
-static struct attn_hw_reg bmb_int4_bb_b0 = {
- 4, 27, 0x540120, 0x54012c, 0x540128, 0x540124};
-
-static struct attn_hw_reg bmb_int5_bb_b0 = {
- 5, 29, 0x540138, 0x540144, 0x540140, 0x54013c};
-
-static struct attn_hw_reg bmb_int6_bb_b0 = {
- 6, 30, 0x540150, 0x54015c, 0x540158, 0x540154};
-
-static struct attn_hw_reg bmb_int7_bb_b0 = {
- 7, 32, 0x540168, 0x540174, 0x540170, 0x54016c};
-
-static struct attn_hw_reg bmb_int8_bb_b0 = {
- 8, 32, 0x540184, 0x540190, 0x54018c, 0x540188};
-
-static struct attn_hw_reg bmb_int9_bb_b0 = {
- 9, 32, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0};
-
-static struct attn_hw_reg bmb_int10_bb_b0 = {
- 10, 3, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8};
-
-static struct attn_hw_reg bmb_int11_bb_b0 = {
- 11, 4, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0};
-
-static struct attn_hw_reg *bmb_int_bb_b0_regs[12] = {
- &bmb_int0_bb_b0, &bmb_int1_bb_b0, &bmb_int2_bb_b0, &bmb_int3_bb_b0,
- &bmb_int4_bb_b0, &bmb_int5_bb_b0, &bmb_int6_bb_b0, &bmb_int7_bb_b0,
- &bmb_int8_bb_b0, &bmb_int9_bb_b0, &bmb_int10_bb_b0, &bmb_int11_bb_b0};
-
-static struct attn_hw_reg bmb_prty0_bb_b0 = {
- 0, 5, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0};
-
-static struct attn_hw_reg bmb_prty1_bb_b0 = {
- 1, 31, 0x540400, 0x54040c, 0x540408, 0x540404};
-
-static struct attn_hw_reg bmb_prty2_bb_b0 = {
- 2, 15, 0x540410, 0x54041c, 0x540418, 0x540414};
-
-static struct attn_hw_reg *bmb_prty_bb_b0_regs[3] = {
- &bmb_prty0_bb_b0, &bmb_prty1_bb_b0, &bmb_prty2_bb_b0};
-
-static struct attn_hw_reg pcie_prty1_bb_b0 = {
- 0, 17, 0x54000, 0x5400c, 0x54008, 0x54004};
-
-static struct attn_hw_reg *pcie_prty_bb_b0_regs[1] = {
- &pcie_prty1_bb_b0};
-
-static struct attn_hw_reg mcp2_prty0_bb_b0 = {
- 0, 1, 0x52040, 0x5204c, 0x52048, 0x52044};
-
-static struct attn_hw_reg mcp2_prty1_bb_b0 = {
- 1, 12, 0x52204, 0x52210, 0x5220c, 0x52208};
-
-static struct attn_hw_reg *mcp2_prty_bb_b0_regs[2] = {
- &mcp2_prty0_bb_b0, &mcp2_prty1_bb_b0};
-
-static struct attn_hw_reg pswhst_int0_bb_b0 = {
- 0, 18, 0x2a0180, 0x2a018c, 0x2a0188, 0x2a0184};
-
-static struct attn_hw_reg *pswhst_int_bb_b0_regs[1] = {
- &pswhst_int0_bb_b0};
-
-static struct attn_hw_reg pswhst_prty0_bb_b0 = {
- 0, 1, 0x2a0190, 0x2a019c, 0x2a0198, 0x2a0194};
-
-static struct attn_hw_reg pswhst_prty1_bb_b0 = {
- 1, 17, 0x2a0200, 0x2a020c, 0x2a0208, 0x2a0204};
-
-static struct attn_hw_reg *pswhst_prty_bb_b0_regs[2] = {
- &pswhst_prty0_bb_b0, &pswhst_prty1_bb_b0};
-
-static struct attn_hw_reg pswhst2_int0_bb_b0 = {
- 0, 5, 0x29e180, 0x29e18c, 0x29e188, 0x29e184};
-
-static struct attn_hw_reg *pswhst2_int_bb_b0_regs[1] = {
- &pswhst2_int0_bb_b0};
-
-static struct attn_hw_reg pswhst2_prty0_bb_b0 = {
- 0, 1, 0x29e190, 0x29e19c, 0x29e198, 0x29e194};
-
-static struct attn_hw_reg *pswhst2_prty_bb_b0_regs[1] = {
- &pswhst2_prty0_bb_b0};
-
-static struct attn_hw_reg pswrd_int0_bb_b0 = {
- 0, 3, 0x29c180, 0x29c18c, 0x29c188, 0x29c184};
-
-static struct attn_hw_reg *pswrd_int_bb_b0_regs[1] = {
- &pswrd_int0_bb_b0};
-
-static struct attn_hw_reg pswrd_prty0_bb_b0 = {
- 0, 1, 0x29c190, 0x29c19c, 0x29c198, 0x29c194};
-
-static struct attn_hw_reg *pswrd_prty_bb_b0_regs[1] = {
- &pswrd_prty0_bb_b0};
-
-static struct attn_hw_reg pswrd2_int0_bb_b0 = {
- 0, 5, 0x29d180, 0x29d18c, 0x29d188, 0x29d184};
-
-static struct attn_hw_reg *pswrd2_int_bb_b0_regs[1] = {
- &pswrd2_int0_bb_b0};
-
-static struct attn_hw_reg pswrd2_prty0_bb_b0 = {
- 0, 1, 0x29d190, 0x29d19c, 0x29d198, 0x29d194};
-
-static struct attn_hw_reg pswrd2_prty1_bb_b0 = {
- 1, 31, 0x29d200, 0x29d20c, 0x29d208, 0x29d204};
-
-static struct attn_hw_reg pswrd2_prty2_bb_b0 = {
- 2, 3, 0x29d210, 0x29d21c, 0x29d218, 0x29d214};
-
-static struct attn_hw_reg *pswrd2_prty_bb_b0_regs[3] = {
- &pswrd2_prty0_bb_b0, &pswrd2_prty1_bb_b0, &pswrd2_prty2_bb_b0};
-
-static struct attn_hw_reg pswwr_int0_bb_b0 = {
- 0, 16, 0x29a180, 0x29a18c, 0x29a188, 0x29a184};
-
-static struct attn_hw_reg *pswwr_int_bb_b0_regs[1] = {
- &pswwr_int0_bb_b0};
-
-static struct attn_hw_reg pswwr_prty0_bb_b0 = {
- 0, 1, 0x29a190, 0x29a19c, 0x29a198, 0x29a194};
-
-static struct attn_hw_reg *pswwr_prty_bb_b0_regs[1] = {
- &pswwr_prty0_bb_b0};
-
-static struct attn_hw_reg pswwr2_int0_bb_b0 = {
- 0, 19, 0x29b180, 0x29b18c, 0x29b188, 0x29b184};
-
-static struct attn_hw_reg *pswwr2_int_bb_b0_regs[1] = {
- &pswwr2_int0_bb_b0};
-
-static struct attn_hw_reg pswwr2_prty0_bb_b0 = {
- 0, 1, 0x29b190, 0x29b19c, 0x29b198, 0x29b194};
-
-static struct attn_hw_reg pswwr2_prty1_bb_b0 = {
- 1, 31, 0x29b200, 0x29b20c, 0x29b208, 0x29b204};
-
-static struct attn_hw_reg pswwr2_prty2_bb_b0 = {
- 2, 31, 0x29b210, 0x29b21c, 0x29b218, 0x29b214};
-
-static struct attn_hw_reg pswwr2_prty3_bb_b0 = {
- 3, 31, 0x29b220, 0x29b22c, 0x29b228, 0x29b224};
-
-static struct attn_hw_reg pswwr2_prty4_bb_b0 = {
- 4, 20, 0x29b230, 0x29b23c, 0x29b238, 0x29b234};
-
-static struct attn_hw_reg *pswwr2_prty_bb_b0_regs[5] = {
- &pswwr2_prty0_bb_b0, &pswwr2_prty1_bb_b0, &pswwr2_prty2_bb_b0,
- &pswwr2_prty3_bb_b0, &pswwr2_prty4_bb_b0};
-
-static struct attn_hw_reg pswrq_int0_bb_b0 = {
- 0, 21, 0x280180, 0x28018c, 0x280188, 0x280184};
-
-static struct attn_hw_reg *pswrq_int_bb_b0_regs[1] = {
- &pswrq_int0_bb_b0};
-
-static struct attn_hw_reg pswrq_prty0_bb_b0 = {
- 0, 1, 0x280190, 0x28019c, 0x280198, 0x280194};
-
-static struct attn_hw_reg *pswrq_prty_bb_b0_regs[1] = {
- &pswrq_prty0_bb_b0};
-
-static struct attn_hw_reg pswrq2_int0_bb_b0 = {
- 0, 15, 0x240180, 0x24018c, 0x240188, 0x240184};
-
-static struct attn_hw_reg *pswrq2_int_bb_b0_regs[1] = {
- &pswrq2_int0_bb_b0};
-
-static struct attn_hw_reg pswrq2_prty1_bb_b0 = {
- 0, 9, 0x240200, 0x24020c, 0x240208, 0x240204};
-
-static struct attn_hw_reg *pswrq2_prty_bb_b0_regs[1] = {
- &pswrq2_prty1_bb_b0};
-
-static struct attn_hw_reg pglcs_int0_bb_b0 = {
- 0, 1, 0x1d00, 0x1d0c, 0x1d08, 0x1d04};
-
-static struct attn_hw_reg *pglcs_int_bb_b0_regs[1] = {
- &pglcs_int0_bb_b0};
-
-static struct attn_hw_reg dmae_int0_bb_b0 = {
- 0, 2, 0xc180, 0xc18c, 0xc188, 0xc184};
-
-static struct attn_hw_reg *dmae_int_bb_b0_regs[1] = {
- &dmae_int0_bb_b0};
-
-static struct attn_hw_reg dmae_prty1_bb_b0 = {
- 0, 3, 0xc200, 0xc20c, 0xc208, 0xc204};
-
-static struct attn_hw_reg *dmae_prty_bb_b0_regs[1] = {
- &dmae_prty1_bb_b0};
-
-static struct attn_hw_reg ptu_int0_bb_b0 = {
- 0, 8, 0x560180, 0x56018c, 0x560188, 0x560184};
-
-static struct attn_hw_reg *ptu_int_bb_b0_regs[1] = {
- &ptu_int0_bb_b0};
-
-static struct attn_hw_reg ptu_prty1_bb_b0 = {
- 0, 18, 0x560200, 0x56020c, 0x560208, 0x560204};
-
-static struct attn_hw_reg *ptu_prty_bb_b0_regs[1] = {
- &ptu_prty1_bb_b0};
-
-static struct attn_hw_reg tcm_int0_bb_b0 = {
- 0, 8, 0x1180180, 0x118018c, 0x1180188, 0x1180184};
-
-static struct attn_hw_reg tcm_int1_bb_b0 = {
- 1, 32, 0x1180190, 0x118019c, 0x1180198, 0x1180194};
-
-static struct attn_hw_reg tcm_int2_bb_b0 = {
- 2, 1, 0x11801a0, 0x11801ac, 0x11801a8, 0x11801a4};
-
-static struct attn_hw_reg *tcm_int_bb_b0_regs[3] = {
- &tcm_int0_bb_b0, &tcm_int1_bb_b0, &tcm_int2_bb_b0};
-
-static struct attn_hw_reg tcm_prty1_bb_b0 = {
- 0, 31, 0x1180200, 0x118020c, 0x1180208, 0x1180204};
-
-static struct attn_hw_reg tcm_prty2_bb_b0 = {
- 1, 2, 0x1180210, 0x118021c, 0x1180218, 0x1180214};
-
-static struct attn_hw_reg *tcm_prty_bb_b0_regs[2] = {
- &tcm_prty1_bb_b0, &tcm_prty2_bb_b0};
-
-static struct attn_hw_reg mcm_int0_bb_b0 = {
- 0, 14, 0x1200180, 0x120018c, 0x1200188, 0x1200184};
-
-static struct attn_hw_reg mcm_int1_bb_b0 = {
- 1, 26, 0x1200190, 0x120019c, 0x1200198, 0x1200194};
-
-static struct attn_hw_reg mcm_int2_bb_b0 = {
- 2, 1, 0x12001a0, 0x12001ac, 0x12001a8, 0x12001a4};
-
-static struct attn_hw_reg *mcm_int_bb_b0_regs[3] = {
- &mcm_int0_bb_b0, &mcm_int1_bb_b0, &mcm_int2_bb_b0};
-
-static struct attn_hw_reg mcm_prty1_bb_b0 = {
- 0, 31, 0x1200200, 0x120020c, 0x1200208, 0x1200204};
-
-static struct attn_hw_reg mcm_prty2_bb_b0 = {
- 1, 4, 0x1200210, 0x120021c, 0x1200218, 0x1200214};
-
-static struct attn_hw_reg *mcm_prty_bb_b0_regs[2] = {
- &mcm_prty1_bb_b0, &mcm_prty2_bb_b0};
-
-static struct attn_hw_reg ucm_int0_bb_b0 = {
- 0, 17, 0x1280180, 0x128018c, 0x1280188, 0x1280184};
-
-static struct attn_hw_reg ucm_int1_bb_b0 = {
- 1, 29, 0x1280190, 0x128019c, 0x1280198, 0x1280194};
-
-static struct attn_hw_reg ucm_int2_bb_b0 = {
- 2, 1, 0x12801a0, 0x12801ac, 0x12801a8, 0x12801a4};
-
-static struct attn_hw_reg *ucm_int_bb_b0_regs[3] = {
- &ucm_int0_bb_b0, &ucm_int1_bb_b0, &ucm_int2_bb_b0};
-
-static struct attn_hw_reg ucm_prty1_bb_b0 = {
- 0, 31, 0x1280200, 0x128020c, 0x1280208, 0x1280204};
-
-static struct attn_hw_reg ucm_prty2_bb_b0 = {
- 1, 7, 0x1280210, 0x128021c, 0x1280218, 0x1280214};
-
-static struct attn_hw_reg *ucm_prty_bb_b0_regs[2] = {
- &ucm_prty1_bb_b0, &ucm_prty2_bb_b0};
-
-static struct attn_hw_reg xcm_int0_bb_b0 = {
- 0, 16, 0x1000180, 0x100018c, 0x1000188, 0x1000184};
-
-static struct attn_hw_reg xcm_int1_bb_b0 = {
- 1, 25, 0x1000190, 0x100019c, 0x1000198, 0x1000194};
-
-static struct attn_hw_reg xcm_int2_bb_b0 = {
- 2, 8, 0x10001a0, 0x10001ac, 0x10001a8, 0x10001a4};
-
-static struct attn_hw_reg *xcm_int_bb_b0_regs[3] = {
- &xcm_int0_bb_b0, &xcm_int1_bb_b0, &xcm_int2_bb_b0};
-
-static struct attn_hw_reg xcm_prty1_bb_b0 = {
- 0, 31, 0x1000200, 0x100020c, 0x1000208, 0x1000204};
-
-static struct attn_hw_reg xcm_prty2_bb_b0 = {
- 1, 11, 0x1000210, 0x100021c, 0x1000218, 0x1000214};
-
-static struct attn_hw_reg *xcm_prty_bb_b0_regs[2] = {
- &xcm_prty1_bb_b0, &xcm_prty2_bb_b0};
-
-static struct attn_hw_reg ycm_int0_bb_b0 = {
- 0, 13, 0x1080180, 0x108018c, 0x1080188, 0x1080184};
-
-static struct attn_hw_reg ycm_int1_bb_b0 = {
- 1, 23, 0x1080190, 0x108019c, 0x1080198, 0x1080194};
-
-static struct attn_hw_reg ycm_int2_bb_b0 = {
- 2, 1, 0x10801a0, 0x10801ac, 0x10801a8, 0x10801a4};
-
-static struct attn_hw_reg *ycm_int_bb_b0_regs[3] = {
- &ycm_int0_bb_b0, &ycm_int1_bb_b0, &ycm_int2_bb_b0};
-
-static struct attn_hw_reg ycm_prty1_bb_b0 = {
- 0, 31, 0x1080200, 0x108020c, 0x1080208, 0x1080204};
-
-static struct attn_hw_reg ycm_prty2_bb_b0 = {
- 1, 3, 0x1080210, 0x108021c, 0x1080218, 0x1080214};
-
-static struct attn_hw_reg *ycm_prty_bb_b0_regs[2] = {
- &ycm_prty1_bb_b0, &ycm_prty2_bb_b0};
-
-static struct attn_hw_reg pcm_int0_bb_b0 = {
- 0, 5, 0x1100180, 0x110018c, 0x1100188, 0x1100184};
-
-static struct attn_hw_reg pcm_int1_bb_b0 = {
- 1, 14, 0x1100190, 0x110019c, 0x1100198, 0x1100194};
-
-static struct attn_hw_reg pcm_int2_bb_b0 = {
- 2, 1, 0x11001a0, 0x11001ac, 0x11001a8, 0x11001a4};
-
-static struct attn_hw_reg *pcm_int_bb_b0_regs[3] = {
- &pcm_int0_bb_b0, &pcm_int1_bb_b0, &pcm_int2_bb_b0};
-
-static struct attn_hw_reg pcm_prty1_bb_b0 = {
- 0, 11, 0x1100200, 0x110020c, 0x1100208, 0x1100204};
-
-static struct attn_hw_reg *pcm_prty_bb_b0_regs[1] = {
- &pcm_prty1_bb_b0};
-
-static struct attn_hw_reg qm_int0_bb_b0 = {
- 0, 22, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184};
-
-static struct attn_hw_reg *qm_int_bb_b0_regs[1] = {
- &qm_int0_bb_b0};
-
-static struct attn_hw_reg qm_prty0_bb_b0 = {
- 0, 11, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194};
-
-static struct attn_hw_reg qm_prty1_bb_b0 = {
- 1, 31, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204};
-
-static struct attn_hw_reg qm_prty2_bb_b0 = {
- 2, 31, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214};
-
-static struct attn_hw_reg qm_prty3_bb_b0 = {
- 3, 11, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224};
-
-static struct attn_hw_reg *qm_prty_bb_b0_regs[4] = {
- &qm_prty0_bb_b0, &qm_prty1_bb_b0, &qm_prty2_bb_b0, &qm_prty3_bb_b0};
-
-static struct attn_hw_reg tm_int0_bb_b0 = {
- 0, 32, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184};
-
-static struct attn_hw_reg tm_int1_bb_b0 = {
- 1, 11, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194};
-
-static struct attn_hw_reg *tm_int_bb_b0_regs[2] = {
- &tm_int0_bb_b0, &tm_int1_bb_b0};
-
-static struct attn_hw_reg tm_prty1_bb_b0 = {
- 0, 17, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204};
-
-static struct attn_hw_reg *tm_prty_bb_b0_regs[1] = {
- &tm_prty1_bb_b0};
-
-static struct attn_hw_reg dorq_int0_bb_b0 = {
- 0, 9, 0x100180, 0x10018c, 0x100188, 0x100184};
-
-static struct attn_hw_reg *dorq_int_bb_b0_regs[1] = {
- &dorq_int0_bb_b0};
-
-static struct attn_hw_reg dorq_prty0_bb_b0 = {
- 0, 1, 0x100190, 0x10019c, 0x100198, 0x100194};
-
-static struct attn_hw_reg dorq_prty1_bb_b0 = {
- 1, 6, 0x100200, 0x10020c, 0x100208, 0x100204};
-
-static struct attn_hw_reg *dorq_prty_bb_b0_regs[2] = {
- &dorq_prty0_bb_b0, &dorq_prty1_bb_b0};
-
-static struct attn_hw_reg brb_int0_bb_b0 = {
- 0, 32, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4};
-
-static struct attn_hw_reg brb_int1_bb_b0 = {
- 1, 30, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc};
-
-static struct attn_hw_reg brb_int2_bb_b0 = {
- 2, 28, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4};
-
-static struct attn_hw_reg brb_int3_bb_b0 = {
- 3, 31, 0x340108, 0x340114, 0x340110, 0x34010c};
-
-static struct attn_hw_reg brb_int4_bb_b0 = {
- 4, 27, 0x340120, 0x34012c, 0x340128, 0x340124};
-
-static struct attn_hw_reg brb_int5_bb_b0 = {
- 5, 1, 0x340138, 0x340144, 0x340140, 0x34013c};
-
-static struct attn_hw_reg brb_int6_bb_b0 = {
- 6, 8, 0x340150, 0x34015c, 0x340158, 0x340154};
-
-static struct attn_hw_reg brb_int7_bb_b0 = {
- 7, 32, 0x340168, 0x340174, 0x340170, 0x34016c};
-
-static struct attn_hw_reg brb_int8_bb_b0 = {
- 8, 17, 0x340184, 0x340190, 0x34018c, 0x340188};
-
-static struct attn_hw_reg brb_int9_bb_b0 = {
- 9, 1, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0};
-
-static struct attn_hw_reg brb_int10_bb_b0 = {
- 10, 14, 0x3401b4, 0x3401c0, 0x3401bc, 0x3401b8};
-
-static struct attn_hw_reg brb_int11_bb_b0 = {
- 11, 8, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0};
-
-static struct attn_hw_reg *brb_int_bb_b0_regs[12] = {
- &brb_int0_bb_b0, &brb_int1_bb_b0, &brb_int2_bb_b0, &brb_int3_bb_b0,
- &brb_int4_bb_b0, &brb_int5_bb_b0, &brb_int6_bb_b0, &brb_int7_bb_b0,
- &brb_int8_bb_b0, &brb_int9_bb_b0, &brb_int10_bb_b0, &brb_int11_bb_b0};
-
-static struct attn_hw_reg brb_prty0_bb_b0 = {
- 0, 5, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0};
-
-static struct attn_hw_reg brb_prty1_bb_b0 = {
- 1, 31, 0x340400, 0x34040c, 0x340408, 0x340404};
-
-static struct attn_hw_reg brb_prty2_bb_b0 = {
- 2, 14, 0x340410, 0x34041c, 0x340418, 0x340414};
-
-static struct attn_hw_reg *brb_prty_bb_b0_regs[3] = {
- &brb_prty0_bb_b0, &brb_prty1_bb_b0, &brb_prty2_bb_b0};
-
-static struct attn_hw_reg src_int0_bb_b0 = {
- 0, 1, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4};
-
-static struct attn_hw_reg *src_int_bb_b0_regs[1] = {
- &src_int0_bb_b0};
-
-static struct attn_hw_reg prs_int0_bb_b0 = {
- 0, 2, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044};
-
-static struct attn_hw_reg *prs_int_bb_b0_regs[1] = {
- &prs_int0_bb_b0};
-
-static struct attn_hw_reg prs_prty0_bb_b0 = {
- 0, 2, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054};
-
-static struct attn_hw_reg prs_prty1_bb_b0 = {
- 1, 31, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208};
-
-static struct attn_hw_reg prs_prty2_bb_b0 = {
- 2, 5, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218};
-
-static struct attn_hw_reg *prs_prty_bb_b0_regs[3] = {
- &prs_prty0_bb_b0, &prs_prty1_bb_b0, &prs_prty2_bb_b0};
-
-static struct attn_hw_reg tsdm_int0_bb_b0 = {
- 0, 26, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044};
-
-static struct attn_hw_reg *tsdm_int_bb_b0_regs[1] = {
- &tsdm_int0_bb_b0};
-
-static struct attn_hw_reg tsdm_prty1_bb_b0 = {
- 0, 10, 0xfb0200, 0xfb020c, 0xfb0208, 0xfb0204};
-
-static struct attn_hw_reg *tsdm_prty_bb_b0_regs[1] = {
- &tsdm_prty1_bb_b0};
-
-static struct attn_hw_reg msdm_int0_bb_b0 = {
- 0, 26, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044};
-
-static struct attn_hw_reg *msdm_int_bb_b0_regs[1] = {
- &msdm_int0_bb_b0};
-
-static struct attn_hw_reg msdm_prty1_bb_b0 = {
- 0, 11, 0xfc0200, 0xfc020c, 0xfc0208, 0xfc0204};
-
-static struct attn_hw_reg *msdm_prty_bb_b0_regs[1] = {
- &msdm_prty1_bb_b0};
-
-static struct attn_hw_reg usdm_int0_bb_b0 = {
- 0, 26, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044};
-
-static struct attn_hw_reg *usdm_int_bb_b0_regs[1] = {
- &usdm_int0_bb_b0};
-
-static struct attn_hw_reg usdm_prty1_bb_b0 = {
- 0, 10, 0xfd0200, 0xfd020c, 0xfd0208, 0xfd0204};
-
-static struct attn_hw_reg *usdm_prty_bb_b0_regs[1] = {
- &usdm_prty1_bb_b0};
-
-static struct attn_hw_reg xsdm_int0_bb_b0 = {
- 0, 26, 0xf80040, 0xf8004c, 0xf80048, 0xf80044};
-
-static struct attn_hw_reg *xsdm_int_bb_b0_regs[1] = {
- &xsdm_int0_bb_b0};
-
-static struct attn_hw_reg xsdm_prty1_bb_b0 = {
- 0, 10, 0xf80200, 0xf8020c, 0xf80208, 0xf80204};
-
-static struct attn_hw_reg *xsdm_prty_bb_b0_regs[1] = {
- &xsdm_prty1_bb_b0};
-
-static struct attn_hw_reg ysdm_int0_bb_b0 = {
- 0, 26, 0xf90040, 0xf9004c, 0xf90048, 0xf90044};
-
-static struct attn_hw_reg *ysdm_int_bb_b0_regs[1] = {
- &ysdm_int0_bb_b0};
-
-static struct attn_hw_reg ysdm_prty1_bb_b0 = {
- 0, 9, 0xf90200, 0xf9020c, 0xf90208, 0xf90204};
-
-static struct attn_hw_reg *ysdm_prty_bb_b0_regs[1] = {
- &ysdm_prty1_bb_b0};
-
-static struct attn_hw_reg psdm_int0_bb_b0 = {
- 0, 26, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044};
-
-static struct attn_hw_reg *psdm_int_bb_b0_regs[1] = {
- &psdm_int0_bb_b0};
-
-static struct attn_hw_reg psdm_prty1_bb_b0 = {
- 0, 9, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204};
-
-static struct attn_hw_reg *psdm_prty_bb_b0_regs[1] = {
- &psdm_prty1_bb_b0};
-
-static struct attn_hw_reg tsem_int0_bb_b0 = {
- 0, 32, 0x1700040, 0x170004c, 0x1700048, 0x1700044};
-
-static struct attn_hw_reg tsem_int1_bb_b0 = {
- 1, 13, 0x1700050, 0x170005c, 0x1700058, 0x1700054};
-
-static struct attn_hw_reg tsem_fast_memory_int0_bb_b0 = {
- 2, 1, 0x1740040, 0x174004c, 0x1740048, 0x1740044};
-
-static struct attn_hw_reg *tsem_int_bb_b0_regs[3] = {
- &tsem_int0_bb_b0, &tsem_int1_bb_b0, &tsem_fast_memory_int0_bb_b0};
-
-static struct attn_hw_reg tsem_prty0_bb_b0 = {
- 0, 3, 0x17000c8, 0x17000d4, 0x17000d0, 0x17000cc};
-
-static struct attn_hw_reg tsem_prty1_bb_b0 = {
- 1, 6, 0x1700200, 0x170020c, 0x1700208, 0x1700204};
-
-static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_b0 = {
- 2, 6, 0x174a200, 0x174a20c, 0x174a208, 0x174a204};
-
-static struct attn_hw_reg *tsem_prty_bb_b0_regs[3] = {
- &tsem_prty0_bb_b0, &tsem_prty1_bb_b0,
- &tsem_fast_memory_vfc_config_prty1_bb_b0};
-
-static struct attn_hw_reg msem_int0_bb_b0 = {
- 0, 32, 0x1800040, 0x180004c, 0x1800048, 0x1800044};
-
-static struct attn_hw_reg msem_int1_bb_b0 = {
- 1, 13, 0x1800050, 0x180005c, 0x1800058, 0x1800054};
-
-static struct attn_hw_reg msem_fast_memory_int0_bb_b0 = {
- 2, 1, 0x1840040, 0x184004c, 0x1840048, 0x1840044};
-
-static struct attn_hw_reg *msem_int_bb_b0_regs[3] = {
- &msem_int0_bb_b0, &msem_int1_bb_b0, &msem_fast_memory_int0_bb_b0};
-
-static struct attn_hw_reg msem_prty0_bb_b0 = {
- 0, 3, 0x18000c8, 0x18000d4, 0x18000d0, 0x18000cc};
-
-static struct attn_hw_reg msem_prty1_bb_b0 = {
- 1, 6, 0x1800200, 0x180020c, 0x1800208, 0x1800204};
-
-static struct attn_hw_reg *msem_prty_bb_b0_regs[2] = {
- &msem_prty0_bb_b0, &msem_prty1_bb_b0};
-
-static struct attn_hw_reg usem_int0_bb_b0 = {
- 0, 32, 0x1900040, 0x190004c, 0x1900048, 0x1900044};
-
-static struct attn_hw_reg usem_int1_bb_b0 = {
- 1, 13, 0x1900050, 0x190005c, 0x1900058, 0x1900054};
-
-static struct attn_hw_reg usem_fast_memory_int0_bb_b0 = {
- 2, 1, 0x1940040, 0x194004c, 0x1940048, 0x1940044};
-
-static struct attn_hw_reg *usem_int_bb_b0_regs[3] = {
- &usem_int0_bb_b0, &usem_int1_bb_b0, &usem_fast_memory_int0_bb_b0};
-
-static struct attn_hw_reg usem_prty0_bb_b0 = {
- 0, 3, 0x19000c8, 0x19000d4, 0x19000d0, 0x19000cc};
-
-static struct attn_hw_reg usem_prty1_bb_b0 = {
- 1, 6, 0x1900200, 0x190020c, 0x1900208, 0x1900204};
-
-static struct attn_hw_reg *usem_prty_bb_b0_regs[2] = {
- &usem_prty0_bb_b0, &usem_prty1_bb_b0};
-
-static struct attn_hw_reg xsem_int0_bb_b0 = {
- 0, 32, 0x1400040, 0x140004c, 0x1400048, 0x1400044};
-
-static struct attn_hw_reg xsem_int1_bb_b0 = {
- 1, 13, 0x1400050, 0x140005c, 0x1400058, 0x1400054};
-
-static struct attn_hw_reg xsem_fast_memory_int0_bb_b0 = {
- 2, 1, 0x1440040, 0x144004c, 0x1440048, 0x1440044};
-
-static struct attn_hw_reg *xsem_int_bb_b0_regs[3] = {
- &xsem_int0_bb_b0, &xsem_int1_bb_b0, &xsem_fast_memory_int0_bb_b0};
-
-static struct attn_hw_reg xsem_prty0_bb_b0 = {
- 0, 3, 0x14000c8, 0x14000d4, 0x14000d0, 0x14000cc};
-
-static struct attn_hw_reg xsem_prty1_bb_b0 = {
- 1, 7, 0x1400200, 0x140020c, 0x1400208, 0x1400204};
-
-static struct attn_hw_reg *xsem_prty_bb_b0_regs[2] = {
- &xsem_prty0_bb_b0, &xsem_prty1_bb_b0};
-
-static struct attn_hw_reg ysem_int0_bb_b0 = {
- 0, 32, 0x1500040, 0x150004c, 0x1500048, 0x1500044};
-
-static struct attn_hw_reg ysem_int1_bb_b0 = {
- 1, 13, 0x1500050, 0x150005c, 0x1500058, 0x1500054};
-
-static struct attn_hw_reg ysem_fast_memory_int0_bb_b0 = {
- 2, 1, 0x1540040, 0x154004c, 0x1540048, 0x1540044};
-
-static struct attn_hw_reg *ysem_int_bb_b0_regs[3] = {
- &ysem_int0_bb_b0, &ysem_int1_bb_b0, &ysem_fast_memory_int0_bb_b0};
-
-static struct attn_hw_reg ysem_prty0_bb_b0 = {
- 0, 3, 0x15000c8, 0x15000d4, 0x15000d0, 0x15000cc};
-
-static struct attn_hw_reg ysem_prty1_bb_b0 = {
- 1, 7, 0x1500200, 0x150020c, 0x1500208, 0x1500204};
-
-static struct attn_hw_reg *ysem_prty_bb_b0_regs[2] = {
- &ysem_prty0_bb_b0, &ysem_prty1_bb_b0};
-
-static struct attn_hw_reg psem_int0_bb_b0 = {
- 0, 32, 0x1600040, 0x160004c, 0x1600048, 0x1600044};
-
-static struct attn_hw_reg psem_int1_bb_b0 = {
- 1, 13, 0x1600050, 0x160005c, 0x1600058, 0x1600054};
-
-static struct attn_hw_reg psem_fast_memory_int0_bb_b0 = {
- 2, 1, 0x1640040, 0x164004c, 0x1640048, 0x1640044};
-
-static struct attn_hw_reg *psem_int_bb_b0_regs[3] = {
- &psem_int0_bb_b0, &psem_int1_bb_b0, &psem_fast_memory_int0_bb_b0};
-
-static struct attn_hw_reg psem_prty0_bb_b0 = {
- 0, 3, 0x16000c8, 0x16000d4, 0x16000d0, 0x16000cc};
-
-static struct attn_hw_reg psem_prty1_bb_b0 = {
- 1, 6, 0x1600200, 0x160020c, 0x1600208, 0x1600204};
-
-static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_b0 = {
- 2, 6, 0x164a200, 0x164a20c, 0x164a208, 0x164a204};
-
-static struct attn_hw_reg *psem_prty_bb_b0_regs[3] = {
- &psem_prty0_bb_b0, &psem_prty1_bb_b0,
- &psem_fast_memory_vfc_config_prty1_bb_b0};
-
-static struct attn_hw_reg rss_int0_bb_b0 = {
- 0, 12, 0x238980, 0x23898c, 0x238988, 0x238984};
-
-static struct attn_hw_reg *rss_int_bb_b0_regs[1] = {
- &rss_int0_bb_b0};
-
-static struct attn_hw_reg rss_prty1_bb_b0 = {
- 0, 4, 0x238a00, 0x238a0c, 0x238a08, 0x238a04};
-
-static struct attn_hw_reg *rss_prty_bb_b0_regs[1] = {
- &rss_prty1_bb_b0};
-
-static struct attn_hw_reg tmld_int0_bb_b0 = {
- 0, 6, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184};
-
-static struct attn_hw_reg *tmld_int_bb_b0_regs[1] = {
- &tmld_int0_bb_b0};
-
-static struct attn_hw_reg tmld_prty1_bb_b0 = {
- 0, 8, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204};
-
-static struct attn_hw_reg *tmld_prty_bb_b0_regs[1] = {
- &tmld_prty1_bb_b0};
-
-static struct attn_hw_reg muld_int0_bb_b0 = {
- 0, 6, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184};
-
-static struct attn_hw_reg *muld_int_bb_b0_regs[1] = {
- &muld_int0_bb_b0};
-
-static struct attn_hw_reg muld_prty1_bb_b0 = {
- 0, 10, 0x4e0200, 0x4e020c, 0x4e0208, 0x4e0204};
-
-static struct attn_hw_reg *muld_prty_bb_b0_regs[1] = {
- &muld_prty1_bb_b0};
-
-static struct attn_hw_reg yuld_int0_bb_b0 = {
- 0, 6, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184};
-
-static struct attn_hw_reg *yuld_int_bb_b0_regs[1] = {
- &yuld_int0_bb_b0};
-
-static struct attn_hw_reg yuld_prty1_bb_b0 = {
- 0, 6, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204};
-
-static struct attn_hw_reg *yuld_prty_bb_b0_regs[1] = {
- &yuld_prty1_bb_b0};
-
-static struct attn_hw_reg xyld_int0_bb_b0 = {
- 0, 6, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184};
-
-static struct attn_hw_reg *xyld_int_bb_b0_regs[1] = {
- &xyld_int0_bb_b0};
-
-static struct attn_hw_reg xyld_prty1_bb_b0 = {
- 0, 9, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204};
-
-static struct attn_hw_reg *xyld_prty_bb_b0_regs[1] = {
- &xyld_prty1_bb_b0};
-
-static struct attn_hw_reg prm_int0_bb_b0 = {
- 0, 11, 0x230040, 0x23004c, 0x230048, 0x230044};
-
-static struct attn_hw_reg *prm_int_bb_b0_regs[1] = {
- &prm_int0_bb_b0};
-
-static struct attn_hw_reg prm_prty0_bb_b0 = {
- 0, 1, 0x230050, 0x23005c, 0x230058, 0x230054};
-
-static struct attn_hw_reg prm_prty1_bb_b0 = {
- 1, 24, 0x230200, 0x23020c, 0x230208, 0x230204};
-
-static struct attn_hw_reg *prm_prty_bb_b0_regs[2] = {
- &prm_prty0_bb_b0, &prm_prty1_bb_b0};
-
-static struct attn_hw_reg pbf_pb1_int0_bb_b0 = {
- 0, 9, 0xda0040, 0xda004c, 0xda0048, 0xda0044};
-
-static struct attn_hw_reg *pbf_pb1_int_bb_b0_regs[1] = {
- &pbf_pb1_int0_bb_b0};
-
-static struct attn_hw_reg pbf_pb1_prty0_bb_b0 = {
- 0, 1, 0xda0050, 0xda005c, 0xda0058, 0xda0054};
-
-static struct attn_hw_reg *pbf_pb1_prty_bb_b0_regs[1] = {
- &pbf_pb1_prty0_bb_b0};
-
-static struct attn_hw_reg pbf_pb2_int0_bb_b0 = {
- 0, 9, 0xda4040, 0xda404c, 0xda4048, 0xda4044};
-
-static struct attn_hw_reg *pbf_pb2_int_bb_b0_regs[1] = {
- &pbf_pb2_int0_bb_b0};
-
-static struct attn_hw_reg pbf_pb2_prty0_bb_b0 = {
- 0, 1, 0xda4050, 0xda405c, 0xda4058, 0xda4054};
-
-static struct attn_hw_reg *pbf_pb2_prty_bb_b0_regs[1] = {
- &pbf_pb2_prty0_bb_b0};
-
-static struct attn_hw_reg rpb_int0_bb_b0 = {
- 0, 9, 0x23c040, 0x23c04c, 0x23c048, 0x23c044};
-
-static struct attn_hw_reg *rpb_int_bb_b0_regs[1] = {
- &rpb_int0_bb_b0};
-
-static struct attn_hw_reg rpb_prty0_bb_b0 = {
- 0, 1, 0x23c050, 0x23c05c, 0x23c058, 0x23c054};
-
-static struct attn_hw_reg *rpb_prty_bb_b0_regs[1] = {
- &rpb_prty0_bb_b0};
-
-static struct attn_hw_reg btb_int0_bb_b0 = {
- 0, 16, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4};
-
-static struct attn_hw_reg btb_int1_bb_b0 = {
- 1, 16, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc};
-
-static struct attn_hw_reg btb_int2_bb_b0 = {
- 2, 4, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4};
-
-static struct attn_hw_reg btb_int3_bb_b0 = {
- 3, 32, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c};
-
-static struct attn_hw_reg btb_int4_bb_b0 = {
- 4, 23, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124};
-
-static struct attn_hw_reg btb_int5_bb_b0 = {
- 5, 32, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c};
-
-static struct attn_hw_reg btb_int6_bb_b0 = {
- 6, 1, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154};
-
-static struct attn_hw_reg btb_int8_bb_b0 = {
- 7, 1, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188};
-
-static struct attn_hw_reg btb_int9_bb_b0 = {
- 8, 1, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0};
-
-static struct attn_hw_reg btb_int10_bb_b0 = {
- 9, 1, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8};
-
-static struct attn_hw_reg btb_int11_bb_b0 = {
- 10, 2, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0};
-
-static struct attn_hw_reg *btb_int_bb_b0_regs[11] = {
- &btb_int0_bb_b0, &btb_int1_bb_b0, &btb_int2_bb_b0, &btb_int3_bb_b0,
- &btb_int4_bb_b0, &btb_int5_bb_b0, &btb_int6_bb_b0, &btb_int8_bb_b0,
- &btb_int9_bb_b0, &btb_int10_bb_b0, &btb_int11_bb_b0};
-
-static struct attn_hw_reg btb_prty0_bb_b0 = {
- 0, 5, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0};
-
-static struct attn_hw_reg btb_prty1_bb_b0 = {
- 1, 23, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404};
-
-static struct attn_hw_reg *btb_prty_bb_b0_regs[2] = {
- &btb_prty0_bb_b0, &btb_prty1_bb_b0};
-
-static struct attn_hw_reg pbf_int0_bb_b0 = {
- 0, 1, 0xd80180, 0xd8018c, 0xd80188, 0xd80184};
-
-static struct attn_hw_reg *pbf_int_bb_b0_regs[1] = {
- &pbf_int0_bb_b0};
-
-static struct attn_hw_reg pbf_prty0_bb_b0 = {
- 0, 1, 0xd80190, 0xd8019c, 0xd80198, 0xd80194};
-
-static struct attn_hw_reg pbf_prty1_bb_b0 = {
- 1, 31, 0xd80200, 0xd8020c, 0xd80208, 0xd80204};
-
-static struct attn_hw_reg pbf_prty2_bb_b0 = {
- 2, 27, 0xd80210, 0xd8021c, 0xd80218, 0xd80214};
-
-static struct attn_hw_reg *pbf_prty_bb_b0_regs[3] = {
- &pbf_prty0_bb_b0, &pbf_prty1_bb_b0, &pbf_prty2_bb_b0};
-
-static struct attn_hw_reg rdif_int0_bb_b0 = {
- 0, 8, 0x300180, 0x30018c, 0x300188, 0x300184};
-
-static struct attn_hw_reg *rdif_int_bb_b0_regs[1] = {
- &rdif_int0_bb_b0};
-
-static struct attn_hw_reg rdif_prty0_bb_b0 = {
- 0, 1, 0x300190, 0x30019c, 0x300198, 0x300194};
-
-static struct attn_hw_reg *rdif_prty_bb_b0_regs[1] = {
- &rdif_prty0_bb_b0};
-
-static struct attn_hw_reg tdif_int0_bb_b0 = {
- 0, 8, 0x310180, 0x31018c, 0x310188, 0x310184};
-
-static struct attn_hw_reg *tdif_int_bb_b0_regs[1] = {
- &tdif_int0_bb_b0};
-
-static struct attn_hw_reg tdif_prty0_bb_b0 = {
- 0, 1, 0x310190, 0x31019c, 0x310198, 0x310194};
-
-static struct attn_hw_reg tdif_prty1_bb_b0 = {
- 1, 11, 0x310200, 0x31020c, 0x310208, 0x310204};
-
-static struct attn_hw_reg *tdif_prty_bb_b0_regs[2] = {
- &tdif_prty0_bb_b0, &tdif_prty1_bb_b0};
-
-static struct attn_hw_reg cdu_int0_bb_b0 = {
- 0, 8, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc};
-
-static struct attn_hw_reg *cdu_int_bb_b0_regs[1] = {
- &cdu_int0_bb_b0};
-
-static struct attn_hw_reg cdu_prty1_bb_b0 = {
- 0, 5, 0x580200, 0x58020c, 0x580208, 0x580204};
-
-static struct attn_hw_reg *cdu_prty_bb_b0_regs[1] = {
- &cdu_prty1_bb_b0};
-
-static struct attn_hw_reg ccfc_int0_bb_b0 = {
- 0, 2, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184};
-
-static struct attn_hw_reg *ccfc_int_bb_b0_regs[1] = {
- &ccfc_int0_bb_b0};
-
-static struct attn_hw_reg ccfc_prty1_bb_b0 = {
- 0, 2, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204};
-
-static struct attn_hw_reg ccfc_prty0_bb_b0 = {
- 1, 6, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8};
-
-static struct attn_hw_reg *ccfc_prty_bb_b0_regs[2] = {
- &ccfc_prty1_bb_b0, &ccfc_prty0_bb_b0};
-
-static struct attn_hw_reg tcfc_int0_bb_b0 = {
- 0, 2, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184};
-
-static struct attn_hw_reg *tcfc_int_bb_b0_regs[1] = {
- &tcfc_int0_bb_b0};
-
-static struct attn_hw_reg tcfc_prty1_bb_b0 = {
- 0, 2, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204};
-
-static struct attn_hw_reg tcfc_prty0_bb_b0 = {
- 1, 6, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8};
-
-static struct attn_hw_reg *tcfc_prty_bb_b0_regs[2] = {
- &tcfc_prty1_bb_b0, &tcfc_prty0_bb_b0};
-
-static struct attn_hw_reg igu_int0_bb_b0 = {
- 0, 11, 0x180180, 0x18018c, 0x180188, 0x180184};
-
-static struct attn_hw_reg *igu_int_bb_b0_regs[1] = {
- &igu_int0_bb_b0};
-
-static struct attn_hw_reg igu_prty0_bb_b0 = {
- 0, 1, 0x180190, 0x18019c, 0x180198, 0x180194};
-
-static struct attn_hw_reg igu_prty1_bb_b0 = {
- 1, 31, 0x180200, 0x18020c, 0x180208, 0x180204};
-
-static struct attn_hw_reg igu_prty2_bb_b0 = {
- 2, 1, 0x180210, 0x18021c, 0x180218, 0x180214};
-
-static struct attn_hw_reg *igu_prty_bb_b0_regs[3] = {
- &igu_prty0_bb_b0, &igu_prty1_bb_b0, &igu_prty2_bb_b0};
-
-static struct attn_hw_reg cau_int0_bb_b0 = {
- 0, 11, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0};
-
-static struct attn_hw_reg *cau_int_bb_b0_regs[1] = {
- &cau_int0_bb_b0};
-
-static struct attn_hw_reg cau_prty1_bb_b0 = {
- 0, 13, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204};
-
-static struct attn_hw_reg *cau_prty_bb_b0_regs[1] = {
- &cau_prty1_bb_b0};
-
-static struct attn_hw_reg dbg_int0_bb_b0 = {
- 0, 1, 0x10180, 0x1018c, 0x10188, 0x10184};
-
-static struct attn_hw_reg *dbg_int_bb_b0_regs[1] = {
- &dbg_int0_bb_b0};
-
-static struct attn_hw_reg dbg_prty1_bb_b0 = {
- 0, 1, 0x10200, 0x1020c, 0x10208, 0x10204};
-
-static struct attn_hw_reg *dbg_prty_bb_b0_regs[1] = {
- &dbg_prty1_bb_b0};
-
-static struct attn_hw_reg nig_int0_bb_b0 = {
- 0, 12, 0x500040, 0x50004c, 0x500048, 0x500044};
-
-static struct attn_hw_reg nig_int1_bb_b0 = {
- 1, 32, 0x500050, 0x50005c, 0x500058, 0x500054};
-
-static struct attn_hw_reg nig_int2_bb_b0 = {
- 2, 20, 0x500060, 0x50006c, 0x500068, 0x500064};
-
-static struct attn_hw_reg nig_int3_bb_b0 = {
- 3, 18, 0x500070, 0x50007c, 0x500078, 0x500074};
-
-static struct attn_hw_reg nig_int4_bb_b0 = {
- 4, 20, 0x500080, 0x50008c, 0x500088, 0x500084};
-
-static struct attn_hw_reg nig_int5_bb_b0 = {
- 5, 18, 0x500090, 0x50009c, 0x500098, 0x500094};
-
-static struct attn_hw_reg *nig_int_bb_b0_regs[6] = {
- &nig_int0_bb_b0, &nig_int1_bb_b0, &nig_int2_bb_b0, &nig_int3_bb_b0,
- &nig_int4_bb_b0, &nig_int5_bb_b0};
-
-static struct attn_hw_reg nig_prty0_bb_b0 = {
- 0, 1, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4};
-
-static struct attn_hw_reg nig_prty1_bb_b0 = {
- 1, 31, 0x500200, 0x50020c, 0x500208, 0x500204};
-
-static struct attn_hw_reg nig_prty2_bb_b0 = {
- 2, 31, 0x500210, 0x50021c, 0x500218, 0x500214};
-
-static struct attn_hw_reg nig_prty3_bb_b0 = {
- 3, 31, 0x500220, 0x50022c, 0x500228, 0x500224};
-
-static struct attn_hw_reg nig_prty4_bb_b0 = {
- 4, 17, 0x500230, 0x50023c, 0x500238, 0x500234};
-
-static struct attn_hw_reg *nig_prty_bb_b0_regs[5] = {
- &nig_prty0_bb_b0, &nig_prty1_bb_b0, &nig_prty2_bb_b0,
- &nig_prty3_bb_b0, &nig_prty4_bb_b0};
-
-static struct attn_hw_reg ipc_int0_bb_b0 = {
- 0, 13, 0x2050c, 0x20518, 0x20514, 0x20510};
-
-static struct attn_hw_reg *ipc_int_bb_b0_regs[1] = {
- &ipc_int0_bb_b0};
-
-static struct attn_hw_reg ipc_prty0_bb_b0 = {
- 0, 1, 0x2051c, 0x20528, 0x20524, 0x20520};
-
-static struct attn_hw_reg *ipc_prty_bb_b0_regs[1] = {
- &ipc_prty0_bb_b0};
-
-static struct attn_hw_block attn_blocks[] = {
- {"grc", {{1, 1, grc_int_bb_b0_regs, grc_prty_bb_b0_regs} } },
- {"miscs", {{2, 1, miscs_int_bb_b0_regs, miscs_prty_bb_b0_regs} } },
- {"misc", {{1, 0, misc_int_bb_b0_regs, NULL} } },
- {"dbu", {{0, 0, NULL, NULL} } },
- {"pglue_b", {{1, 2, pglue_b_int_bb_b0_regs,
- pglue_b_prty_bb_b0_regs} } },
- {"cnig", {{1, 1, cnig_int_bb_b0_regs, cnig_prty_bb_b0_regs} } },
- {"cpmu", {{1, 0, cpmu_int_bb_b0_regs, NULL} } },
- {"ncsi", {{1, 1, ncsi_int_bb_b0_regs, ncsi_prty_bb_b0_regs} } },
- {"opte", {{0, 2, NULL, opte_prty_bb_b0_regs} } },
- {"bmb", {{12, 3, bmb_int_bb_b0_regs, bmb_prty_bb_b0_regs} } },
- {"pcie", {{0, 1, NULL, pcie_prty_bb_b0_regs} } },
- {"mcp", {{0, 0, NULL, NULL} } },
- {"mcp2", {{0, 2, NULL, mcp2_prty_bb_b0_regs} } },
- {"pswhst", {{1, 2, pswhst_int_bb_b0_regs, pswhst_prty_bb_b0_regs} } },
- {"pswhst2", {{1, 1, pswhst2_int_bb_b0_regs,
- pswhst2_prty_bb_b0_regs} } },
- {"pswrd", {{1, 1, pswrd_int_bb_b0_regs, pswrd_prty_bb_b0_regs} } },
- {"pswrd2", {{1, 3, pswrd2_int_bb_b0_regs, pswrd2_prty_bb_b0_regs} } },
- {"pswwr", {{1, 1, pswwr_int_bb_b0_regs, pswwr_prty_bb_b0_regs} } },
- {"pswwr2", {{1, 5, pswwr2_int_bb_b0_regs, pswwr2_prty_bb_b0_regs} } },
- {"pswrq", {{1, 1, pswrq_int_bb_b0_regs, pswrq_prty_bb_b0_regs} } },
- {"pswrq2", {{1, 1, pswrq2_int_bb_b0_regs, pswrq2_prty_bb_b0_regs} } },
- {"pglcs", {{1, 0, pglcs_int_bb_b0_regs, NULL} } },
- {"dmae", {{1, 1, dmae_int_bb_b0_regs, dmae_prty_bb_b0_regs} } },
- {"ptu", {{1, 1, ptu_int_bb_b0_regs, ptu_prty_bb_b0_regs} } },
- {"tcm", {{3, 2, tcm_int_bb_b0_regs, tcm_prty_bb_b0_regs} } },
- {"mcm", {{3, 2, mcm_int_bb_b0_regs, mcm_prty_bb_b0_regs} } },
- {"ucm", {{3, 2, ucm_int_bb_b0_regs, ucm_prty_bb_b0_regs} } },
- {"xcm", {{3, 2, xcm_int_bb_b0_regs, xcm_prty_bb_b0_regs} } },
- {"ycm", {{3, 2, ycm_int_bb_b0_regs, ycm_prty_bb_b0_regs} } },
- {"pcm", {{3, 1, pcm_int_bb_b0_regs, pcm_prty_bb_b0_regs} } },
- {"qm", {{1, 4, qm_int_bb_b0_regs, qm_prty_bb_b0_regs} } },
- {"tm", {{2, 1, tm_int_bb_b0_regs, tm_prty_bb_b0_regs} } },
- {"dorq", {{1, 2, dorq_int_bb_b0_regs, dorq_prty_bb_b0_regs} } },
- {"brb", {{12, 3, brb_int_bb_b0_regs, brb_prty_bb_b0_regs} } },
- {"src", {{1, 0, src_int_bb_b0_regs, NULL} } },
- {"prs", {{1, 3, prs_int_bb_b0_regs, prs_prty_bb_b0_regs} } },
- {"tsdm", {{1, 1, tsdm_int_bb_b0_regs, tsdm_prty_bb_b0_regs} } },
- {"msdm", {{1, 1, msdm_int_bb_b0_regs, msdm_prty_bb_b0_regs} } },
- {"usdm", {{1, 1, usdm_int_bb_b0_regs, usdm_prty_bb_b0_regs} } },
- {"xsdm", {{1, 1, xsdm_int_bb_b0_regs, xsdm_prty_bb_b0_regs} } },
- {"ysdm", {{1, 1, ysdm_int_bb_b0_regs, ysdm_prty_bb_b0_regs} } },
- {"psdm", {{1, 1, psdm_int_bb_b0_regs, psdm_prty_bb_b0_regs} } },
- {"tsem", {{3, 3, tsem_int_bb_b0_regs, tsem_prty_bb_b0_regs} } },
- {"msem", {{3, 2, msem_int_bb_b0_regs, msem_prty_bb_b0_regs} } },
- {"usem", {{3, 2, usem_int_bb_b0_regs, usem_prty_bb_b0_regs} } },
- {"xsem", {{3, 2, xsem_int_bb_b0_regs, xsem_prty_bb_b0_regs} } },
- {"ysem", {{3, 2, ysem_int_bb_b0_regs, ysem_prty_bb_b0_regs} } },
- {"psem", {{3, 3, psem_int_bb_b0_regs, psem_prty_bb_b0_regs} } },
- {"rss", {{1, 1, rss_int_bb_b0_regs, rss_prty_bb_b0_regs} } },
- {"tmld", {{1, 1, tmld_int_bb_b0_regs, tmld_prty_bb_b0_regs} } },
- {"muld", {{1, 1, muld_int_bb_b0_regs, muld_prty_bb_b0_regs} } },
- {"yuld", {{1, 1, yuld_int_bb_b0_regs, yuld_prty_bb_b0_regs} } },
- {"xyld", {{1, 1, xyld_int_bb_b0_regs, xyld_prty_bb_b0_regs} } },
- {"prm", {{1, 2, prm_int_bb_b0_regs, prm_prty_bb_b0_regs} } },
- {"pbf_pb1", {{1, 1, pbf_pb1_int_bb_b0_regs,
- pbf_pb1_prty_bb_b0_regs} } },
- {"pbf_pb2", {{1, 1, pbf_pb2_int_bb_b0_regs,
- pbf_pb2_prty_bb_b0_regs} } },
- {"rpb", { {1, 1, rpb_int_bb_b0_regs, rpb_prty_bb_b0_regs} } },
- {"btb", { {11, 2, btb_int_bb_b0_regs, btb_prty_bb_b0_regs} } },
- {"pbf", { {1, 3, pbf_int_bb_b0_regs, pbf_prty_bb_b0_regs} } },
- {"rdif", { {1, 1, rdif_int_bb_b0_regs, rdif_prty_bb_b0_regs} } },
- {"tdif", { {1, 2, tdif_int_bb_b0_regs, tdif_prty_bb_b0_regs} } },
- {"cdu", { {1, 1, cdu_int_bb_b0_regs, cdu_prty_bb_b0_regs} } },
- {"ccfc", { {1, 2, ccfc_int_bb_b0_regs, ccfc_prty_bb_b0_regs} } },
- {"tcfc", { {1, 2, tcfc_int_bb_b0_regs, tcfc_prty_bb_b0_regs} } },
- {"igu", { {1, 3, igu_int_bb_b0_regs, igu_prty_bb_b0_regs} } },
- {"cau", { {1, 1, cau_int_bb_b0_regs, cau_prty_bb_b0_regs} } },
- {"umac", { {0, 0, NULL, NULL} } },
- {"xmac", { {0, 0, NULL, NULL} } },
- {"dbg", { {1, 1, dbg_int_bb_b0_regs, dbg_prty_bb_b0_regs} } },
- {"nig", { {6, 5, nig_int_bb_b0_regs, nig_prty_bb_b0_regs} } },
- {"wol", { {0, 0, NULL, NULL} } },
- {"bmbn", { {0, 0, NULL, NULL} } },
- {"ipc", { {1, 1, ipc_int_bb_b0_regs, ipc_prty_bb_b0_regs} } },
- {"nwm", { {0, 0, NULL, NULL} } },
- {"nws", { {0, 0, NULL, NULL} } },
- {"ms", { {0, 0, NULL, NULL} } },
- {"phy_pcie", { {0, 0, NULL, NULL} } },
- {"misc_aeu", { {0, 0, NULL, NULL} } },
- {"bar0_map", { {0, 0, NULL, NULL} } },};
-
/* Specific HW attention callbacks */
static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
{
@@ -1590,6 +387,25 @@
return -EINVAL;
}
+/* Instead of major changes to the data-structure, we have some 'special'
+ * identifiers for sources that changed meaning between adapters.
+ */
+enum aeu_invert_reg_special_type {
+ AEU_INVERT_REG_SPECIAL_CNIG_0,
+ AEU_INVERT_REG_SPECIAL_CNIG_1,
+ AEU_INVERT_REG_SPECIAL_CNIG_2,
+ AEU_INVERT_REG_SPECIAL_CNIG_3,
+ AEU_INVERT_REG_SPECIAL_MAX,
+};
+
+static struct aeu_invert_reg_bit
+aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
+ {"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
+ {"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
+ {"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
+ {"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
+};
+
/* Note: aeu_invert_reg must be defined in the same bit order as the HW */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
{
@@ -1636,8 +452,22 @@
(33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
{"General Attention 35", ATTENTION_SINGLE,
NULL, MAX_BLOCK_ID},
- {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT),
- NULL, BLOCK_CNIG},
+ {"NWS Parity",
+ ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
+ NULL, BLOCK_NWS},
+ {"NWS Interrupt",
+ ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
+ NULL, BLOCK_NWS},
+ {"NWM Parity",
+ ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
+ NULL, BLOCK_NWM},
+ {"NWM Interrupt",
+ ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
+ NULL, BLOCK_NWM},
{"MCP CPU", ATTENTION_SINGLE,
qed_mcp_attn_cb, MAX_BLOCK_ID},
{"MCP Watchdog timer", ATTENTION_SINGLE,
@@ -1775,6 +605,27 @@
},
};
+static struct aeu_invert_reg_bit *
+qed_int_aeu_translate(struct qed_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_bit)
+{
+ if (!QED_IS_BB(p_hwfn->cdev))
+ return p_bit;
+
+ if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
+ return p_bit;
+
+ return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
+ ATTENTION_BB_SHIFT];
+}
+
+static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_bit)
+{
+ return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags &
+ ATTENTION_PARITY);
+}
+
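Putting the pieces together, a simplified standalone model of the translation path (the struct layout here is trimmed; the driver's real descriptors also carry a callback and a block id): on a BB adapter, a descriptor flagged ATTENTION_BB_DIFFERENT is swapped for its entry in the special table before anything else looks at its flags.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ATTENTION_BB_MASK      0x00700000u
#define ATTENTION_BB_SHIFT     20
#define ATTENTION_BB(v)        ((v) << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT (1u << 23)

struct desc { const char *name; uint32_t flags; };

static struct desc specials[] = {
	{ "CNIG port 0", 0 }, { "CNIG port 1", 0 },
	{ "CNIG port 2", 0 }, { "CNIG port 3", 0 },
};

/* Same decision path as qed_int_aeu_translate() above */
static const struct desc *translate(bool is_bb, const struct desc *d)
{
	if (!is_bb || !(d->flags & ATTENTION_BB_DIFFERENT))
		return d;
	return &specials[(d->flags & ATTENTION_BB_MASK) >> ATTENTION_BB_SHIFT];
}

int main(void)
{
	/* Mirrors the "NWM Parity" entry, which maps to CNIG_2 on BB */
	struct desc nwm = { "NWM Parity",
			    ATTENTION_BB_DIFFERENT | ATTENTION_BB(2u) };

	printf("%s\n", translate(true, &nwm)->name);	/* "CNIG port 2" */
	return 0;
}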
#define ATTN_STATE_BITS (0xfff)
#define ATTN_BITS_MASKABLE (0x3ff)
struct qed_sb_attn_info {
@@ -1863,26 +714,23 @@
return 0;
}
-static void qed_int_deassertion_print_bit(struct qed_hwfn *p_hwfn,
- struct attn_hw_reg *p_reg_desc,
- struct attn_hw_block *p_block,
- enum qed_attention_type type,
- u32 val, u32 mask)
+static void qed_int_attn_print(struct qed_hwfn *p_hwfn,
+ enum block_id id,
+ enum dbg_attn_type type, bool b_clear)
{
- int j;
+ struct dbg_attn_block_result attn_results;
+ enum dbg_status status;
- for (j = 0; j < p_reg_desc->num_of_bits; j++) {
- if (!(val & (1 << j)))
- continue;
+ memset(&attn_results, 0, sizeof(attn_results));
+ status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
+ b_clear, &attn_results);
+ if (status != DBG_STATUS_OK)
DP_NOTICE(p_hwfn,
- "%s (%s): reg %d [0x%08x], bit %d [%s]\n",
- p_block->name,
- type == QED_ATTN_TYPE_ATTN ? "Interrupt" :
- "Parity",
- p_reg_desc->reg_idx, p_reg_desc->sts_addr,
- j, (mask & (1 << j)) ? " [MASKED]" : "");
- }
+ "Failed to parse attention information [status: %s]\n",
+ qed_dbg_get_status_str(status));
+ else
+ qed_dbg_parse_attn(p_hwfn, &attn_results);
}
/**
@@ -1901,53 +749,30 @@
qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
struct aeu_invert_reg_bit *p_aeu,
u32 aeu_en_reg,
- u32 bitmask)
+ const char *p_bit_name, u32 bitmask)
{
+ bool b_fatal = false;
int rc = -EINVAL;
u32 val;
DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
- p_aeu->bit_name, bitmask);
+ p_bit_name, bitmask);
/* Call callback before clearing the interrupt status */
if (p_aeu->cb) {
DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
- p_aeu->bit_name);
+ p_bit_name);
rc = p_aeu->cb(p_hwfn);
}
- /* Handle HW block interrupt registers */
- if (p_aeu->block_index != MAX_BLOCK_ID) {
- struct attn_hw_block *p_block;
- u32 mask;
- int i;
+ if (rc)
+ b_fatal = true;
- p_block = &attn_blocks[p_aeu->block_index];
+ /* Print HW block interrupt registers */
+ if (p_aeu->block_index != MAX_BLOCK_ID)
+ qed_int_attn_print(p_hwfn, p_aeu->block_index,
+ ATTN_TYPE_INTERRUPT, !b_fatal);
- /* Handle each interrupt register */
- for (i = 0; i < p_block->chip_regs[0].num_of_int_regs; i++) {
- struct attn_hw_reg *p_reg_desc;
- u32 sts_addr;
-
- p_reg_desc = p_block->chip_regs[0].int_regs[i];
-
- /* In case of fatal attention, don't clear the status
- * so it would appear in following idle check.
- */
- if (rc == 0)
- sts_addr = p_reg_desc->sts_clr_addr;
- else
- sts_addr = p_reg_desc->sts_addr;
-
- val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, sts_addr);
- mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- p_reg_desc->mask_addr);
- qed_int_deassertion_print_bit(p_hwfn, p_reg_desc,
- p_block,
- QED_ATTN_TYPE_ATTN,
- val, mask);
- }
- }
/* If the attention is benign, no need to prevent it */
if (!rc)
@@ -1957,66 +782,48 @@
val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
- p_aeu->bit_name);
+ p_bit_name);
out:
return rc;
}
-static void qed_int_parity_print(struct qed_hwfn *p_hwfn,
- struct aeu_invert_reg_bit *p_aeu,
- struct attn_hw_block *p_block,
- u8 bit_index)
-{
- int i;
-
- for (i = 0; i < p_block->chip_regs[0].num_of_prty_regs; i++) {
- struct attn_hw_reg *p_reg_desc;
- u32 val, mask;
-
- p_reg_desc = p_block->chip_regs[0].prty_regs[i];
-
- val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- p_reg_desc->sts_clr_addr);
- mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- p_reg_desc->mask_addr);
- qed_int_deassertion_print_bit(p_hwfn, p_reg_desc,
- p_block,
- QED_ATTN_TYPE_PARITY,
- val, mask);
- }
-}
-
/**
* @brief qed_int_deassertion_parity - handle a single parity AEU source
*
* @param p_hwfn
* @param p_aeu - descriptor of an AEU bit which caused the parity
+ * @param aeu_en_reg - address of the AEU enable register
* @param bit_index
*/
static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
struct aeu_invert_reg_bit *p_aeu,
- u8 bit_index)
+ u32 aeu_en_reg, u8 bit_index)
{
- u32 block_id = p_aeu->block_index;
+ u32 block_id = p_aeu->block_index, mask, val;
- DP_INFO(p_hwfn->cdev, "%s[%d] parity attention is set\n",
- p_aeu->bit_name, bit_index);
+ DP_NOTICE(p_hwfn->cdev,
+ "%s parity attention is set [address 0x%08x, bit %d]\n",
+ p_aeu->bit_name, aeu_en_reg, bit_index);
if (block_id != MAX_BLOCK_ID) {
- qed_int_parity_print(p_hwfn, p_aeu, &attn_blocks[block_id],
- bit_index);
+ qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);
/* In BB, there's a single parity bit for several blocks */
if (block_id == BLOCK_BTB) {
- qed_int_parity_print(p_hwfn, p_aeu,
- &attn_blocks[BLOCK_OPTE],
- bit_index);
- qed_int_parity_print(p_hwfn, p_aeu,
- &attn_blocks[BLOCK_MCP],
- bit_index);
+ qed_int_attn_print(p_hwfn, BLOCK_OPTE,
+ ATTN_TYPE_PARITY, false);
+ qed_int_attn_print(p_hwfn, BLOCK_MCP,
+ ATTN_TYPE_PARITY, false);
}
}
+
+ /* Prevent this parity error from being re-asserted */
+ mask = ~BIT(bit_index);
+ val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
+ DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
+ p_aeu->bit_name);
}
/**
@@ -2031,7 +838,7 @@
u16 deasserted_bits)
{
struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
- u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask;
+ u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
u8 i, j, k, bit_idx;
int rc = 0;
@@ -2048,11 +855,11 @@
/* Find parity attentions first */
for (i = 0; i < NUM_ATTN_REGS; i++) {
struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
- u32 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
- i * sizeof(u32));
u32 parities;
+ aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
+ en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
+
/* Skip register in which no parity bit is currently set */
parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
if (!parities)
@@ -2061,10 +868,10 @@
for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
- if ((p_bit->flags & ATTENTION_PARITY) &&
+ if (qed_int_is_parity_flag(p_hwfn, p_bit) &&
!!(parities & BIT(bit_idx)))
qed_int_deassertion_parity(p_hwfn, p_bit,
- bit_idx);
+ aeu_en, bit_idx);
bit_idx += ATTENTION_LENGTH(p_bit->flags);
}
@@ -2079,10 +886,11 @@
continue;
for (i = 0; i < NUM_ATTN_REGS; i++) {
- u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
- i * sizeof(u32) +
- k * sizeof(u32) * NUM_ATTN_REGS;
- u32 en, bits;
+ u32 bits;
+
+ aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
+ i * sizeof(u32) +
+ k * sizeof(u32) * NUM_ATTN_REGS;
en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
bits = aeu_inv_arr[i] & en;
@@ -2096,29 +904,54 @@
* previous assertion.
*/
for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+ unsigned long bitmask;
u8 bit, bit_len;
- u32 bitmask;
p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
-
- /* No need to handle parity-only bits */
- if (p_aeu->flags == ATTENTION_PAR)
- continue;
+ p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu);
bit = bit_idx;
bit_len = ATTENTION_LENGTH(p_aeu->flags);
- if (p_aeu->flags & ATTENTION_PAR_INT) {
+ if (qed_int_is_parity_flag(p_hwfn, p_aeu)) {
/* Skip Parity */
bit++;
bit_len--;
}
bitmask = bits & (((1 << bit_len) - 1) << bit);
+ bitmask >>= bit;
+
if (bitmask) {
+ u32 flags = p_aeu->flags;
+ char bit_name[30];
+ u8 num;
+
+ num = (u8)find_first_bit(&bitmask,
+ bit_len);
+
+ /* Some bits represent more than a
+ * single interrupt. Print their name
+ * with the correct index.
+ */
+ if (ATTENTION_LENGTH(flags) > 2 ||
+ ((flags & ATTENTION_PAR_INT) &&
+ ATTENTION_LENGTH(flags) > 1))
+ snprintf(bit_name, 30,
+ p_aeu->bit_name, num);
+ else
+ strncpy(bit_name,
+ p_aeu->bit_name, 30);
+
+ /* Shift bitmask back to its original
+ * position before passing it on.
+ */
+ bitmask <<= bit;
+
/* Handle source of the attention */
qed_int_deassertion_aeu_bit(p_hwfn,
p_aeu,
aeu_en,
+ bit_name,
bitmask);
}
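As a worked example of the naming logic above (a trivial loop stands in for the kernel's find_first_bit(); standalone sketch): a multi-bit source named "CNIG port %d" whose bit 2 fired is printed as "CNIG port 2".

#include <stdio.h>

/* Stand-in for the kernel's find_first_bit(); mask must be non-zero,
 * which the caller guarantees via the if (bitmask) test above.
 */
static unsigned int first_bit(unsigned long mask)
{
	unsigned int i = 0;

	while (!(mask & (1UL << i)))
		i++;
	return i;
}

int main(void)
{
	const char *name = "CNIG port %d";	/* multi-bit source name */
	unsigned long bitmask = 0x4;		/* bit 2 of this source fired */
	char bit_name[30];

	snprintf(bit_name, sizeof(bit_name), name, first_bit(bitmask));
	printf("%s\n", bit_name);		/* prints "CNIG port 2" */
	return 0;
}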
@@ -2328,6 +1161,7 @@
SB_ATTN_ALIGNED_SIZE(p_hwfn),
p_sb->sb_attn, p_sb->sb_phys);
kfree(p_sb);
+ p_hwfn->p_sb_attn = NULL;
}
static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
@@ -2365,12 +1199,13 @@
for (i = 0; i < NUM_ATTN_REGS; i++) {
/* j is array index, k is bit index */
for (j = 0, k = 0; k < 32; j++) {
- unsigned int flags = aeu_descs[i].bits[j].flags;
+ struct aeu_invert_reg_bit *p_aeu;
- if (flags & ATTENTION_PARITY)
+ p_aeu = &aeu_descs[i].bits[j];
+ if (qed_int_is_parity_flag(p_hwfn, p_aeu))
sb_info->parity_mask[i] |= 1 << k;
- k += ATTENTION_LENGTH(flags);
+ k += ATTENTION_LENGTH(p_aeu->flags);
}
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
"Attn Mask [Reg %d]: 0x%08x\n",
@@ -2465,6 +1300,40 @@
SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}
+static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 igu_sb_id,
+ u32 pi_index,
+ enum qed_coalescing_fsm coalescing_fsm,
+ u8 timeset)
+{
+ struct cau_pi_entry pi_entry;
+ u32 sb_offset, pi_offset;
+
+ if (IS_VF(p_hwfn->cdev))
+ return;
+
+ sb_offset = igu_sb_id * PIS_PER_SB;
+ memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
+
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
+ if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
+ else
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
+
+ pi_offset = sb_offset + pi_index;
+ if (p_hwfn->hw_init_done) {
+ qed_wr(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
+ *((u32 *)&(pi_entry)));
+ } else {
+ STORE_RT_REG(p_hwfn,
+ CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
+ *((u32 *)&(pi_entry)));
+ }
+}
+
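The PI addressing in the function above is a flattened two-dimensional index: each SB owns PIS_PER_SB consecutive entries in CAU_REG_PI_MEMORY. A small sketch of the offset arithmetic (PIS_PER_SB assumed to be 12 here, consistent with the 12-entry CAU cleanup loop later in this patch):

#include <stdio.h>
#include <stdint.h>

#define PIS_PER_SB 12	/* assumed value for illustration */

/* Byte offset of a PI entry within CAU_REG_PI_MEMORY */
static uint32_t pi_mem_offset(uint16_t igu_sb_id, uint32_t pi_index)
{
	uint32_t sb_offset = igu_sb_id * PIS_PER_SB;

	return (sb_offset + pi_index) * (uint32_t)sizeof(uint32_t);
}

int main(void)
{
	/* SB 5, PI 3 -> entry 63 -> byte offset 0xfc */
	printf("0x%x\n", pi_mem_offset(5, 3));
	return 0;
}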
void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
dma_addr_t sb_phys,
@@ -2531,40 +1400,6 @@
}
}
-void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 igu_sb_id,
- u32 pi_index,
- enum qed_coalescing_fsm coalescing_fsm,
- u8 timeset)
-{
- struct cau_pi_entry pi_entry;
- u32 sb_offset, pi_offset;
-
- if (IS_VF(p_hwfn->cdev))
- return;
-
- sb_offset = igu_sb_id * PIS_PER_SB;
- memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
-
- SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
- if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
- SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
- else
- SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
-
- pi_offset = sb_offset + pi_index;
- if (p_hwfn->hw_init_done) {
- qed_wr(p_hwfn, p_ptt,
- CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
- *((u32 *)&(pi_entry)));
- } else {
- STORE_RT_REG(p_hwfn,
- CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
- *((u32 *)&(pi_entry)));
- }
-}
-
void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
{
@@ -2577,16 +1412,47 @@
sb_info->igu_sb_id, 0, 0);
}
-/**
- * @brief qed_get_igu_sb_id - given a sw sb_id return the
- * igu_sb_id
- *
- * @param p_hwfn
- * @param sb_id
- *
- * @return u16
- */
-static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf)
+{
+ struct qed_igu_block *p_block;
+ u16 igu_id;
+
+ for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
+ igu_id++) {
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
+
+ if (!(p_block->status & QED_IGU_STATUS_VALID) ||
+ !(p_block->status & QED_IGU_STATUS_FREE))
+ continue;
+
+ if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf)
+ return p_block;
+ }
+
+ return NULL;
+}
+
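The scan above filters CAM entries on three status bits: the block must be valid, free, and match the requested PF/VF ownership. A standalone model of the same predicate (the QED_IGU_STATUS_* values here are assumptions for illustration):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define QED_IGU_STATUS_FREE  0x01	/* assumed flag values */
#define QED_IGU_STATUS_VALID 0x02
#define QED_IGU_STATUS_PF    0x04

struct igu_block { unsigned char status; };

/* Mirrors the loop body of qed_get_igu_free_sb() */
static struct igu_block *find_free(struct igu_block *cam, size_t n, bool pf)
{
	for (size_t i = 0; i < n; i++) {
		if (!(cam[i].status & QED_IGU_STATUS_VALID) ||
		    !(cam[i].status & QED_IGU_STATUS_FREE))
			continue;
		if (!!(cam[i].status & QED_IGU_STATUS_PF) == pf)
			return &cam[i];
	}
	return NULL;
}

int main(void)
{
	struct igu_block cam[] = {
		{ QED_IGU_STATUS_VALID | QED_IGU_STATUS_PF },	/* in use */
		{ QED_IGU_STATUS_VALID | QED_IGU_STATUS_FREE },	/* free, VF */
		{ QED_IGU_STATUS_VALID | QED_IGU_STATUS_FREE |
		  QED_IGU_STATUS_PF },				/* free, PF */
	};

	printf("free PF block: index %td\n", find_free(cam, 3, true) - cam);
	return 0;
}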
+static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id)
+{
+ struct qed_igu_block *p_block;
+ u16 igu_id;
+
+ for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
+ igu_id++) {
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
+
+ if (!(p_block->status & QED_IGU_STATUS_VALID) ||
+ !p_block->is_pf ||
+ p_block->vector_number != vector_id)
+ continue;
+
+ return igu_id;
+ }
+
+ return QED_SB_INVALID_IDX;
+}
+
+u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
u16 igu_sb_id;
@@ -2594,7 +1460,7 @@
if (sb_id == QED_SP_SB_ID)
igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
else if (IS_PF(p_hwfn->cdev))
- igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+ igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
else
igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
@@ -2619,8 +1485,19 @@
sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
if (sb_id != QED_SP_SB_ID) {
- p_hwfn->sbs_info[sb_id] = sb_info;
- p_hwfn->num_sbs++;
+ if (IS_PF(p_hwfn->cdev)) {
+ struct qed_igu_info *p_info;
+ struct qed_igu_block *p_block;
+
+ p_info = p_hwfn->hw_info.p_igu_info;
+ p_block = &p_info->entry[sb_info->igu_sb_id];
+
+ p_block->sb_info = sb_info;
+ p_block->status &= ~QED_IGU_STATUS_FREE;
+ p_info->usage.free_cnt--;
+ } else {
+ qed_vf_set_sb_info(p_hwfn, sb_id, sb_info);
+ }
}
sb_info->cdev = p_hwfn->cdev;
@@ -2649,20 +1526,35 @@
int qed_int_sb_release(struct qed_hwfn *p_hwfn,
struct qed_sb_info *sb_info, u16 sb_id)
{
- if (sb_id == QED_SP_SB_ID) {
- DP_ERR(p_hwfn, "Do Not free sp sb using this function");
- return -EINVAL;
- }
+ struct qed_igu_block *p_block;
+ struct qed_igu_info *p_info;
+
+ if (!sb_info)
+ return 0;
/* zero status block and ack counter */
sb_info->sb_ack = 0;
memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
- if (p_hwfn->sbs_info[sb_id] != NULL) {
- p_hwfn->sbs_info[sb_id] = NULL;
- p_hwfn->num_sbs--;
+ if (IS_VF(p_hwfn->cdev)) {
+ qed_vf_set_sb_info(p_hwfn, sb_id, NULL);
+ return 0;
}
+ p_info = p_hwfn->hw_info.p_igu_info;
+ p_block = &p_info->entry[sb_info->igu_sb_id];
+
+ /* Vector 0 is reserved for the default SB */
+ if (!p_block->vector_number) {
+ DP_ERR(p_hwfn, "Do not free the SP SB using this function");
+ return -EINVAL;
+ }
+
+ /* Drop the reference to the client's SB info and fix the counters */
+ p_block->sb_info = NULL;
+ p_block->status |= QED_IGU_STATUS_FREE;
+ p_info->usage.free_cnt++;
+
return 0;
}
@@ -2679,6 +1571,7 @@
p_sb->sb_info.sb_virt,
p_sb->sb_info.sb_phys);
kfree(p_sb);
+ p_hwfn->p_sp_sb = NULL;
}
static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -2780,10 +1673,9 @@
qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}
-int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- enum qed_int_mode int_mode)
+static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
{
- int rc = 0;
/* Configure AEU signal change to produce attentions */
qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
@@ -2796,6 +1688,16 @@
/* Unmask AEU signals toward IGU */
qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
+}
+
+int
+qed_int_igu_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
+{
+ int rc = 0;
+
+ qed_int_igu_enable_attn(p_hwfn, p_ptt);
+
if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
rc = qed_slowpath_irq_req(p_hwfn);
if (rc) {
@@ -2824,10 +1726,11 @@
#define IGU_CLEANUP_SLEEP_LENGTH (1000)
static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 sb_id, bool cleanup_set, u16 opaque_fid)
+ u16 igu_sb_id,
+ bool cleanup_set, u16 opaque_fid)
{
u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
- u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
+ u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
/* Set the data field */
@@ -2850,8 +1753,8 @@
mmiowb();
/* calculate where to read the status bit from */
- sb_bit = 1 << (sb_id % 32);
- sb_bit_addr = sb_id / 32 * sizeof(u32);
+ sb_bit = 1 << (igu_sb_id % 32);
+ sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;
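The lookup above is ordinary bitmap addressing, 32 SBs per 32-bit cleanup-status register: the SB id selects a word (id / 32) and a bit within it (id % 32). A sketch of the computation (the IGU_REG_CLEANUP_STATUS_0 base value is hypothetical):

#include <stdio.h>
#include <stdint.h>

#define IGU_REG_CLEANUP_STATUS_0 0x180000u	/* hypothetical base */

int main(void)
{
	uint16_t igu_sb_id = 37;

	/* SB 37 -> status word 1, bit 5 */
	uint32_t sb_bit = 1u << (igu_sb_id % 32);
	uint32_t sb_bit_addr = igu_sb_id / 32 * (uint32_t)sizeof(uint32_t)
			       + IGU_REG_CLEANUP_STATUS_0;

	printf("addr 0x%x, mask 0x%08x\n", sb_bit_addr, sb_bit);
	return 0;
}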
@@ -2868,29 +1771,38 @@
if (!sleep_cnt)
DP_NOTICE(p_hwfn,
"Timeout waiting for clear status 0x%08x [for sb %d]\n",
- val, sb_id);
+ val, igu_sb_id);
}
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 sb_id, u16 opaque, bool b_set)
+ u16 igu_sb_id, u16 opaque, bool b_set)
{
+ struct qed_igu_block *p_block;
int pi, i;
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n",
+ igu_sb_id,
+ p_block->function_id,
+ p_block->is_pf, p_block->vector_number);
+
/* Set */
if (b_set)
- qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
+ qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);
/* Clear */
- qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
+ qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);
/* Wait for the IGU SB to clean up */
for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
u32 val;
val = qed_rd(p_hwfn, p_ptt,
- IGU_REG_WRITE_DONE_PENDING + ((sb_id / 32) * 4));
- if (val & (1 << (sb_id % 32)))
+ IGU_REG_WRITE_DONE_PENDING +
+ ((igu_sb_id / 32) * 4));
+ if (val & BIT((igu_sb_id % 32)))
usleep_range(10, 20);
else
break;
@@ -2898,84 +1810,205 @@
if (i == IGU_CLEANUP_SLEEP_LENGTH)
DP_NOTICE(p_hwfn,
"Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
- sb_id);
+ igu_sb_id);
/* Clear the CAU for the SB */
for (pi = 0; pi < 12; pi++)
qed_wr(p_hwfn, p_ptt,
- CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
+ CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
}
void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool b_set, bool b_slowpath)
{
- u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
- u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
- u32 sb_id = 0, val = 0;
+ struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ struct qed_igu_block *p_block;
+ u16 igu_sb_id = 0;
+ u32 val = 0;
val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
- DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
- "IGU cleaning SBs [%d,...,%d]\n",
- igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
+ for (igu_sb_id = 0;
+ igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
+ p_block = &p_info->entry[igu_sb_id];
- for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
- qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+ if (!(p_block->status & QED_IGU_STATUS_VALID) ||
+ !p_block->is_pf ||
+ (p_block->status & QED_IGU_STATUS_DSB))
+ continue;
+
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
p_hwfn->hw_info.opaque_fid,
b_set);
+ }
- if (!b_slowpath)
- return;
-
- sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
- DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
- "IGU cleaning slowpath SB [%d]\n", sb_id);
- qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
- p_hwfn->hw_info.opaque_fid, b_set);
+ if (b_slowpath)
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ p_info->igu_dsb_id,
+ p_hwfn->hw_info.opaque_fid,
+ b_set);
}
-static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u16 sb_id)
+int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ struct qed_igu_block *p_block;
+ int pf_sbs, vf_sbs;
+ u16 igu_sb_id;
+ u32 val, rval;
+
+ if (!RESC_NUM(p_hwfn, QED_SB)) {
+ p_info->b_allow_pf_vf_change = false;
+ } else {
+ /* Use the numbers the MFW has provided -
+ * don't forget MFW accounts for the default SB as well.
+ */
+ p_info->b_allow_pf_vf_change = true;
+
+ if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) {
+ DP_INFO(p_hwfn,
+ "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
+ RESC_NUM(p_hwfn, QED_SB) - 1,
+ p_info->usage.cnt);
+ p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1;
+ }
+
+ if (IS_PF_SRIOV(p_hwfn)) {
+ u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs;
+
+ if (vfs != p_info->usage.iov_cnt)
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_INTR,
+ "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
+ p_info->usage.iov_cnt, vfs);
+
+ /* At this point we know the total number of SBs in the IGU
+ * and the number of PF SBs, so we can validate that enough
+ * remain for the VFs.
+ */
+ if (vfs > p_info->usage.free_cnt +
+ p_info->usage.free_cnt_iov - p_info->usage.cnt) {
+ DP_NOTICE(p_hwfn,
+ "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
+ p_info->usage.free_cnt +
+ p_info->usage.free_cnt_iov,
+ p_info->usage.cnt, vfs);
+ return -EINVAL;
+ }
+
+ /* Currently cap the number of VF SBs by the
+ * number of VFs.
+ */
+ p_info->usage.iov_cnt = vfs;
+ }
+ }
+
+ /* Mark all SBs as free, now in the right PF/VFs division */
+ p_info->usage.free_cnt = p_info->usage.cnt;
+ p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
+ p_info->usage.orig = p_info->usage.cnt;
+ p_info->usage.iov_orig = p_info->usage.iov_cnt;
+
+ /* We now proceed to re-configure the IGU CAM to reflect the initial
+ * configuration, starting with the default SB.
+ */
+ pf_sbs = p_info->usage.cnt;
+ vf_sbs = p_info->usage.iov_cnt;
+
+ for (igu_sb_id = p_info->igu_dsb_id;
+ igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
+ p_block = &p_info->entry[igu_sb_id];
+ val = 0;
+
+ if (!(p_block->status & QED_IGU_STATUS_VALID))
+ continue;
+
+ if (p_block->status & QED_IGU_STATUS_DSB) {
+ p_block->function_id = p_hwfn->rel_pf_id;
+ p_block->is_pf = 1;
+ p_block->vector_number = 0;
+ p_block->status = QED_IGU_STATUS_VALID |
+ QED_IGU_STATUS_PF |
+ QED_IGU_STATUS_DSB;
+ } else if (pf_sbs) {
+ pf_sbs--;
+ p_block->function_id = p_hwfn->rel_pf_id;
+ p_block->is_pf = 1;
+ p_block->vector_number = p_info->usage.cnt - pf_sbs;
+ p_block->status = QED_IGU_STATUS_VALID |
+ QED_IGU_STATUS_PF |
+ QED_IGU_STATUS_FREE;
+ } else if (vf_sbs) {
+ p_block->function_id =
+ p_hwfn->cdev->p_iov_info->first_vf_in_pf +
+ p_info->usage.iov_cnt - vf_sbs;
+ p_block->is_pf = 0;
+ p_block->vector_number = 0;
+ p_block->status = QED_IGU_STATUS_VALID |
+ QED_IGU_STATUS_FREE;
+ vf_sbs--;
+ } else {
+ p_block->function_id = 0;
+ p_block->is_pf = 0;
+ p_block->vector_number = 0;
+ }
+
+ SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
+ p_block->function_id);
+ SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
+ p_block->vector_number);
+
+ /* VF entries would be enabled when the VF is initialized */
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
+
+ rval = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
+
+ if (rval != val) {
+ qed_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY +
+ sizeof(u32) * igu_sb_id, val);
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_INTR,
+ "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
+ igu_sb_id,
+ p_block->function_id,
+ p_block->is_pf,
+ p_block->vector_number, rval, val);
+ }
+ }
+
+ return 0;
+}
+
+static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 igu_sb_id)
{
u32 val = qed_rd(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
struct qed_igu_block *p_block;
- p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
-
- /* stop scanning when hit first invalid PF entry */
- if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
- GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
- goto out;
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
/* Fill the block information */
- p_block->status = QED_IGU_STATUS_VALID;
- p_block->function_id = GET_FIELD(val,
- IGU_MAPPING_LINE_FUNCTION_NUMBER);
- p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
- p_block->vector_number = GET_FIELD(val,
- IGU_MAPPING_LINE_VECTOR_NUMBER);
-
- DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
- "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d is_pf = %d vector_num = 0x%x\n",
- sb_id, val, p_block->function_id,
- p_block->is_pf, p_block->vector_number);
-
-out:
- return val;
+ p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
+ p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
+ p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
+ p_block->igu_sb_id = igu_sb_id;
}
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_igu_info *p_igu_info;
- u32 val, min_vf = 0, max_vf = 0;
- u16 sb_id, last_iov_sb_id = 0;
- struct qed_igu_block *blk;
- u16 prev_sb_id = 0xFF;
+ struct qed_igu_block *p_block;
+ u32 min_vf = 0, max_vf = 0;
+ u16 igu_sb_id;
p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
if (!p_hwfn->hw_info.p_igu_info)
@@ -2983,12 +2016,10 @@
p_igu_info = p_hwfn->hw_info.p_igu_info;
- /* Initialize base sb / sb cnt for PFs and VFs */
- p_igu_info->igu_base_sb = 0xffff;
- p_igu_info->igu_sb_cnt = 0;
- p_igu_info->igu_dsb_id = 0xffff;
- p_igu_info->igu_base_sb_iov = 0xffff;
+ /* Distinguish between existent and non-existent default SB */
+ p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX;
+ /* Find the range of VF ids whose SBs belong to this PF */
if (p_hwfn->cdev->p_iov_info) {
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
@@ -2996,113 +2027,69 @@
max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
}
- for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
- sb_id++) {
- blk = &p_igu_info->igu_map.igu_blocks[sb_id];
+ for (igu_sb_id = 0;
+ igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
+ /* Read current entry; Notice it might not belong to this PF */
+ qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
+ p_block = &p_igu_info->entry[igu_sb_id];
- val = qed_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
+ if ((p_block->is_pf) &&
+ (p_block->function_id == p_hwfn->rel_pf_id)) {
+ p_block->status = QED_IGU_STATUS_PF |
+ QED_IGU_STATUS_VALID |
+ QED_IGU_STATUS_FREE;
- /* stop scanning when hit first invalid PF entry */
- if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
- GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
- break;
+ if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
+ p_igu_info->usage.cnt++;
+ } else if (!(p_block->is_pf) &&
+ (p_block->function_id >= min_vf) &&
+ (p_block->function_id < max_vf)) {
+ /* Available for VFs of this PF */
+ p_block->status = QED_IGU_STATUS_VALID |
+ QED_IGU_STATUS_FREE;
- if (blk->is_pf) {
- if (blk->function_id == p_hwfn->rel_pf_id) {
- blk->status |= QED_IGU_STATUS_PF;
+ if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
+ p_igu_info->usage.iov_cnt++;
+ }
- if (blk->vector_number == 0) {
- if (p_igu_info->igu_dsb_id == 0xffff)
- p_igu_info->igu_dsb_id = sb_id;
- } else {
- if (p_igu_info->igu_base_sb ==
- 0xffff) {
- p_igu_info->igu_base_sb = sb_id;
- } else if (prev_sb_id != sb_id - 1) {
- DP_NOTICE(p_hwfn->cdev,
- "consecutive igu vectors for HWFN %x broken",
- p_hwfn->rel_pf_id);
- break;
- }
- prev_sb_id = sb_id;
- /* we don't count the default */
- (p_igu_info->igu_sb_cnt)++;
- }
- }
- } else {
- if ((blk->function_id >= min_vf) &&
- (blk->function_id < max_vf)) {
- /* Available for VFs of this PF */
- if (p_igu_info->igu_base_sb_iov == 0xffff) {
- p_igu_info->igu_base_sb_iov = sb_id;
- } else if (last_iov_sb_id != sb_id - 1) {
- if (!val) {
- DP_VERBOSE(p_hwfn->cdev,
- NETIF_MSG_INTR,
- "First uninitialized IGU CAM entry at index 0x%04x\n",
- sb_id);
- } else {
- DP_NOTICE(p_hwfn->cdev,
- "Consecutive igu vectors for HWFN %x vfs is broken [jumps from %04x to %04x]\n",
- p_hwfn->rel_pf_id,
- last_iov_sb_id,
- sb_id); }
- break;
- }
- blk->status |= QED_IGU_STATUS_FREE;
- p_hwfn->hw_info.p_igu_info->free_blks++;
- last_iov_sb_id = sb_id;
- }
+ /* Mark the first entry belonging to the PF or its VFs
+ * as the default SB [we'll reset IGU prior to first usage].
+ */
+ if ((p_block->status & QED_IGU_STATUS_VALID) &&
+ (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) {
+ p_igu_info->igu_dsb_id = igu_sb_id;
+ p_block->status |= QED_IGU_STATUS_DSB;
+ }
+
+ /* Limit the number of prints by having each PF print only its
+ * entries with the exception of PF0 which would print
+ * everything.
+ */
+ if ((p_block->status & QED_IGU_STATUS_VALID) ||
+ (p_hwfn->abs_pf_id == 0)) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
+ igu_sb_id, p_block->function_id,
+ p_block->is_pf, p_block->vector_number);
}
}
- /* There's a possibility the igu_sb_cnt_iov doesn't properly reflect
- * the number of VF SBs [especially for first VF on engine, as we can't
- * differentiate between empty entries and its entries].
- * Since we don't really support more SBs than VFs today, prevent any
- * such configuration by sanitizing the number of SBs to equal the
- * number of VFs.
- */
- if (IS_PF_SRIOV(p_hwfn)) {
- u16 total_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
-
- if (total_vfs < p_igu_info->free_blks) {
- DP_VERBOSE(p_hwfn,
- (NETIF_MSG_INTR | QED_MSG_IOV),
- "Limiting number of SBs for IOV - %04x --> %04x\n",
- p_igu_info->free_blks,
- p_hwfn->cdev->p_iov_info->total_vfs);
- p_igu_info->free_blks = total_vfs;
- } else if (total_vfs > p_igu_info->free_blks) {
- DP_NOTICE(p_hwfn,
- "IGU has only %04x SBs for VFs while the device has %04x VFs\n",
- p_igu_info->free_blks, total_vfs);
- return -EINVAL;
- }
- }
- p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
-
- DP_VERBOSE(
- p_hwfn,
- NETIF_MSG_INTR,
- "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] igu_dsb_id=0x%x\n",
- p_igu_info->igu_base_sb,
- p_igu_info->igu_base_sb_iov,
- p_igu_info->igu_sb_cnt,
- p_igu_info->igu_sb_cnt_iov,
- p_igu_info->igu_dsb_id);
-
- if (p_igu_info->igu_base_sb == 0xffff ||
- p_igu_info->igu_dsb_id == 0xffff ||
- p_igu_info->igu_sb_cnt == 0) {
+ if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) {
DP_NOTICE(p_hwfn,
- "IGU CAM returned invalid values igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
- p_igu_info->igu_base_sb,
- p_igu_info->igu_sb_cnt,
- p_igu_info->igu_dsb_id);
+ "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
+ p_igu_info->igu_dsb_id);
return -EINVAL;
}
+ /* All non-default SBs are considered free at this point */
+ p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
+ p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
+ p_igu_info->igu_dsb_id,
+ p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt);
+
return 0;
}
@@ -3157,6 +2144,7 @@
static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->sp_dpc);
+ p_hwfn->sp_dpc = NULL;
}
int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -3198,31 +2186,7 @@
if (!info || !p_sb_cnt_info)
return;
- p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
- p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
- p_sb_cnt_info->sb_free_blk = info->free_blks;
-}
-
-u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
-{
- struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
-
- /* Determine origin of SB id */
- if ((sb_id >= p_info->igu_base_sb) &&
- (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
- return sb_id - p_info->igu_base_sb;
- } else if ((sb_id >= p_info->igu_base_sb_iov) &&
- (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
- /* We want the first VF queue to be adjacent to the
- * last PF queue. Since L2 queues can be partial to
- * SBs, we'll use the feature instead.
- */
- return sb_id - p_info->igu_base_sb_iov +
- FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
- } else {
- DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id);
- return 0;
- }
+ memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info));
}
void qed_int_disable_post_isr_release(struct qed_dev *cdev)
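For reference, the IGU CAM line that qed_int_igu_reset_cam() composes and then compares against IGU_REG_MAPPING_MEMORY packs a function number, a PF-valid bit, a vector number and a valid bit into one 32-bit word. A minimal standalone sketch of that packing follows; the shifts and masks are illustrative placeholders, not the device's actual register layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative placeholders -- NOT the real IGU register layout. */
#define LINE_FUNCTION_SHIFT 0
#define LINE_FUNCTION_MASK 0xffu
#define LINE_PF_VALID_SHIFT 8
#define LINE_VECTOR_SHIFT 9
#define LINE_VECTOR_MASK 0xffu
#define LINE_VALID_SHIFT 17

static uint32_t igu_cam_line(uint8_t func_id, int is_pf, uint8_t vector)
{
	uint32_t val = 0;

	val |= ((uint32_t)func_id & LINE_FUNCTION_MASK) << LINE_FUNCTION_SHIFT;
	val |= (uint32_t)!!is_pf << LINE_PF_VALID_SHIFT;
	val |= ((uint32_t)vector & LINE_VECTOR_MASK) << LINE_VECTOR_SHIFT;
	/* As in the patch: VF lines stay disabled until the VF is
	 * initialized, so VALID simply tracks is_pf here. */
	val |= (uint32_t)!!is_pf << LINE_VALID_SHIFT;
	return val;
}

int main(void)
{
	/* Default SB of PF 2: vector number 0, valid. */
	printf("0x%08x\n", igu_cam_line(2, 1, 0));
	return 0;
}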
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 0ae0bb4..5199634 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -79,24 +79,6 @@
};
/**
- * @brief qed_int_cau_conf_pi - configure cau for a given
- * status block
- *
- * @param p_hwfn
- * @param p_ptt
- * @param igu_sb_id
- * @param pi_index
- * @param state
- * @param timeset
- */
-void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 igu_sb_id,
- u32 pi_index,
- enum qed_coalescing_fsm coalescing_fsm,
- u8 timeset);
-
-/**
* @brief qed_int_igu_enable_int - enable device interrupts
*
* @param p_hwfn
@@ -217,32 +199,63 @@
#define SB_ALIGNED_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+#define QED_SB_INVALID_IDX 0xffff
+
struct qed_igu_block {
- u8 status;
+ u8 status;
#define QED_IGU_STATUS_FREE 0x01
#define QED_IGU_STATUS_VALID 0x02
#define QED_IGU_STATUS_PF 0x04
+#define QED_IGU_STATUS_DSB 0x08
- u8 vector_number;
- u8 function_id;
- u8 is_pf;
-};
+ u8 vector_number;
+ u8 function_id;
+ u8 is_pf;
-struct qed_igu_map {
- struct qed_igu_block igu_blocks[MAX_TOT_SB_PER_PATH];
+ /* Index inside IGU [meant for back reference] */
+ u16 igu_sb_id;
+
+ struct qed_sb_info *sb_info;
};
struct qed_igu_info {
- struct qed_igu_map igu_map;
- u16 igu_dsb_id;
- u16 igu_base_sb;
- u16 igu_base_sb_iov;
- u16 igu_sb_cnt;
- u16 igu_sb_cnt_iov;
- u16 free_blks;
+ struct qed_igu_block entry[MAX_TOT_SB_PER_PATH];
+ u16 igu_dsb_id;
+
+ struct qed_sb_cnt_info usage;
+
+ bool b_allow_pf_vf_change;
};
-/* TODO Names of function may change... */
+/**
+ * @brief - Make sure the IGU CAM reflects the resources provided by MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief Translate the weakly-defined client sb-id into an IGU sb-id
+ *
+ * @param p_hwfn
+ * @param sb_id - user provided sb_id
+ *
+ * @return an index inside IGU CAM where the SB resides
+ */
+u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
+
+/**
+ * @brief return a pointer to an unused valid SB
+ *
+ * @param p_hwfn
+ * @param b_is_pf - true iff we want a SB belonging to a PF
+ *
+ * @return pointer to an igu_block, NULL if none is available
+ */
+struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn,
+ bool b_is_pf);
+
void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool b_set,
@@ -321,13 +334,13 @@
*
* @param p_hwfn
* @param p_ptt
- * @param sb_id - igu status block id
+ * @param igu_sb_id - igu status block id
* @param opaque - opaque fid of the sb owner.
* @param b_set - set(1) / clear(0)
*/
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 sb_id,
+ u16 igu_sb_id,
u16 opaque,
bool b_set);
@@ -377,16 +390,6 @@
struct qed_ptt *p_ptt);
/**
- * @brief - Returns an Rx queue index appropriate for usage with given SB.
- *
- * @param p_hwfn
- * @param sb_id - absolute index of SB
- *
- * @return index of Rx queue
- */
-u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
-
-/**
* @brief - Enable Interrupt & Attention for hw function
*
* @param p_hwfn
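The qed_igu_block status flags above drive the free-SB lookup that qed_get_igu_free_sb() declares: an entry is eligible only if it is valid, free, not the default SB, and on the requested PF/VF side. A minimal sketch of that scan, using trimmed stand-in types rather than the kernel structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-ins for the QED_IGU_STATUS_* flags. */
#define ST_FREE 0x01
#define ST_VALID 0x02
#define ST_PF 0x04
#define ST_DSB 0x08

struct igu_block {
	uint8_t status;
	uint16_t igu_sb_id;
};

static struct igu_block *find_free_sb(struct igu_block *entry, size_t n,
				      bool b_is_pf)
{
	size_t i;

	for (i = 0; i < n; i++) {
		struct igu_block *p = &entry[i];

		if (!(p->status & ST_VALID) || !(p->status & ST_FREE))
			continue;
		/* The default SB is never handed out to clients. */
		if (p->status & ST_DSB)
			continue;
		if (!!(p->status & ST_PF) == b_is_pf)
			return p;
	}
	return NULL;
}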
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 339c91d..813c77c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -44,7 +44,6 @@
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/list.h>
@@ -63,6 +62,22 @@
#include "qed_sriov.h"
#include "qed_reg_addr.h"
+static int
+qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code,
+ u16 echo, union event_ring_data *data, u8 fw_return_code)
+{
+ if (p_hwfn->p_iscsi_info->event_cb) {
+ struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
+
+ return p_iscsi->event_cb(p_iscsi->event_context,
+ fw_event_code, data);
+ } else {
+ DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
+ return -EINVAL;
+ }
+}
+
struct qed_iscsi_conn {
struct list_head list_entry;
bool free_on_delete;
@@ -186,7 +201,7 @@
DP_ERR(p_hwfn,
"Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
p_params->num_queues,
- p_hwfn->hw_info.resc_num[QED_ISCSI_CQ]);
+ p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
return -EINVAL;
}
@@ -221,7 +236,7 @@
p_queue->cmdq_sb_pi = p_params->gl_cmd_pi;
for (i = 0; i < p_params->num_queues; i++) {
- val = p_hwfn->sbs_info[i]->igu_sb_id;
+ val = qed_get_igu_sb_id(p_hwfn, i);
p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
}
@@ -266,6 +281,9 @@
p_hwfn->p_iscsi_info->event_context = event_context;
p_hwfn->p_iscsi_info->event_cb = async_event_cb;
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ISCSI,
+ qed_iscsi_async_event);
+
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -375,7 +393,6 @@
p_tcp->ss_thresh = cpu_to_le32(p_conn->ss_thresh);
p_tcp->srtt = cpu_to_le16(p_conn->srtt);
p_tcp->rtt_var = cpu_to_le16(p_conn->rtt_var);
- p_tcp->ts_time = cpu_to_le32(p_conn->ts_time);
p_tcp->ts_recent = cpu_to_le32(p_conn->ts_recent);
p_tcp->ts_recent_age = cpu_to_le32(p_conn->ts_recent_age);
p_tcp->total_rt = cpu_to_le32(p_conn->total_rt);
@@ -400,8 +417,6 @@
p_tcp->mss = cpu_to_le16(p_conn->mss);
p_tcp->snd_wnd_scale = p_conn->snd_wnd_scale;
p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale;
- dval = p_conn->ts_ticks_per_second;
- p_tcp->ts_ticks_per_second = cpu_to_le32(dval);
wval = p_conn->da_timeout_value;
p_tcp->da_timeout_value = cpu_to_le16(wval);
p_tcp->ack_frequency = p_conn->ack_frequency;
@@ -492,6 +507,54 @@
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+static int
+qed_sp_iscsi_mac_update(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct iscsi_spe_conn_mac_update *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+ u8 ucval;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ISCSI_RAMROD_CMD_ID_MAC_UPDATE,
+ PROTOCOLID_ISCSI, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.iscsi_conn_mac_update;
+ p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_MAC_UPDATE;
+ SET_FIELD(p_ramrod->hdr.flags,
+ ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
+
+ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+ p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
+ ucval = p_conn->remote_mac[1];
+ ((u8 *)(&p_ramrod->remote_mac_addr_hi))[0] = ucval;
+ ucval = p_conn->remote_mac[0];
+ ((u8 *)(&p_ramrod->remote_mac_addr_hi))[1] = ucval;
+ ucval = p_conn->remote_mac[3];
+ ((u8 *)(&p_ramrod->remote_mac_addr_mid))[0] = ucval;
+ ucval = p_conn->remote_mac[2];
+ ((u8 *)(&p_ramrod->remote_mac_addr_mid))[1] = ucval;
+ ucval = p_conn->remote_mac[5];
+ ((u8 *)(&p_ramrod->remote_mac_addr_lo))[0] = ucval;
+ ucval = p_conn->remote_mac[4];
+ ((u8 *)(&p_ramrod->remote_mac_addr_lo))[1] = ucval;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn,
struct qed_iscsi_conn *p_conn,
enum spq_mode comp_mode,
@@ -587,7 +650,10 @@
p_ramrod = &p_ent->ramrod.iscsi_destroy;
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_DESTROY_FUNC;
- return qed_spq_post(p_hwfn, p_ent, NULL);
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);
+ return rc;
}
static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
@@ -708,7 +774,7 @@
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
- r2tq_num_elements, 0x80, &p_conn->r2tq);
+ r2tq_num_elements, 0x80, &p_conn->r2tq, NULL);
if (rc)
goto nomem_r2tq;
@@ -719,7 +785,7 @@
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
uhq_num_elements,
- sizeof(struct iscsi_uhqe), &p_conn->uhq);
+ sizeof(struct iscsi_uhqe), &p_conn->uhq, NULL);
if (rc)
goto nomem_uhq;
@@ -729,7 +795,7 @@
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
xhq_num_elements,
- sizeof(struct iscsi_xhqe), &p_conn->xhq);
+ sizeof(struct iscsi_xhqe), &p_conn->xhq, NULL);
if (rc)
goto nomem;
@@ -822,29 +888,32 @@
kfree(p_conn);
}
-struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
+int qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_iscsi_info *p_iscsi_info;
p_iscsi_info = kzalloc(sizeof(*p_iscsi_info), GFP_KERNEL);
if (!p_iscsi_info)
- return NULL;
+ return -ENOMEM;
INIT_LIST_HEAD(&p_iscsi_info->free_list);
- return p_iscsi_info;
+
+ p_hwfn->p_iscsi_info = p_iscsi_info;
+ return 0;
}
-void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_info *p_iscsi_info)
+void qed_iscsi_setup(struct qed_hwfn *p_hwfn)
{
- spin_lock_init(&p_iscsi_info->lock);
+ spin_lock_init(&p_hwfn->p_iscsi_info->lock);
}
-void qed_iscsi_free(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_info *p_iscsi_info)
+void qed_iscsi_free(struct qed_hwfn *p_hwfn)
{
struct qed_iscsi_conn *p_conn = NULL;
+ if (!p_hwfn->p_iscsi_info)
+ return;
+
while (!list_empty(&p_hwfn->p_iscsi_info->free_list)) {
p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list,
struct qed_iscsi_conn, list_entry);
@@ -854,7 +923,8 @@
}
}
- kfree(p_iscsi_info);
+ kfree(p_hwfn->p_iscsi_info);
+ p_hwfn->p_iscsi_info = NULL;
}
static void _qed_iscsi_get_tstats(struct qed_hwfn *p_hwfn,
@@ -1324,6 +1394,23 @@
return qed_iscsi_get_stats(QED_LEADING_HWFN(cdev), stats);
}
+static int qed_iscsi_change_mac(struct qed_dev *cdev,
+ u32 handle, const u8 *mac)
+{
+ struct qed_hash_iscsi_con *hash_con;
+
+ hash_con = qed_iscsi_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+ return -EINVAL;
+ }
+
+ return qed_sp_iscsi_mac_update(QED_LEADING_HWFN(cdev),
+ hash_con->con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
struct qed_mcp_iscsi_stats *stats)
{
@@ -1358,6 +1445,7 @@
.destroy_conn = &qed_iscsi_destroy_conn,
.clear_sq = &qed_iscsi_clear_conn_sq,
.get_stats = &qed_iscsi_stats,
+ .change_mac = &qed_iscsi_change_mac,
};
const struct qed_iscsi_ops *qed_get_iscsi_ops(void)
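The byte shuffling in qed_sp_iscsi_mac_update() above stores the 6-byte remote MAC as three 16-bit words (hi/mid/lo), swapping the two bytes within each word. The same transform as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

struct mac_words {
	uint16_t hi, mid, lo; /* as placed in the ramrod */
};

static void pack_remote_mac(const uint8_t mac[6], struct mac_words *w)
{
	uint8_t *p;

	p = (uint8_t *)&w->hi;
	p[0] = mac[1];
	p[1] = mac[0];
	p = (uint8_t *)&w->mid;
	p[0] = mac[3];
	p[1] = mac[2];
	p = (uint8_t *)&w->lo;
	p[0] = mac[5];
	p[1] = mac[4];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct mac_words w;

	pack_remote_mac(mac, &w);
	printf("%04x %04x %04x\n", w.hi, w.mid, w.lo);
	return 0;
}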
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
index ae98f77..225c75b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
@@ -57,13 +57,11 @@
#endif
#if IS_ENABLED(CONFIG_QED_ISCSI)
-struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn);
+int qed_iscsi_alloc(struct qed_hwfn *p_hwfn);
-void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_info *p_iscsi_info);
+void qed_iscsi_setup(struct qed_hwfn *p_hwfn);
-void qed_iscsi_free(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_info *p_iscsi_info);
+void qed_iscsi_free(struct qed_hwfn *p_hwfn);
/**
* @brief - Fills provided statistics struct with statistics.
@@ -74,12 +72,15 @@
void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
struct qed_mcp_iscsi_stats *stats);
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
-static inline struct qed_iscsi_info *qed_iscsi_alloc(
- struct qed_hwfn *p_hwfn) { return NULL; }
-static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_info *p_iscsi_info) {}
-static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_info *p_iscsi_info) {}
+static inline int qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn) {}
+
+static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn) {}
+
static inline void
qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
struct qed_mcp_iscsi_stats *stats) {}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 746fed4..e57699b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -43,7 +43,6 @@
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
@@ -66,26 +65,162 @@
#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
+struct qed_l2_info {
+ u32 queues;
+ unsigned long **pp_qid_usage;
+
+ /* The lock is meant to synchronize access to the qid usage */
+ struct mutex lock;
+};
+
+int qed_l2_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_l2_info *p_l2_info;
+ unsigned long **pp_qids;
+ u32 i;
+
+ if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
+ p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+ return 0;
+
+ p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
+ if (!p_l2_info)
+ return -ENOMEM;
+ p_hwfn->p_l2_info = p_l2_info;
+
+ if (IS_PF(p_hwfn->cdev)) {
+ p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE);
+ } else {
+ u8 rx = 0, tx = 0;
+
+ qed_vf_get_num_rxqs(p_hwfn, &rx);
+ qed_vf_get_num_txqs(p_hwfn, &tx);
+
+ p_l2_info->queues = max_t(u8, rx, tx);
+ }
+
+ pp_qids = kzalloc(sizeof(unsigned long *) * p_l2_info->queues,
+ GFP_KERNEL);
+ if (!pp_qids)
+ return -ENOMEM;
+ p_l2_info->pp_qid_usage = pp_qids;
+
+ for (i = 0; i < p_l2_info->queues; i++) {
+ pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL);
+ if (!pp_qids[i])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void qed_l2_setup(struct qed_hwfn *p_hwfn)
+{
+ if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
+ p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+ return;
+
+ mutex_init(&p_hwfn->p_l2_info->lock);
+}
+
+void qed_l2_free(struct qed_hwfn *p_hwfn)
+{
+ u32 i;
+
+ if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
+ p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+ return;
+
+ if (!p_hwfn->p_l2_info)
+ return;
+
+ if (!p_hwfn->p_l2_info->pp_qid_usage)
+ goto out_l2_info;
+
+ /* Free until we hit the first uninitialized entry */
+ for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
+ if (!p_hwfn->p_l2_info->pp_qid_usage[i])
+ break;
+ kfree(p_hwfn->p_l2_info->pp_qid_usage[i]);
+ }
+
+ kfree(p_hwfn->p_l2_info->pp_qid_usage);
+
+out_l2_info:
+ kfree(p_hwfn->p_l2_info);
+ p_hwfn->p_l2_info = NULL;
+}
+
+static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid)
+{
+ struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info;
+ u16 queue_id = p_cid->rel.queue_id;
+ bool b_rc = true;
+ u8 first;
+
+ mutex_lock(&p_l2_info->lock);
+
+ if (queue_id >= p_l2_info->queues) {
+ DP_NOTICE(p_hwfn,
+ "Requested to increase usage for qzone %04x out of %08x\n",
+ queue_id, p_l2_info->queues);
+ b_rc = false;
+ goto out;
+ }
+
+ first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id],
+ MAX_QUEUES_PER_QZONE);
+ if (first >= MAX_QUEUES_PER_QZONE) {
+ b_rc = false;
+ goto out;
+ }
+
+ __set_bit(first, p_l2_info->pp_qid_usage[queue_id]);
+ p_cid->qid_usage_idx = first;
+
+out:
+ mutex_unlock(&p_l2_info->lock);
+ return b_rc;
+}
+
+static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid)
+{
+ mutex_lock(&p_hwfn->p_l2_info->lock);
+
+ clear_bit(p_cid->qid_usage_idx,
+ p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);
+
+ mutex_unlock(&p_hwfn->p_l2_info->lock);
+}
+
void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid)
{
- /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
- if (!p_cid->is_vf && IS_PF(p_hwfn->cdev))
- qed_cxt_release_cid(p_hwfn, p_cid->cid);
+ bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID);
+
+ if (IS_PF(p_hwfn->cdev) && !b_legacy_vf)
+ _qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
+
+ /* For PF's VFs we maintain the index inside queue-zone in IOV */
+ if (p_cid->vfid == QED_QUEUE_CID_SELF)
+ qed_eth_queue_qid_usage_del(p_hwfn, p_cid);
+
vfree(p_cid);
}
/* The internal variant is only meant to be directly called by PFs initializing CIDs
* for their VFs.
*/
-struct qed_queue_cid *
+static struct qed_queue_cid *
_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
- u8 vf_qid,
- struct qed_queue_start_common_params *p_params)
+ struct qed_queue_start_common_params *p_params,
+ bool b_is_rx,
+ struct qed_queue_cid_vf_params *p_vf_params)
{
- bool b_is_same = (p_hwfn->hw_info.opaque_fid == opaque_fid);
struct qed_queue_cid *p_cid;
int rc;
@@ -96,10 +231,25 @@
p_cid->opaque_fid = opaque_fid;
p_cid->cid = cid;
- p_cid->vf_qid = vf_qid;
- p_cid->rel = *p_params;
p_cid->p_owner = p_hwfn;
+ /* Fill in parameters */
+ p_cid->rel.vport_id = p_params->vport_id;
+ p_cid->rel.queue_id = p_params->queue_id;
+ p_cid->rel.stats_id = p_params->stats_id;
+ p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
+ p_cid->b_is_rx = b_is_rx;
+ p_cid->sb_idx = p_params->sb_idx;
+
+ /* Fill in bits related to VFs' queues if information was provided */
+ if (p_vf_params) {
+ p_cid->vfid = p_vf_params->vfid;
+ p_cid->vf_qid = p_vf_params->vf_qid;
+ p_cid->vf_legacy = p_vf_params->vf_legacy;
+ } else {
+ p_cid->vfid = QED_QUEUE_CID_SELF;
+ }
+
/* Don't try calculating the absolute indices for VFs */
if (IS_VF(p_hwfn->cdev)) {
p_cid->abs = p_cid->rel;
@@ -121,7 +271,7 @@
/* In case of a PF configuring its VF's queues, the stats-id is already
* absolute [since there's a single index that's suitable per-VF].
*/
- if (b_is_same) {
+ if (p_cid->vfid == QED_QUEUE_CID_SELF) {
rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
&p_cid->abs.stats_id);
if (rc)
@@ -130,27 +280,29 @@
p_cid->abs.stats_id = p_cid->rel.stats_id;
}
- /* SBs relevant information was already provided as absolute */
- p_cid->abs.sb = p_cid->rel.sb;
- p_cid->abs.sb_idx = p_cid->rel.sb_idx;
-
- /* This is tricky - we're actually interested in whehter this is a PF
- * entry meant for the VF.
- */
- if (!b_is_same)
- p_cid->is_vf = true;
out:
+ /* VF-images have provided the qid_usage_idx on their own.
+ * Otherwise, we need to allocate a unique one.
+ */
+ if (!p_vf_params) {
+ if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid))
+ goto fail;
+ } else {
+ p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
+ }
+
DP_VERBOSE(p_hwfn,
QED_MSG_SP,
- "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
+ "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
p_cid->opaque_fid,
p_cid->cid,
p_cid->rel.vport_id,
p_cid->abs.vport_id,
p_cid->rel.queue_id,
+ p_cid->qid_usage_idx,
p_cid->abs.queue_id,
p_cid->rel.stats_id,
- p_cid->abs.stats_id, p_cid->abs.sb, p_cid->abs.sb_idx);
+ p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx);
return p_cid;
@@ -159,32 +311,61 @@
return NULL;
}
-static struct qed_queue_cid *qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
- u16 opaque_fid, struct
- qed_queue_start_common_params
- *p_params)
+struct qed_queue_cid *
+qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_queue_start_common_params *p_params,
+ bool b_is_rx,
+ struct qed_queue_cid_vf_params *p_vf_params)
{
struct qed_queue_cid *p_cid;
+ u8 vfid = QED_CXT_PF_CID;
+ bool b_legacy_vf = false;
u32 cid = 0;
+ /* In case of legacy VFs, the CID can be derived from the additional
+ * VF parameters - the VF assumes queue X uses CID X, so we can simply
+ * use the vf_qid for this purpose as well.
+ */
+ if (p_vf_params) {
+ vfid = p_vf_params->vfid;
+
+ if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) {
+ b_legacy_vf = true;
+ cid = p_vf_params->vf_qid;
+ }
+ }
+
/* Get a unique firmware CID for this queue, in case it's a PF.
* VFs don't need a CID as the queue configuration will be done
* by PF.
*/
- if (IS_PF(p_hwfn->cdev)) {
- if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) {
+ if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) {
+ if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+ &cid, vfid)) {
DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
return NULL;
}
}
- p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 0, p_params);
- if (!p_cid && IS_PF(p_hwfn->cdev))
- qed_cxt_release_cid(p_hwfn, cid);
+ p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
+ p_params, b_is_rx, p_vf_params);
+ if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf)
+ _qed_cxt_release_cid(p_hwfn, cid, vfid);
return p_cid;
}
+static struct qed_queue_cid *
+qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ bool b_is_rx,
+ struct qed_queue_start_common_params *p_params)
+{
+ return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
+ NULL);
+}
+
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params)
{
@@ -682,7 +863,7 @@
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
p_cid->opaque_fid, p_cid->cid,
- p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->abs.sb);
+ p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id);
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -698,8 +879,8 @@
p_ramrod = &p_ent->ramrod.rx_queue_start;
- p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb);
- p_ramrod->sb_index = p_cid->abs.sb_idx;
+ p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
+ p_ramrod->sb_index = p_cid->sb_idx;
p_ramrod->vport_id = p_cid->abs.vport_id;
p_ramrod->stats_counter_id = p_cid->abs.stats_id;
p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
@@ -712,13 +893,15 @@
p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
- if (p_cid->is_vf) {
+ if (p_cid->vfid != QED_QUEUE_CID_SELF) {
+ bool b_legacy_vf = !!(p_cid->vf_legacy &
+ QED_QCID_LEGACY_VF_RX_PROD);
+
p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"Queue%s is meant for VF rxq[%02x]\n",
- !!p_cid->b_legacy_vf ? " [legacy]" : "",
- p_cid->vf_qid);
- p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf;
+ b_legacy_vf ? " [legacy]" : "", p_cid->vf_qid);
+ p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
}
return qed_spq_post(p_hwfn, p_ent, NULL);
@@ -762,7 +945,7 @@
int rc;
/* Allocate a CID for the queue */
- p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
+ p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
if (!p_cid)
return -ENOMEM;
@@ -864,10 +1047,11 @@
/* Cleaning the queue requires the completion to arrive there.
* In addition, VFs require the answer to come as eqe to PF.
*/
- p_ramrod->complete_cqe_flg = (!p_cid->is_vf &&
+ p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
!b_eq_completion_only) ||
b_cqe_completion;
- p_ramrod->complete_event_flg = p_cid->is_vf || b_eq_completion_only;
+ p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) ||
+ b_eq_completion_only;
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -916,8 +1100,8 @@
p_ramrod = &p_ent->ramrod.tx_queue_start;
p_ramrod->vport_id = p_cid->abs.vport_id;
- p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb);
- p_ramrod->sb_index = p_cid->abs.sb_idx;
+ p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
+ p_ramrod->sb_index = p_cid->sb_idx;
p_ramrod->stats_counter_id = p_cid->abs.stats_id;
p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
@@ -966,7 +1150,7 @@
struct qed_queue_cid *p_cid;
int rc;
- p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
+ p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
if (!p_cid)
return -EINVAL;
@@ -1935,15 +2119,26 @@
ether_addr_copy(info->port_mac,
cdev->hwfns[0].hw_info.hw_mac_addr);
- } else {
- qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
- if (cdev->num_hwfns > 1) {
- u8 queues = 0;
- qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
+ info->xdp_supported = true;
+ } else {
+ u16 total_cids = 0;
+
+ /* Determine queues & XDP support */
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ u8 queues, cids;
+
+ qed_vf_get_num_cids(p_hwfn, &cids);
+ qed_vf_get_num_rxqs(p_hwfn, &queues);
info->num_queues += queues;
+ total_cids += cids;
}
+ /* Enable VF XDP in case the PF guarantees sufficient connections */
+ if (total_cids >= info->num_queues * 3)
+ info->xdp_supported = true;
+
qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
(u8 *)&info->num_vlan_filters);
qed_vf_get_num_mac_filters(&cdev->hwfns[0],
@@ -2194,9 +2389,9 @@
}
DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
- "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
+ "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
p_params->queue_id, rss_num, p_params->vport_id,
- p_params->sb);
+ p_params->p_sb->igu_sb_id);
return 0;
}
@@ -2244,9 +2439,9 @@
}
DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
- "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
+ "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
p_params->queue_id, rss_num, p_params->vport_id,
- p_params->sb);
+ p_params->p_sb->igu_sb_id);
return 0;
}
@@ -2301,14 +2496,25 @@
for_each_hwfn(cdev, i) {
struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_ptt *p_ptt;
struct qed_tunnel_info *tun;
tun = &hwfn->cdev->tunnel;
+ if (IS_PF(cdev)) {
+ p_ptt = qed_ptt_acquire(hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
+ } else {
+ p_ptt = NULL;
+ }
- rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
+ rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info,
QED_SPQ_MODE_EBLOCK, NULL);
- if (rc)
+ if (rc) {
+ if (IS_PF(cdev))
+ qed_ptt_release(hwfn, p_ptt);
return rc;
+ }
if (IS_PF_SRIOV(hwfn)) {
u16 vxlan_port, geneve_port;
@@ -2325,6 +2531,8 @@
qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
}
+ if (IS_PF(cdev))
+ qed_ptt_release(hwfn, p_ptt);
}
return 0;
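The pp_qid_usage bookkeeping introduced above gives every queue zone a small bitmap; opening a queue claims the first clear bit (recorded as qid_usage_idx) and closing it clears the bit again. A userspace sketch of the same idea — the kernel code uses find_first_zero_bit()/__set_bit() under p_l2_info->lock, plain bit operations stand in here:

#include <stdio.h>

#define QUEUES_PER_QZONE (sizeof(unsigned long) * 8)

/* Claim the first clear bit; returns the new qid_usage_idx or -1. */
static int qid_usage_add(unsigned long *map)
{
	unsigned int bit;

	for (bit = 0; bit < QUEUES_PER_QZONE; bit++) {
		if (!(*map & (1UL << bit))) {
			*map |= 1UL << bit;
			return (int)bit;
		}
	}
	return -1; /* queue zone exhausted */
}

static void qid_usage_del(unsigned long *map, int idx)
{
	*map &= ~(1UL << idx);
}

int main(void)
{
	unsigned long zone = 0;
	int a = qid_usage_add(&zone); /* 0 */
	int b = qid_usage_add(&zone); /* 1 */

	qid_usage_del(&zone, a);
	/* The freed index is handed out again. */
	printf("%d %d %d\n", a, b, qid_usage_add(&zone)); /* 0 1 0 */
	return 0;
}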
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 6f44229..f8f09aa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -277,40 +277,87 @@
void qed_reset_vport_stats(struct qed_dev *cdev);
-struct qed_queue_cid {
- /* 'Relative' is a relative term ;-). Usually the indices [not counting
- * SBs] would be PF-relative, but there are some cases where that isn't
- * the case - specifically for a PF configuring its VF indices it's
- * possible some fields [E.g., stats-id] in 'rel' would already be abs.
+#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8)
+#define QED_QUEUE_CID_SELF (0xff)
+
+/* Almost identical to the qed_queue_start_common_params,
+ * but here we maintain the SB index in IGU CAM.
+ */
+struct qed_queue_cid_params {
+ u8 vport_id;
+ u16 queue_id;
+ u8 stats_id;
+};
+
+/* Additional parameters required for initialization of the queue_cid
+ * and are relevant only for a PF initializing one for its VFs.
+ */
+struct qed_queue_cid_vf_params {
+ /* Should match the VF's relative index */
+ u8 vfid;
+
+ /* 0-based queue index. Should reflect the relative qzone the
+ * VF thinks is associated with it [in its range].
*/
- struct qed_queue_start_common_params rel;
- struct qed_queue_start_common_params abs;
+ u8 vf_qid;
+
+ /* Indicates a VF is legacy, making it differ in several things:
+ * - Producers would be placed in a different place.
+ * - Makes assumptions regarding the CIDs.
+ */
+ u8 vf_legacy;
+
+ u8 qid_usage_idx;
+};
+
+struct qed_queue_cid {
+ /* For stats-id, the `rel' is actually absolute as well */
+ struct qed_queue_cid_params rel;
+ struct qed_queue_cid_params abs;
+
+ /* These have no 'relative' meaning */
+ u16 sb_igu_id;
+ u8 sb_idx;
+
u32 cid;
u16 opaque_fid;
+ bool b_is_rx;
+
/* VF queues are mapped differently, so we need to know the
* relative queue associated with them [0-based].
* Notice this is relevant on the *PF* queue-cid of its VF's queues,
* and not on the VF itself.
*/
- bool is_vf;
+ u8 vfid;
u8 vf_qid;
- /* Legacy VFs might have Rx producer located elsewhere */
- bool b_legacy_vf;
+ /* We need an additional index to differentiate between queues opened
+ * for the same queue-zone, as VFs would have to communicate the info
+ * to the PF [otherwise PF has no way to differentiate].
+ */
+ u8 qid_usage_idx;
+
+ u8 vf_legacy;
+#define QED_QCID_LEGACY_VF_RX_PROD (BIT(0))
+#define QED_QCID_LEGACY_VF_CID (BIT(1))
struct qed_hwfn *p_owner;
};
+int qed_l2_alloc(struct qed_hwfn *p_hwfn);
+void qed_l2_setup(struct qed_hwfn *p_hwfn);
+void qed_l2_free(struct qed_hwfn *p_hwfn);
+
void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid);
-struct qed_queue_cid *_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- u8 vf_qid,
- struct qed_queue_start_common_params
- *p_params);
+struct qed_queue_cid *
+qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_queue_start_common_params *p_params,
+ bool b_is_rx,
+ struct qed_queue_cid_vf_params *p_vf_params);
int
qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
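Rounding out the L2 changes: the XDP gate in the qed_l2.c hunk above enables VF XDP only when the PF granted at least three CIDs per queue — presumably one Rx, one Tx and one XDP-Tx connection each. As a standalone sketch of that check (the per-queue split is this patch's stated assumption, not spelled out in the code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* XDP needs a dedicated Tx connection on top of the regular
 * Rx/Tx pair, hence the factor of 3. */
static bool vf_xdp_supported(uint16_t total_cids, uint16_t num_queues)
{
	return total_cids >= (uint32_t)num_queues * 3;
}

int main(void)
{
	printf("%d\n", vf_xdp_supported(48, 16)); /* 1: exactly 3 per queue */
	printf("%d\n", vf_xdp_supported(32, 16)); /* 0: only 2 per queue */
	return 0;
}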
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 09c8641..17f9b0a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -38,7 +38,6 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <linux/bitops.h>
@@ -62,7 +61,7 @@
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
-#include "qed_roce.h"
+#include "qed_rdma.h"
#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
@@ -74,7 +73,6 @@
int rx_cnt;
u32 rx_size;
u8 handle;
- bool frags_mapped;
/* Lock protecting LL2 buffer lists in sleepless context */
spinlock_t lock;
@@ -90,13 +88,14 @@
dma_addr_t phys_addr;
};
-static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
+static void qed_ll2b_complete_tx_packet(void *cxt,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment,
bool b_last_packet)
{
+ struct qed_hwfn *p_hwfn = cxt;
struct qed_dev *cdev = p_hwfn->cdev;
struct sk_buff *skb = cookie;
@@ -108,12 +107,6 @@
cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
b_last_fragment);
- if (cdev->ll2->frags_mapped)
- /* Case where mapped frags were received, need to
- * free skb with nr_frags marked as 0
- */
- skb_shinfo(skb)->nr_frags = 0;
-
dev_kfree_skb_any(skb);
}
@@ -165,42 +158,34 @@
qed_ll2_dealloc_buffer(cdev, buffer);
}
-static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- struct qed_ll2_rx_packet *p_pkt,
- struct core_rx_fast_path_cqe *p_cqe,
- bool b_last_packet)
+void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
{
- u16 packet_length = le16_to_cpu(p_cqe->packet_length);
- struct qed_ll2_buffer *buffer = p_pkt->cookie;
+ struct qed_hwfn *p_hwfn = cxt;
+ struct qed_ll2_buffer *buffer = data->cookie;
struct qed_dev *cdev = p_hwfn->cdev;
- u16 vlan = le16_to_cpu(p_cqe->vlan);
- u32 opaque_data_0, opaque_data_1;
- u8 pad = p_cqe->placement_offset;
dma_addr_t new_phys_addr;
struct sk_buff *skb;
bool reuse = false;
int rc = -EINVAL;
u8 *new_data;
- opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
- opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);
-
DP_VERBOSE(p_hwfn,
(NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
"Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
- (u64)p_pkt->rx_buf_addr, pad, packet_length,
- le16_to_cpu(p_cqe->parse_flags.flags), vlan,
- opaque_data_0, opaque_data_1);
+ (u64)data->rx_buf_addr,
+ data->u.placement_offset,
+ data->length.packet_length,
+ data->parse_flags,
+ data->vlan, data->opaque_data_0, data->opaque_data_1);
if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_OFFSET, 16, 1,
- buffer->data, packet_length, false);
+ buffer->data, data->length.packet_length, false);
}
/* Determine if data is valid */
- if (packet_length < ETH_HLEN)
+ if (data->length.packet_length < ETH_HLEN)
reuse = true;
/* Allocate a replacement buffer; reuse upon failure */
@@ -220,9 +205,9 @@
goto out_post;
}
- pad += NET_SKB_PAD;
- skb_reserve(skb, pad);
- skb_put(skb, packet_length);
+ data->u.placement_offset += NET_SKB_PAD;
+ skb_reserve(skb, data->u.placement_offset);
+ skb_put(skb, data->length.packet_length);
skb_checksum_none_assert(skb);
/* Get partial ethernet information instead of eth_type_trans(),
@@ -233,10 +218,12 @@
/* Pass SKB onward */
if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
- if (vlan)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
+ if (data->vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ data->vlan);
cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
- opaque_data_0, opaque_data_1);
+ data->opaque_data_0,
+ data->opaque_data_1);
}
/* Update Buffer information and update FW producer */
@@ -322,7 +309,7 @@
list_del(&p_pkt->list_entry);
b_last_packet = list_empty(&p_tx->active_descq);
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
- if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+ if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
struct qed_ooo_buffer *p_buffer;
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -334,21 +321,12 @@
b_last_frag =
p_tx->cur_completing_bd_idx == p_pkt->bd_used;
tx_frag = p_pkt->bds_set[0].tx_frag;
- if (p_ll2_conn->conn.gsi_enable)
- qed_ll2b_release_tx_gsi_packet(p_hwfn,
- p_ll2_conn->
- my_id,
- p_pkt->cookie,
- tx_frag,
- b_last_frag,
- b_last_packet);
- else
- qed_ll2b_complete_tx_packet(p_hwfn,
- p_ll2_conn->my_id,
- p_pkt->cookie,
- tx_frag,
- b_last_frag,
- b_last_packet);
+ p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ tx_frag,
+ b_last_frag,
+ b_last_packet);
}
}
}
@@ -361,7 +339,6 @@
struct qed_ll2_tx_packet *p_pkt;
bool b_last_frag = false;
unsigned long flags;
- dma_addr_t tx_frag;
int rc = -EINVAL;
spin_lock_irqsave(&p_tx->lock, flags);
@@ -402,19 +379,13 @@
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
spin_unlock_irqrestore(&p_tx->lock, flags);
- tx_frag = p_pkt->bds_set[0].tx_frag;
- if (p_ll2_conn->conn.gsi_enable)
- qed_ll2b_complete_tx_gsi_packet(p_hwfn,
- p_ll2_conn->my_id,
- p_pkt->cookie,
- tx_frag,
- b_last_frag, !num_bds);
- else
- qed_ll2b_complete_tx_packet(p_hwfn,
- p_ll2_conn->my_id,
- p_pkt->cookie,
- tx_frag,
- b_last_frag, !num_bds);
+
+ p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ p_pkt->bds_set[0].tx_frag,
+ b_last_frag, !num_bds);
+
spin_lock_irqsave(&p_tx->lock, flags);
}
@@ -425,81 +396,71 @@
return rc;
}
-static int
-qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_info,
- union core_rx_cqe_union *p_cqe,
- unsigned long lock_flags, bool b_last_cqe)
+static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
+ union core_rx_cqe_union *p_cqe,
+ struct qed_ll2_comp_rx_data *data)
{
- struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
- struct qed_ll2_rx_packet *p_pkt = NULL;
- u16 packet_length, parse_flags, vlan;
- u32 src_mac_addrhi;
- u16 src_mac_addrlo;
-
- if (!list_empty(&p_rx->active_descq))
- p_pkt = list_first_entry(&p_rx->active_descq,
- struct qed_ll2_rx_packet, list_entry);
- if (!p_pkt) {
- DP_NOTICE(p_hwfn,
- "GSI Rx completion but active_descq is empty\n");
- return -EIO;
- }
-
- list_del(&p_pkt->list_entry);
- parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
- packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
- vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
- src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
- src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
- if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
- DP_NOTICE(p_hwfn,
- "Mismatch between active_descq and the LL2 Rx chain\n");
- list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
-
- spin_unlock_irqrestore(&p_rx->lock, lock_flags);
- qed_ll2b_complete_rx_gsi_packet(p_hwfn,
- p_ll2_info->my_id,
- p_pkt->cookie,
- p_pkt->rx_buf_addr,
- packet_length,
- p_cqe->rx_cqe_gsi.data_length_error,
- parse_flags,
- vlan,
- src_mac_addrhi,
- src_mac_addrlo, b_last_cqe);
- spin_lock_irqsave(&p_rx->lock, lock_flags);
-
- return 0;
+ data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
+ data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
+ data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
+ data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
+ data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
+ data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
}
-static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_conn,
- union core_rx_cqe_union *p_cqe,
- unsigned long *p_lock_flags,
- bool b_last_cqe)
+static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
+ union core_rx_cqe_union *p_cqe,
+ struct qed_ll2_comp_rx_data *data)
+{
+ data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
+ data->length.packet_length =
+ le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
+ data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
+ data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
+ data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
+ data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
+}
+
+static int
+qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn,
+ union core_rx_cqe_union *p_cqe,
+ unsigned long *p_lock_flags, bool b_last_cqe)
{
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
struct qed_ll2_rx_packet *p_pkt = NULL;
+ struct qed_ll2_comp_rx_data data;
if (!list_empty(&p_rx->active_descq))
p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry);
if (!p_pkt) {
DP_NOTICE(p_hwfn,
- "LL2 Rx completion but active_descq is empty\n");
+ "[%d] LL2 Rx completion but active_descq is empty\n",
+ p_ll2_conn->input.conn_type);
+
return -EIO;
}
list_del(&p_pkt->list_entry);
+ if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR)
+ qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
+ else
+ qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
DP_NOTICE(p_hwfn,
"Mismatch between active_descq and the LL2 Rx chain\n");
+
list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+ data.connection_handle = p_ll2_conn->my_id;
+ data.cookie = p_pkt->cookie;
+ data.rx_buf_addr = p_pkt->rx_buf_addr;
+ data.b_last_packet = b_last_cqe;
+
spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
- qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
- p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
+ p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data);
+
spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
return 0;
@@ -507,7 +468,7 @@
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
- struct qed_ll2_info *p_ll2_conn = cookie;
+ struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
union core_rx_cqe_union *cqe = NULL;
u16 cq_new_idx = 0, cq_old_idx = 0;
@@ -521,7 +482,9 @@
while (cq_new_idx != cq_old_idx) {
bool b_last_cqe = (cq_new_idx == cq_old_idx);
- cqe = qed_chain_consume(&p_rx->rcq_chain);
+ cqe =
+ (union core_rx_cqe_union *)
+ qed_chain_consume(&p_rx->rcq_chain);
cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
DP_VERBOSE(p_hwfn,
@@ -535,13 +498,10 @@
rc = -EINVAL;
break;
case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
- rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
- cqe, flags, b_last_cqe);
- break;
case CORE_RX_CQE_TYPE_REGULAR:
- rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
- cqe, &flags,
- b_last_cqe);
+ rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,
+ cqe, &flags,
+ b_last_cqe);
break;
default:
rc = -EIO;
@@ -565,10 +525,6 @@
p_rx = &p_ll2_conn->rx_queue;
while (!list_empty(&p_rx->active_descq)) {
- dma_addr_t rx_buf_addr;
- void *cookie;
- bool b_last;
-
p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry);
if (!p_pkt)
@@ -576,22 +532,26 @@
list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
- if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+ if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
struct qed_ooo_buffer *p_buffer;
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
p_buffer);
} else {
- rx_buf_addr = p_pkt->rx_buf_addr;
- cookie = p_pkt->cookie;
+ dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr;
+ void *cookie = p_pkt->cookie;
+ bool b_last;
b_last = list_empty(&p_rx->active_descq);
+ p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie,
+ p_ll2_conn->my_id,
+ cookie,
+ rx_buf_addr, b_last);
}
}
}
-#if IS_ENABLED(CONFIG_QED_ISCSI)
static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
{
u8 bd_flags = 0;
@@ -741,12 +701,13 @@
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
+ struct qed_ll2_tx_pkt_info tx_pkt;
struct qed_ooo_buffer *p_buffer;
- int rc;
u16 l4_hdr_offset_w;
dma_addr_t first_frag;
u16 parse_flags;
u8 bd_flags;
+ int rc;
/* Submit Tx buffers here */
while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
@@ -761,13 +722,18 @@
SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
- rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
- p_buffer->vlan, bd_flags,
- l4_hdr_offset_w,
- p_ll2_conn->conn.tx_dest, 0,
- first_frag,
- p_buffer->packet_length,
- p_buffer, true);
+ memset(&tx_pkt, 0, sizeof(tx_pkt));
+ tx_pkt.num_of_bds = 1;
+ tx_pkt.vlan = p_buffer->vlan;
+ tx_pkt.bd_flags = bd_flags;
+ tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
+ tx_pkt.tx_dest = p_ll2_conn->tx_dest;
+ tx_pkt.first_frag = first_frag;
+ tx_pkt.first_frag_len = p_buffer->packet_length;
+ tx_pkt.cookie = p_buffer;
+
+ rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
+ &tx_pkt, true);
if (rc) {
qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
p_buffer, false);
@@ -874,85 +840,6 @@
return 0;
}
-static int
-qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_info,
- u16 rx_num_ooo_buffers, u16 mtu)
-{
- struct qed_ooo_buffer *p_buf = NULL;
- void *p_virt;
- u16 buf_idx;
- int rc = 0;
-
- if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
- return rc;
-
- if (!rx_num_ooo_buffers)
- return -EINVAL;
-
- for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) {
- p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
- if (!p_buf) {
- rc = -ENOMEM;
- goto out;
- }
-
- p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
- p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
- ETH_CACHE_LINE_SIZE - 1) &
- ~(ETH_CACHE_LINE_SIZE - 1);
- p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- p_buf->rx_buffer_size,
- &p_buf->rx_buffer_phys_addr,
- GFP_KERNEL);
- if (!p_virt) {
- kfree(p_buf);
- rc = -ENOMEM;
- goto out;
- }
-
- p_buf->rx_buffer_virt_addr = p_virt;
- qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
- }
-
- DP_VERBOSE(p_hwfn, QED_MSG_LL2,
- "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
- rx_num_ooo_buffers, p_buf->rx_buffer_size);
-
-out:
- return rc;
-}
-
-static void
-qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_conn)
-{
- if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
- return;
-
- qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
- qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
-}
-
-static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_conn)
-{
- struct qed_ooo_buffer *p_buffer;
-
- if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
- return;
-
- qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
- while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
- p_hwfn->p_ooo_info))) {
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- p_buffer->rx_buffer_size,
- p_buffer->rx_buffer_virt_addr,
- p_buffer->rx_buffer_phys_addr);
- kfree(p_buffer);
- }
-}
-
static void qed_ll2_stop_ooo(struct qed_dev *cdev)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
@@ -966,69 +853,11 @@
*handle = QED_LL2_UNUSED_HANDLE;
}
-static int qed_ll2_start_ooo(struct qed_dev *cdev,
- struct qed_ll2_params *params)
-{
- struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
- u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
- struct qed_ll2_conn ll2_info = { 0 };
- int rc;
-
- ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
- ll2_info.mtu = params->mtu;
- ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
- ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
- ll2_info.tx_tc = OOO_LB_TC;
- ll2_info.tx_dest = CORE_TX_DEST_LB;
-
- rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
- QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
- handle);
- if (rc) {
- DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
- goto out;
- }
-
- rc = qed_ll2_establish_connection(hwfn, *handle);
- if (rc) {
- DP_INFO(cdev, "Failed to establist LL2 OOO connection\n");
- goto fail;
- }
-
- return 0;
-
-fail:
- qed_ll2_release_connection(hwfn, *handle);
-out:
- *handle = QED_LL2_UNUSED_HANDLE;
- return rc;
-}
-#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
-static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn,
- void *p_cookie) { return -EINVAL; }
-static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn,
- void *p_cookie) { return -EINVAL; }
-static inline int
-qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_info,
- u16 rx_num_ooo_buffers, u16 mtu) { return 0; }
-static inline void
-qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_conn) { return; }
-static inline void
-qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_conn) { return; }
-static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; }
-static inline int qed_ll2_start_ooo(struct qed_dev *cdev,
- struct qed_ll2_params *params)
- { return -EINVAL; }
-#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
-
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn,
u8 action_on_error)
{
- enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
+ enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
struct core_rx_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -1054,16 +883,15 @@
p_ramrod->sb_index = p_rx->rx_sb_index;
p_ramrod->complete_event_flg = 1;
- p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
- DMA_REGPAIR_LE(p_ramrod->bd_base,
- p_rx->rxq_chain.p_phys_addr);
+ p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
+ DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
qed_chain_get_pbl_phys(&p_rx->rcq_chain));
- p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
- p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
+ p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
+ p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
p_ramrod->queue_id = p_ll2_conn->queue_id;
p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
: 1;
@@ -1078,14 +906,14 @@
}
p_ramrod->action_on_error.error_type = action_on_error;
- p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
+ p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
- enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
+ enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
struct core_tx_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -1096,7 +924,7 @@
if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
return 0;
- if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
+ if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO)
p_ll2_conn->tx_stats_en = 0;
else
p_ll2_conn->tx_stats_en = 1;
@@ -1117,7 +945,7 @@
p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
p_ramrod->sb_index = p_tx->tx_sb_index;
- p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
+ p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
@@ -1126,7 +954,7 @@
pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
p_ramrod->pbl_size = cpu_to_le16(pbl_size);
- switch (p_ll2_conn->conn.tx_tc) {
+ switch (p_ll2_conn->input.tx_tc) {
case LB_TC:
pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
break;
@@ -1156,7 +984,8 @@
DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
}
- p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
+ p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
+
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -1212,22 +1041,22 @@
static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
+ struct qed_ll2_info *p_ll2_info)
{
struct qed_ll2_rx_packet *p_descq;
u32 capacity;
int rc = 0;
- if (!rx_num_desc)
+ if (!p_ll2_info->input.rx_num_desc)
goto out;
rc = qed_chain_alloc(p_hwfn->cdev,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_NEXT_PTR,
QED_CHAIN_CNT_TYPE_U16,
- rx_num_desc,
+ p_ll2_info->input.rx_num_desc,
sizeof(struct core_rx_bd),
- &p_ll2_info->rx_queue.rxq_chain);
+ &p_ll2_info->rx_queue.rxq_chain, NULL);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
goto out;
@@ -1247,9 +1076,9 @@
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
- rx_num_desc,
+ p_ll2_info->input.rx_num_desc,
sizeof(struct core_rx_fast_path_cqe),
- &p_ll2_info->rx_queue.rcq_chain);
+ &p_ll2_info->rx_queue.rcq_chain, NULL);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
goto out;
@@ -1257,30 +1086,29 @@
DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
- p_ll2_info->conn.conn_type, rx_num_desc);
+ p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc);
out:
return rc;
}
static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_info,
- u16 tx_num_desc)
+ struct qed_ll2_info *p_ll2_info)
{
struct qed_ll2_tx_packet *p_descq;
u32 capacity;
int rc = 0;
- if (!tx_num_desc)
+ if (!p_ll2_info->input.tx_num_desc)
goto out;
rc = qed_chain_alloc(p_hwfn->cdev,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
- tx_num_desc,
+ p_ll2_info->input.tx_num_desc,
sizeof(struct core_tx_bd),
- &p_ll2_info->tx_queue.txq_chain);
+ &p_ll2_info->tx_queue.txq_chain, NULL);
if (rc)
goto out;
@@ -1295,28 +1123,112 @@
DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
- p_ll2_info->conn.conn_type, tx_num_desc);
+ p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc);
out:
if (rc)
DP_NOTICE(p_hwfn,
"Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
- tx_num_desc);
+ p_ll2_info->input.tx_num_desc);
return rc;
}
-int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
- struct qed_ll2_conn *p_params,
- u16 rx_num_desc,
- u16 tx_num_desc,
- u8 *p_connection_handle)
+static int
+qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_info, u16 mtu)
{
+ struct qed_ooo_buffer *p_buf = NULL;
+ void *p_virt;
+ u16 buf_idx;
+ int rc = 0;
+
+ if (p_ll2_info->input.conn_type != QED_LL2_TYPE_ISCSI_OOO)
+ return rc;
+
+ /* Correct number of requested OOO buffers if needed */
+ if (!p_ll2_info->input.rx_num_ooo_buffers) {
+ u16 num_desc = p_ll2_info->input.rx_num_desc;
+
+ if (!num_desc)
+ return -EINVAL;
+ p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2;
+ }
+
+ for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers;
+ buf_idx++) {
+ p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
+ if (!p_buf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
+ p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
+ ETH_CACHE_LINE_SIZE - 1) &
+ ~(ETH_CACHE_LINE_SIZE - 1);
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_buf->rx_buffer_size,
+ &p_buf->rx_buffer_phys_addr,
+ GFP_KERNEL);
+ if (!p_virt) {
+ kfree(p_buf);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ p_buf->rx_buffer_virt_addr = p_virt;
+ qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+ "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
+ p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size);
+
+out:
+ return rc;
+}
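
The sizing above pads the MTU with 26 bytes of driver headroom plus one cache line, then rounds up to a cache-line multiple with the usual power-of-two mask trick; for mtu = 1500 and ETH_CACHE_LINE_SIZE = 64 that is 1500 + 26 + 64 = 1590, rounded up to 1600. The same arithmetic in isolation (the kernel's ALIGN() macro is the idiomatic equivalent):

/* Round-up used by qed_ll2_acquire_connection_ooo(); 'cacheline' must be
 * a power of two for the mask trick to hold.
 */
static inline u32 ll2_ooo_buf_size(u16 mtu, u32 cacheline)
{
	u32 size = mtu + 26 + cacheline;

	return (size + cacheline - 1) & ~(cacheline - 1);
}
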
+
+static int
+qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
+{
+ if (!cbs || !cbs->rx_comp_cb || !cbs->rx_release_cb ||
+ !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie)
+ return -EINVAL;
+
+ p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;
+ p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
+ p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
+ p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
+ p_ll2_info->cbs.cookie = cbs->cookie;
+
+ return 0;
+}
+
+static enum core_error_handle
+qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
+{
+ switch (err) {
+ case QED_LL2_DROP_PACKET:
+ return LL2_DROP_PACKET;
+ case QED_LL2_DO_NOTHING:
+ return LL2_DO_NOTHING;
+ case QED_LL2_ASSERT:
+ return LL2_ASSERT;
+ default:
+ return LL2_DO_NOTHING;
+ }
+}
+
+int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
+{
+ struct qed_hwfn *p_hwfn = cxt;
qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
struct qed_ll2_info *p_ll2_info = NULL;
+ u8 i, *p_tx_max;
int rc;
- u8 i;
- if (!p_connection_handle || !p_hwfn->p_ll2_info)
+ if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
return -EINVAL;
/* Find a free connection to be used */
@@ -1335,23 +1247,40 @@
if (!p_ll2_info)
return -EBUSY;
- p_ll2_info->conn = *p_params;
+ memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
- rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
+ p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
+ CORE_TX_DEST_NW : CORE_TX_DEST_LB;
+
+ /* Correct maximum number of Tx BDs */
+ p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
+ if (*p_tx_max == 0)
+ *p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET;
+ else
+ *p_tx_max = min_t(u8, *p_tx_max,
+ CORE_LL2_TX_MAX_BDS_PER_PACKET);
+
+ rc = qed_ll2_set_cbs(p_ll2_info, data->cbs);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Invalid callback functions\n");
+ goto q_allocate_fail;
+ }
+
+ rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info);
if (rc)
goto q_allocate_fail;
- rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
+ rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info);
if (rc)
goto q_allocate_fail;
rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
- rx_num_desc * 2, p_params->mtu);
+ data->input.mtu);
if (rc)
goto q_allocate_fail;
/* Register callbacks for the Rx/Tx queues */
- if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+ if (data->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
comp_rx_cb = qed_ll2_lb_rxq_completion;
comp_tx_cb = qed_ll2_lb_txq_completion;
} else {
@@ -1359,7 +1288,7 @@
comp_tx_cb = qed_ll2_txq_completion;
}
- if (rx_num_desc) {
+ if (data->input.rx_num_desc) {
qed_int_register_cb(p_hwfn, comp_rx_cb,
&p_hwfn->p_ll2_info[i],
&p_ll2_info->rx_queue.rx_sb_index,
@@ -1367,7 +1296,7 @@
p_ll2_info->rx_queue.b_cb_registred = true;
}
- if (tx_num_desc) {
+ if (data->input.tx_num_desc) {
qed_int_register_cb(p_hwfn,
comp_tx_cb,
&p_hwfn->p_ll2_info[i],
@@ -1376,7 +1305,7 @@
p_ll2_info->tx_queue.b_cb_registred = true;
}
- *p_connection_handle = i;
+ *data->p_connection_handle = i;
return rc;
q_allocate_fail:
@@ -1387,24 +1316,39 @@
static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
+ enum qed_ll2_error_handle error_input;
+ enum core_error_handle error_mode;
u8 action_on_error = 0;
if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
return 0;
DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
-
+ error_input = p_ll2_conn->input.ai_err_packet_too_big;
+ error_mode = qed_ll2_get_error_choice(error_input);
SET_FIELD(action_on_error,
- CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
- p_ll2_conn->conn.ai_err_packet_too_big);
- SET_FIELD(action_on_error,
- CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf);
+ CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode);
+ error_input = p_ll2_conn->input.ai_err_no_buf;
+ error_mode = qed_ll2_get_error_choice(error_input);
+ SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);
return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
}
-int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+static void
+qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
{
+ if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_ISCSI_OOO)
+ return;
+
+ qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+ qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
+}
+
+int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
+{
+ struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn;
struct qed_ll2_rx_queue *p_rx;
struct qed_ll2_tx_queue *p_tx;
@@ -1482,7 +1426,7 @@
qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
- if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
+ if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
qed_llh_add_protocol_filter(p_hwfn, p_ptt,
0x8906, 0,
QED_LLH_FILTER_ETHERTYPE);
@@ -1531,11 +1475,12 @@
DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}
-int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
+int qed_ll2_post_rx_buffer(void *cxt,
u8 connection_handle,
dma_addr_t addr,
u16 buf_len, void *cookie, u8 notify_fw)
{
+ struct qed_hwfn *p_hwfn = cxt;
struct core_rx_bd_with_buff_len *p_curb = NULL;
struct qed_ll2_rx_packet *p_curp = NULL;
struct qed_ll2_info *p_ll2_conn;
@@ -1594,20 +1539,18 @@
static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
struct qed_ll2_tx_queue *p_tx,
struct qed_ll2_tx_packet *p_curp,
- u8 num_of_bds,
- dma_addr_t first_frag,
- u16 first_frag_len, void *p_cookie,
+ struct qed_ll2_tx_pkt_info *pkt,
u8 notify_fw)
{
list_del(&p_curp->list_entry);
- p_curp->cookie = p_cookie;
- p_curp->bd_used = num_of_bds;
+ p_curp->cookie = pkt->cookie;
+ p_curp->bd_used = pkt->num_of_bds;
p_curp->notify_fw = notify_fw;
p_tx->cur_send_packet = p_curp;
p_tx->cur_send_frag_num = 0;
- p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
- p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
+ p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
+ p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
p_tx->cur_send_frag_num++;
}
@@ -1615,51 +1558,52 @@
qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2,
struct qed_ll2_tx_packet *p_curp,
- u8 num_of_bds,
- enum core_tx_dest tx_dest,
- u16 vlan,
- u8 bd_flags,
- u16 l4_hdr_offset_w,
- enum core_roce_flavor_type roce_flavor,
- dma_addr_t first_frag,
- u16 first_frag_len)
+ struct qed_ll2_tx_pkt_info *pkt)
{
struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
struct core_tx_bd *start_bd = NULL;
+ enum core_roce_flavor_type roce_flavor;
+ enum core_tx_dest tx_dest;
u16 bd_data = 0, frag_idx;
+ roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
+ : CORE_RROCE;
+
+ tx_dest = (pkt->tx_dest == QED_LL2_TX_DEST_NW) ? CORE_TX_DEST_NW
+ : CORE_TX_DEST_LB;
+
start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
- start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
+ start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
- cpu_to_le16(l4_hdr_offset_w));
+ cpu_to_le16(pkt->l4_hdr_offset_w));
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
- bd_data |= bd_flags;
+ bd_data |= pkt->bd_flags;
SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
- SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds);
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
- DMA_REGPAIR_LE(start_bd->addr, first_frag);
- start_bd->nbytes = cpu_to_le16(first_frag_len);
+ DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
+ start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);
DP_VERBOSE(p_hwfn,
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
p_ll2->queue_id,
p_ll2->cid,
- p_ll2->conn.conn_type,
+ p_ll2->input.conn_type,
prod_idx,
- first_frag_len,
- num_of_bds,
+ pkt->first_frag_len,
+ pkt->num_of_bds,
le32_to_cpu(start_bd->addr.hi),
le32_to_cpu(start_bd->addr.lo));
- if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
+ if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
return;
/* Need to provide the packet with additional BDs for frags */
for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
- frag_idx < num_of_bds; frag_idx++) {
+ frag_idx < pkt->num_of_bds; frag_idx++) {
struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
@@ -1722,26 +1666,20 @@
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
p_ll2_conn->queue_id,
- p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
+ p_ll2_conn->cid,
+ p_ll2_conn->input.conn_type, db_msg.spq_prod);
}
-int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
+int qed_ll2_prepare_tx_packet(void *cxt,
u8 connection_handle,
- u8 num_of_bds,
- u16 vlan,
- u8 bd_flags,
- u16 l4_hdr_offset_w,
- enum qed_ll2_tx_dest e_tx_dest,
- enum qed_ll2_roce_flavor_type qed_roce_flavor,
- dma_addr_t first_frag,
- u16 first_frag_len, void *cookie, u8 notify_fw)
+ struct qed_ll2_tx_pkt_info *pkt,
+ bool notify_fw)
{
+ struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_tx_packet *p_curp = NULL;
struct qed_ll2_info *p_ll2_conn = NULL;
- enum core_roce_flavor_type roce_flavor;
struct qed_ll2_tx_queue *p_tx;
struct qed_chain *p_tx_chain;
- enum core_tx_dest tx_dest;
unsigned long flags;
int rc = 0;
@@ -1751,7 +1689,7 @@
p_tx = &p_ll2_conn->tx_queue;
p_tx_chain = &p_tx->txq_chain;
- if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
+ if (pkt->num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
return -EIO;
spin_lock_irqsave(&p_tx->lock, flags);
@@ -1764,7 +1702,7 @@
if (!list_empty(&p_tx->free_descq))
p_curp = list_first_entry(&p_tx->free_descq,
struct qed_ll2_tx_packet, list_entry);
- if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
+ if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
p_curp = NULL;
if (!p_curp) {
@@ -1772,26 +1710,10 @@
goto out;
}
- tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW :
- CORE_TX_DEST_LB;
- if (qed_roce_flavor == QED_LL2_ROCE) {
- roce_flavor = CORE_ROCE;
- } else if (qed_roce_flavor == QED_LL2_RROCE) {
- roce_flavor = CORE_RROCE;
- } else {
- rc = -EINVAL;
- goto out;
- }
-
/* Prepare packet and BD, and perhaps send a doorbell to FW */
- qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
- num_of_bds, first_frag,
- first_frag_len, cookie, notify_fw);
- qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
- num_of_bds, tx_dest,
- vlan, bd_flags, l4_hdr_offset_w,
- roce_flavor,
- first_frag, first_frag_len);
+ qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);
+
+ qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);
qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
@@ -1800,11 +1722,12 @@
return rc;
}
-int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
+int qed_ll2_set_fragment_of_tx_packet(void *cxt,
u8 connection_handle,
dma_addr_t addr, u16 nbytes)
{
struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
+ struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn = NULL;
u16 cur_send_frag_num = 0;
struct core_tx_bd *p_bd;
@@ -1839,8 +1762,9 @@
return 0;
}
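
After the signature change, sending a multi-fragment packet is a two-step protocol: one qed_ll2_prepare_tx_packet() call that declares the total BD count and carries the first fragment inside struct qed_ll2_tx_pkt_info, then one qed_ll2_set_fragment_of_tx_packet() call per remaining fragment. A hedged sketch (handle, nr_frags, my_cookie and the frag_addr/frag_len arrays are placeholders):

struct qed_ll2_tx_pkt_info pkt;
int i, rc;

memset(&pkt, 0, sizeof(pkt));
pkt.num_of_bds = 1 + nr_frags;		/* first fragment plus the rest */
pkt.first_frag = frag_addr[0];
pkt.first_frag_len = frag_len[0];
pkt.tx_dest = QED_LL2_TX_DEST_NW;
pkt.cookie = my_cookie;			/* echoed back via tx_comp_cb */

rc = qed_ll2_prepare_tx_packet(p_hwfn, handle, &pkt, true);
for (i = 1; !rc && i <= nr_frags; i++)
	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
					       frag_addr[i], frag_len[i]);
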
-int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
{
+ struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn = NULL;
int rc = -EINVAL;
struct qed_ptt *p_ptt;
@@ -1870,10 +1794,10 @@
qed_ll2_rxq_flush(p_hwfn, connection_handle);
}
- if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
+ if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO)
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
- if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
+ if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
0x8906, 0,
QED_LLH_FILTER_ETHERTYPE);
@@ -1887,8 +1811,28 @@
return rc;
}
-void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
{
+ struct qed_ooo_buffer *p_buffer;
+
+ if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_ISCSI_OOO)
+ return;
+
+ qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+ while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
+ p_hwfn->p_ooo_info))) {
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_buffer->rx_buffer_size,
+ p_buffer->rx_buffer_virt_addr,
+ p_buffer->rx_buffer_phys_addr);
+ kfree(p_buffer);
+ }
+}
+
+void qed_ll2_release_connection(void *cxt, u8 connection_handle)
+{
+ struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn = NULL;
p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
@@ -1921,7 +1865,7 @@
mutex_unlock(&p_ll2_conn->mutex);
}
-struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
+int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_ll2_info *p_ll2_connections;
u8 i;
@@ -1931,28 +1875,52 @@
sizeof(struct qed_ll2_info), GFP_KERNEL);
if (!p_ll2_connections) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
- return NULL;
+ return -ENOMEM;
}
for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
p_ll2_connections[i].my_id = i;
- return p_ll2_connections;
+ p_hwfn->p_ll2_info = p_ll2_connections;
+ return 0;
}
-void qed_ll2_setup(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_connections)
+void qed_ll2_setup(struct qed_hwfn *p_hwfn)
{
int i;
for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
- mutex_init(&p_ll2_connections[i].mutex);
+ mutex_init(&p_hwfn->p_ll2_info[i].mutex);
}
-void qed_ll2_free(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_connections)
+void qed_ll2_free(struct qed_hwfn *p_hwfn)
{
- kfree(p_ll2_connections);
+ if (!p_hwfn->p_ll2_info)
+ return;
+
+ kfree(p_hwfn->p_ll2_info);
+ p_hwfn->p_ll2_info = NULL;
+}
+
+static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_ll2_stats *p_stats)
+{
+ struct core_ll2_port_stats port_stats;
+
+ memset(&port_stats, 0, sizeof(port_stats));
+ qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
+ BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
+ sizeof(port_stats));
+
+ p_stats->gsi_invalid_hdr = HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
+ p_stats->gsi_invalid_pkt_length =
+ HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length);
+ p_stats->gsi_unsupported_pkt_typ =
+ HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ);
+ p_stats->gsi_crcchksm_error =
+ HILO_64_REGPAIR(port_stats.gsi_crcchksm_error);
}
static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
@@ -2018,9 +1986,10 @@
p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}
-int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
+int qed_ll2_get_stats(void *cxt,
u8 connection_handle, struct qed_ll2_stats *p_stats)
{
+ struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn = NULL;
struct qed_ptt *p_ptt;
@@ -2038,6 +2007,8 @@
return -EINVAL;
}
+ if (p_ll2_conn->input.gsi_enable)
+ _qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);
_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
if (p_ll2_conn->tx_stats_en)
@@ -2047,6 +2018,17 @@
return 0;
}
+static void qed_ll2b_release_rx_packet(void *cxt,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ bool b_last_packet)
+{
+ struct qed_hwfn *p_hwfn = cxt;
+
+ qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
+}
+
static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
const struct qed_ll2_cb_ops *ops,
void *cookie)
@@ -2055,21 +2037,86 @@
cdev->ll2->cb_cookie = cookie;
}
+struct qed_ll2_cbs ll2_cbs = {
+ .rx_comp_cb = &qed_ll2b_complete_rx_packet,
+ .rx_release_cb = &qed_ll2b_release_rx_packet,
+ .tx_comp_cb = &qed_ll2b_complete_tx_packet,
+ .tx_release_cb = &qed_ll2b_complete_tx_packet,
+};
+
+static void qed_ll2_set_conn_data(struct qed_dev *cdev,
+ struct qed_ll2_acquire_data *data,
+ struct qed_ll2_params *params,
+ enum qed_ll2_conn_type conn_type,
+ u8 *handle, bool lb)
+{
+ memset(data, 0, sizeof(*data));
+
+ data->input.conn_type = conn_type;
+ data->input.mtu = params->mtu;
+ data->input.rx_num_desc = QED_LL2_RX_SIZE;
+ data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
+ data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
+ data->input.tx_num_desc = QED_LL2_TX_SIZE;
+ data->p_connection_handle = handle;
+ data->cbs = &ll2_cbs;
+ ll2_cbs.cookie = QED_LEADING_HWFN(cdev);
+
+ if (lb) {
+ data->input.tx_tc = OOO_LB_TC;
+ data->input.tx_dest = QED_LL2_TX_DEST_LB;
+ } else {
+ data->input.tx_tc = 0;
+ data->input.tx_dest = QED_LL2_TX_DEST_NW;
+ }
+}
+
+static int qed_ll2_start_ooo(struct qed_dev *cdev,
+ struct qed_ll2_params *params)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
+ struct qed_ll2_acquire_data data;
+ int rc;
+
+ qed_ll2_set_conn_data(cdev, &data, params,
+ QED_LL2_TYPE_ISCSI_OOO, handle, true);
+
+ rc = qed_ll2_acquire_connection(hwfn, &data);
+ if (rc) {
+ DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
+ goto out;
+ }
+
+ rc = qed_ll2_establish_connection(hwfn, *handle);
+ if (rc) {
+ DP_INFO(cdev, "Failed to establist LL2 OOO connection\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ qed_ll2_release_connection(hwfn, *handle);
+out:
+ *handle = QED_LL2_UNUSED_HANDLE;
+ return rc;
+}
+
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
- struct qed_ll2_conn ll2_info;
struct qed_ll2_buffer *buffer, *tmp_buffer;
enum qed_ll2_conn_type conn_type;
+ struct qed_ll2_acquire_data data;
struct qed_ptt *p_ptt;
int rc, i;
- u8 gsi_enable = 1;
+
/* Initialize LL2 locks & lists */
INIT_LIST_HEAD(&cdev->ll2->list);
spin_lock_init(&cdev->ll2->lock);
cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
L1_CACHE_BYTES + params->mtu;
- cdev->ll2->frags_mapped = params->frags_mapped;
/* Allocate memory for LL2 */
DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
@@ -2094,11 +2141,9 @@
switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
case QED_PCI_FCOE:
conn_type = QED_LL2_TYPE_FCOE;
- gsi_enable = 0;
break;
case QED_PCI_ISCSI:
conn_type = QED_LL2_TYPE_ISCSI;
- gsi_enable = 0;
break;
case QED_PCI_ETH_ROCE:
conn_type = QED_LL2_TYPE_ROCE;
@@ -2107,20 +2152,10 @@
conn_type = QED_LL2_TYPE_TEST;
}
- /* Prepare the temporary ll2 information */
- memset(&ll2_info, 0, sizeof(ll2_info));
+ qed_ll2_set_conn_data(cdev, &data, params, conn_type,
+ &cdev->ll2->handle, false);
- ll2_info.conn_type = conn_type;
- ll2_info.mtu = params->mtu;
- ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
- ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
- ll2_info.tx_tc = 0;
- ll2_info.tx_dest = CORE_TX_DEST_NW;
- ll2_info.gsi_enable = gsi_enable;
-
- rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
- QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
- &cdev->ll2->handle);
+ rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data);
if (rc) {
DP_INFO(cdev, "Failed to acquire LL2 connection\n");
goto fail;
@@ -2243,6 +2278,7 @@
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
{
+ struct qed_ll2_tx_pkt_info pkt;
const skb_frag_t *frag;
int rc = -EINVAL, i;
dma_addr_t mapping;
@@ -2277,32 +2313,30 @@
flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
}
- rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
- cdev->ll2->handle,
- 1 + skb_shinfo(skb)->nr_frags,
- vlan, flags, 0, QED_LL2_TX_DEST_NW,
- 0 /* RoCE FLAVOR */,
- mapping, skb->len, skb, 1);
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
+ pkt.vlan = vlan;
+ pkt.bd_flags = flags;
+ pkt.tx_dest = QED_LL2_TX_DEST_NW;
+ pkt.first_frag = mapping;
+ pkt.first_frag_len = skb->len;
+ pkt.cookie = skb;
+
+ rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
+ &pkt, 1);
if (rc)
goto err;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
- if (!cdev->ll2->frags_mapped) {
- mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
- skb_frag_size(frag),
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&cdev->pdev->dev,
- mapping))) {
- DP_NOTICE(cdev,
- "Unable to map frag - dropping packet\n");
- rc = -ENOMEM;
- goto err;
- }
- } else {
- mapping = page_to_phys(skb_frag_page(frag)) |
- frag->page_offset;
+ mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
+ DP_NOTICE(cdev,
+ "Unable to map frag - dropping packet\n");
+ rc = -ENOMEM;
+ goto err;
}
rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 31a4090..a822528 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -47,29 +47,6 @@
#define QED_MAX_NUM_OF_LL2_CONNECTIONS (4)
-enum qed_ll2_roce_flavor_type {
- QED_LL2_ROCE,
- QED_LL2_RROCE,
- MAX_QED_LL2_ROCE_FLAVOR_TYPE
-};
-
-enum qed_ll2_conn_type {
- QED_LL2_TYPE_FCOE,
- QED_LL2_TYPE_ISCSI,
- QED_LL2_TYPE_TEST,
- QED_LL2_TYPE_ISCSI_OOO,
- QED_LL2_TYPE_RESERVED2,
- QED_LL2_TYPE_ROCE,
- QED_LL2_TYPE_RESERVED3,
- MAX_QED_LL2_RX_CONN_TYPE
-};
-
-enum qed_ll2_tx_dest {
- QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */
- QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */
- QED_LL2_TX_DEST_MAX
-};
-
struct qed_ll2_rx_packet {
struct list_head list_entry;
struct core_rx_bd_with_buff_len *rxq_bd;
@@ -135,30 +112,21 @@
bool b_completing_packet;
};
-struct qed_ll2_conn {
- enum qed_ll2_conn_type conn_type;
- u16 mtu;
- u8 rx_drop_ttl0_flg;
- u8 rx_vlan_removal_en;
- u8 tx_tc;
- enum core_tx_dest tx_dest;
- enum core_error_handle ai_err_packet_too_big;
- enum core_error_handle ai_err_no_buf;
- u8 gsi_enable;
-};
-
struct qed_ll2_info {
/* Lock protecting the state of LL2 */
struct mutex mutex;
- struct qed_ll2_conn conn;
+
+ struct qed_ll2_acquire_data_inputs input;
u32 cid;
u8 my_id;
u8 queue_id;
u8 tx_stats_id;
bool b_active;
+ enum core_tx_dest tx_dest;
u8 tx_stats_en;
struct qed_ll2_rx_queue rx_queue;
struct qed_ll2_tx_queue tx_queue;
+ struct qed_ll2_cbs cbs;
};
/**
@@ -166,38 +134,30 @@
* starts rx & tx (if relevant) queues pair. Provides
* connection handler as output parameter.
*
- * @param p_hwfn
- * @param p_params Contain various configuration properties
- * @param rx_num_desc
- * @param tx_num_desc
*
- * @param p_connection_handle Output container for LL2 connection's handle
- *
- * @return 0 on success, failure otherwise
+ * @param cxt - pointer to the hw-function [opaque to the upper layer]
+ * @param data - describes connection parameters
+ * @return 0 on success, failure otherwise
*/
-int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
- struct qed_ll2_conn *p_params,
- u16 rx_num_desc,
- u16 tx_num_desc,
- u8 *p_connection_handle);
+int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data);
/**
* @brief qed_ll2_establish_connection - start previously
* allocated LL2 queues pair
*
- * @param p_hwfn
+ * @param cxt - pointer to the hw-function [opaque to the upper layer]
* @param p_ptt
* @param connection_handle LL2 connection's handle obtained from
* qed_ll2_acquire_connection
*
* @return 0 on success, failure otherwise
*/
-int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
+int qed_ll2_establish_connection(void *cxt, u8 connection_handle);
/**
* @brief qed_ll2_post_rx_buffers - submit buffers to LL2 Rx queue.
*
- * @param p_hwfn
+ * @param cxt - pointer to the hw-function [opaque to the upper layer]
* @param connection_handle LL2 connection's handle obtained from
* qed_ll2_acquire_connection
* @param addr rx (physical address) buffers to submit
@@ -206,7 +166,7 @@
*
* @return 0 on success, failure otherwise
*/
-int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
+int qed_ll2_post_rx_buffer(void *cxt,
u8 connection_handle,
dma_addr_t addr,
u16 buf_len, void *cookie, u8 notify_fw);
@@ -215,53 +175,34 @@
* @brief qed_ll2_prepare_tx_packet - requests a start Tx BD
* to prepare a Tx packet for submission to FW.
*
- * @param p_hwfn
- * @param connection_handle LL2 connection's handle obtained from
- * qed_ll2_require_connection
- * @param num_of_bds a number of requested BD equals a number of
- * fragments in Tx packet
- * @param vlan VLAN to insert to packet (if insertion set)
- * @param bd_flags
- * @param l4_hdr_offset_w L4 Header Offset from start of packet
- * (in words). This is needed if both l4_csum
- * and ipv6_ext are set
- * @param e_tx_dest indicates if the packet is to be transmitted via
- * loopback or to the network
- * @param first_frag
- * @param first_frag_len
- * @param cookie
- *
- * @param notify_fw
+ * @param cxt - pointer to the hw-function [opaque to the upper layer]
+ * @param connection_handle
+ * @param pkt - info regarding the tx packet
+ * @param notify_fw - issue doorbell to fw for this packet
*
* @return 0 on success, failure otherwise
*/
-int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
+int qed_ll2_prepare_tx_packet(void *cxt,
u8 connection_handle,
- u8 num_of_bds,
- u16 vlan,
- u8 bd_flags,
- u16 l4_hdr_offset_w,
- enum qed_ll2_tx_dest e_tx_dest,
- enum qed_ll2_roce_flavor_type qed_roce_flavor,
- dma_addr_t first_frag,
- u16 first_frag_len, void *cookie, u8 notify_fw);
+ struct qed_ll2_tx_pkt_info *pkt,
+ bool notify_fw);
/**
* @brief qed_ll2_release_connection - releases resources
* allocated for LL2 connection
*
- * @param p_hwfn
+ * @param cxt - pointer to the hw-function [opaque to the upper layer]
* @param connection_handle LL2 connection's handle obtained from
* qed_ll2_acquire_connection
*/
-void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
+void qed_ll2_release_connection(void *cxt, u8 connection_handle);
/**
* @brief qed_ll2_set_fragment_of_tx_packet - provides fragments to fill
* Tx BD of BDs requested by
* qed_ll2_prepare_tx_packet
*
- * @param p_hwfn
+ * @param cxt - pointer to the hw-function [opaque to the upper layer]
* @param connection_handle LL2 connection's handle
* obtained from
* qed_ll2_acquire_connection
@@ -270,7 +211,7 @@
*
* @return 0 on success, failure otherwise
*/
-int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
+int qed_ll2_set_fragment_of_tx_packet(void *cxt,
u8 connection_handle,
dma_addr_t addr, u16 nbytes);
@@ -278,27 +219,27 @@
* @brief qed_ll2_terminate_connection - stops Tx/Rx queues
*
*
- * @param p_hwfn
+ * @param cxt - pointer to the hw-function [opaque to the upper layer]
* @param connection_handle LL2 connection's handle
* obtained from
* qed_ll2_acquire_connection
*
* @return 0 on success, failure otherwise
*/
-int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
+int qed_ll2_terminate_connection(void *cxt, u8 connection_handle);
/**
* @brief qed_ll2_get_stats - get LL2 queue's statistics
*
*
- * @param p_hwfn
+ * @param cxt - pointer to the hw-function [opaque to the upper layer]
* @param connection_handle LL2 connection's handle obtained from
* qed_ll2_acquire_connection
* @param p_stats
*
* @return 0 on success, failure otherwise
*/
-int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
+int qed_ll2_get_stats(void *cxt,
u8 connection_handle, struct qed_ll2_stats *p_stats);
/**
@@ -306,27 +247,24 @@
*
* @param p_hwfn
*
- * @return pointer to alocated qed_ll2_info or NULL
+ * @return 0 on success, -ENOMEM on allocation failure
*/
-struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn);
+int qed_ll2_alloc(struct qed_hwfn *p_hwfn);
/**
* @brief qed_ll2_setup - Inits LL2 connections set
*
* @param p_hwfn
- * @param p_ll2_connections
*
*/
-void qed_ll2_setup(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_connections);
+void qed_ll2_setup(struct qed_hwfn *p_hwfn);
/**
* @brief qed_ll2_free - Releases LL2 connections set
*
* @param p_hwfn
- * @param p_ll2_connections
*
*/
-void qed_ll2_free(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_connections);
+void qed_ll2_free(struct qed_hwfn *p_hwfn);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 715b3aa..16cc30b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -34,7 +34,6 @@
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
@@ -123,7 +122,7 @@
{
struct pci_dev *pdev = cdev->pdev;
- if (cdev->doorbells)
+ if (cdev->doorbells && cdev->db_size)
iounmap(cdev->doorbells);
if (cdev->regview)
iounmap(cdev->regview);
@@ -207,16 +206,24 @@
goto err2;
}
- if (IS_PF(cdev)) {
- cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
- cdev->db_size = pci_resource_len(cdev->pdev, 2);
- cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
- if (!cdev->doorbells) {
- DP_NOTICE(cdev, "Cannot map doorbell space\n");
- return -ENOMEM;
+ cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
+ cdev->db_size = pci_resource_len(cdev->pdev, 2);
+ if (!cdev->db_size) {
+ if (IS_PF(cdev)) {
+ DP_NOTICE(cdev, "No Doorbell bar available\n");
+ return -EINVAL;
+ } else {
+ return 0;
}
}
+ cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
+
+ if (!cdev->doorbells) {
+ DP_NOTICE(cdev, "Cannot map doorbell space\n");
+ return -ENOMEM;
+ }
+
return 0;
err2:
@@ -270,6 +277,8 @@
if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support ==
QED_WOL_SUPPORT_PME)
dev_info->wol_support = true;
+
+ dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
} else {
qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
&dev_info->fw_minor, &dev_info->fw_rev,
@@ -282,6 +291,9 @@
qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
&dev_info->mfw_rev, NULL);
+ qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
+ &dev_info->mbi_version);
+
qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
&dev_info->flash_size);
@@ -336,6 +348,7 @@
if (!cdev)
goto err0;
+ cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
cdev->protocol = params->protocol;
if (params->is_vf)
@@ -607,6 +620,18 @@
return rc;
}
+static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
+{
+ /* Calling the disable function will make sure that any
+ * currently-running function is completed. The following call to the
+ * enable function makes this sequence a flush-like operation.
+ */
+ if (p_hwfn->b_sp_dpc_enabled) {
+ tasklet_disable(p_hwfn->sp_dpc);
+ tasklet_enable(p_hwfn->sp_dpc);
+ }
+}
+
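
The disable/enable pair flushes because tasklet_disable() does not return while the tasklet callback is mid-run; re-enabling merely re-arms it. The idiom in isolation:

/* Sketch of the flush idiom used by qed_slowpath_tasklet_flush(). */
static void tasklet_flush(struct tasklet_struct *t)
{
	tasklet_disable(t);	/* waits out any concurrent run */
	tasklet_enable(t);	/* re-arm; later schedules run normally */
}
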
void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
struct qed_dev *cdev = p_hwfn->cdev;
@@ -618,6 +643,8 @@
synchronize_irq(cdev->int_params.msix_table[id].vector);
else
synchronize_irq(cdev->pdev->irq);
+
+ qed_slowpath_tasklet_flush(p_hwfn);
}
static void qed_slowpath_irq_free(struct qed_dev *cdev)
@@ -745,7 +772,7 @@
for_each_hwfn(cdev, i) {
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
- cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt;
+ cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
cdev->int_params.in.num_vectors++; /* slowpath */
}
@@ -1112,17 +1139,13 @@
return 0;
}
-static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
- char ver_str[VER_SIZE])
+static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
int i;
memcpy(cdev->name, name, NAME_SIZE);
for_each_hwfn(cdev, i)
snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
-
- memcpy(cdev->ver_str, ver_str, VER_SIZE);
- cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
}
static u32 qed_sb_init(struct qed_dev *cdev,
@@ -1520,6 +1543,21 @@
return 0;
}
+static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
+ u8 *buf, u16 len)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+ int rc;
+
+ if (!ptt)
+ return -EAGAIN;
+
+ rc = qed_mcp_get_nvm_image(hwfn, ptt, type, buf, len);
+ qed_ptt_release(hwfn, ptt);
+ return rc;
+}
+
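
Since qed_nvm_get_image() is wired into qed_common_ops below, a protocol driver can fetch its configuration image without touching PTT management. A hedged usage sketch ('ops' is the qed_common_ops pointer the caller obtained at probe time; the buffer size is the caller's choice and must fit the image minus its CRC):

u8 buf[256];
int rc;

rc = ops->nvm_get_image(cdev, QED_NVM_IMAGE_ISCSI_CFG, buf, sizeof(buf));
if (rc)
	DP_NOTICE(cdev, "Failed to read NVM image: %d\n", rc);
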
static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
{
*rx_coal = cdev->rx_coalesce_usecs;
@@ -1676,7 +1714,7 @@
.probe = &qed_probe,
.remove = &qed_remove,
.set_power_state = &qed_set_power_state,
- .set_id = &qed_set_id,
+ .set_name = &qed_set_name,
.update_pf_params = &qed_update_pf_params,
.slowpath_start = &qed_slowpath_start,
.slowpath_stop = &qed_slowpath_stop,
@@ -1697,6 +1735,7 @@
.dbg_all_data_size = &qed_dbg_all_data_size,
.chain_alloc = &qed_chain_alloc,
.chain_free = &qed_chain_free,
+ .nvm_get_image = &qed_nvm_get_image,
.get_coalesce = &qed_get_coalesce,
.set_coalesce = &qed_set_coalesce,
.set_led = &qed_set_led,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 7266b36..9da9104 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -177,6 +177,7 @@
}
kfree(p_hwfn->mcp_info);
+ p_hwfn->mcp_info = NULL;
return 0;
}
@@ -1397,6 +1398,28 @@
¶m);
}
+static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct public_func shmem_info;
+ u32 resp = 0, param = 0;
+
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
+
+ p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
+ FUNC_MF_CFG_OV_STAG_MASK;
+ p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
+ if ((p_hwfn->hw_info.hw_mode & BIT(MODE_MF_SD)) &&
+ (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET)) {
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_TAG_VALUE, p_hwfn->hw_info.ovlan);
+ qed_sp_pf_update_stag(p_hwfn);
+ }
+
+ /* Acknowledge the MFW */
+ qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
+ &resp, ¶m);
+}
+
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -1452,6 +1475,10 @@
case MFW_DRV_MSG_BW_UPDATE:
qed_mcp_update_bw(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_S_TAG_UPDATE:
+ qed_mcp_update_stag(p_hwfn, p_ptt);
+ break;
default:
DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
rc = -EINVAL;
@@ -1522,6 +1549,36 @@
return 0;
}
+int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_mbi_ver)
+{
+ u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ /* Read the address of the nvm_cfg */
+ nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+ if (!nvm_cfg_addr) {
+ DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
+ return -EINVAL;
+ }
+
+ /* Read the offset of nvm_cfg1 */
+ nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+
+ mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, mbi_version);
+ *p_mbi_ver = qed_rd(p_hwfn, p_ptt,
+ mbi_ver_addr) &
+ (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
+ NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
+ NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
+
+ return 0;
+}
+
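
The three VERSION_0/1/2 masks leave one byte each in *p_mbi_ver. Assuming byte 2 is the major number, byte 1 the minor and byte 0 the revision (an assumption to verify against nvm_cfg.h), a consumer can format it as:

u32 mbi = dev_info->mbi_version;

DP_INFO(cdev, "MBI version %d.%d.%d\n",
	(mbi >> 16) & 0xff, (mbi >> 8) & 0xff, mbi & 0xff);
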
int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
{
struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
@@ -1679,10 +1736,10 @@
DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
}
- info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
- (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
- info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
- (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
+ info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
+ (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
+ info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
+ (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);
info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
@@ -1770,8 +1827,9 @@
return 0;
}
-int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 vf_id, u8 num)
+static int
+qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
u32 resp = 0, param = 0, rc_param = 0;
int rc;
@@ -1801,6 +1859,36 @@
return rc;
}
+static int
+qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 num)
+{
+ u32 resp = 0, param = num, rc_param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
+ param, &resp, &rc_param);
+
+ if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
+ DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n");
+ rc = -EINVAL;
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Requested 0x%02x MSI-x interrupts for VFs\n", num);
+ }
+
+ return rc;
+}
+
+int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 vf_id, u8 num)
+{
+ if (QED_IS_BB(p_hwfn->cdev))
+ return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
+ else
+ return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
+}
+
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -2222,6 +2310,95 @@
return rc;
}
+static int
+qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_nvm_images image_id,
+ struct qed_nvm_image_att *p_image_att)
+{
+ struct bist_nvm_image_att mfw_image_att;
+ enum nvm_image_type type;
+ u32 num_images, i;
+ int rc;
+
+ /* Translate image_id into MFW definitions */
+ switch (image_id) {
+ case QED_NVM_IMAGE_ISCSI_CFG:
+ type = NVM_TYPE_ISCSI_CFG;
+ break;
+ case QED_NVM_IMAGE_FCOE_CFG:
+ type = NVM_TYPE_FCOE_CFG;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
+ image_id);
+ return -EINVAL;
+ }
+
+ /* Learn number of images, then traverse and see if one fits */
+ rc = qed_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images);
+ if (rc || !num_images)
+ return -EINVAL;
+
+ for (i = 0; i < num_images; i++) {
+ rc = qed_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
+ &mfw_image_att, i);
+ if (rc)
+ return rc;
+
+ if (type == mfw_image_att.image_type)
+ break;
+ }
+ if (i == num_images) {
+ DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
+ "Failed to find nvram image of type %08x\n",
+ image_id);
+ return -EINVAL;
+ }
+
+ p_image_att->start_addr = mfw_image_att.nvm_start_addr;
+ p_image_att->length = mfw_image_att.len;
+
+ return 0;
+}
+
+int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_nvm_images image_id,
+ u8 *p_buffer, u32 buffer_len)
+{
+ struct qed_nvm_image_att image_att;
+ int rc;
+
+ memset(p_buffer, 0, buffer_len);
+
+ rc = qed_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att);
+ if (rc)
+ return rc;
+
+ /* Validate sizes - both the image's and the supplied buffer's */
+ if (image_att.length <= 4) {
+ DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
+ "Image [%d] is too small - only %d bytes\n",
+ image_id, image_att.length);
+ return -EINVAL;
+ }
+
+ /* Each NVM image is suffixed by a CRC; the upper layer has no need for it */
+ image_att.length -= 4;
+
+ if (image_att.length > buffer_len) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_STORAGE,
+ "Image [%d] is too big - %08x bytes where only %08x are available\n",
+ image_id, image_att.length, buffer_len);
+ return -ENOMEM;
+ }
+
+ return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr,
+ p_buffer, image_att.length);
+}
+
static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
{
enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 2b09b85..af03b36 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -256,6 +256,18 @@
u32 *p_mfw_ver, u32 *p_running_bundle_id);
/**
+ * @brief Get the MBI version value
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mbi_ver - A pointer to a variable to be filled with the MBI version.
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_mbi_ver);
+
+/**
* @brief Get media type value of the port.
*
* @param cdev - qed dev pointer
@@ -418,6 +430,27 @@
*/
int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len);
+struct qed_nvm_image_att {
+ u32 start_addr;
+ u32 length;
+};
+
+/**
+ * @brief Allows reading a whole nvram image
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param image_id - image requested for reading
+ * @param p_buffer - allocated buffer into which to fill data
+ * @param buffer_len - length of the allocated buffer.
+ *
+ * @return 0 iff p_buffer now contains the nvram image.
+ */
+int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_nvm_images image_id,
+ u8 *p_buffer, u32 buffer_len);
+
/**
* @brief Bist register test
*
@@ -482,7 +515,7 @@
#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
- ((_p_hwfn)->cdev->num_ports_in_engines * \
+ ((_p_hwfn)->cdev->num_ports_in_engine * \
qed_device_num_engines((_p_hwfn)->cdev)))
struct qed_mcp_info {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index db96670..0006365 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -99,7 +99,7 @@
p_history->head_idx++;
}
-struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
+int qed_ooo_alloc(struct qed_hwfn *p_hwfn)
{
u16 max_num_archipelagos = 0, cid_base;
struct qed_ooo_info *p_ooo_info;
@@ -109,7 +109,7 @@
if (p_hwfn->hw_info.personality != QED_PCI_ISCSI) {
DP_NOTICE(p_hwfn,
"Failed to allocate qed_ooo_info: unknown personality\n");
- return NULL;
+ return -EINVAL;
}
max_num_archipelagos = p_hwfn->pf_params.iscsi_pf_params.num_cons;
@@ -119,12 +119,12 @@
if (!max_num_archipelagos) {
DP_NOTICE(p_hwfn,
"Failed to allocate qed_ooo_info: unknown amount of connections\n");
- return NULL;
+ return -EINVAL;
}
p_ooo_info = kzalloc(sizeof(*p_ooo_info), GFP_KERNEL);
if (!p_ooo_info)
- return NULL;
+ return -ENOMEM;
p_ooo_info->cid_base = cid_base;
p_ooo_info->max_num_archipelagos = max_num_archipelagos;
@@ -164,7 +164,8 @@
p_ooo_info->ooo_history.num_of_cqes = QED_MAX_NUM_OOO_HISTORY_ENTRIES;
- return p_ooo_info;
+ p_hwfn->p_ooo_info = p_ooo_info;
+ return 0;
no_history_mem:
kfree(p_ooo_info->p_archipelagos_mem);
@@ -172,7 +173,7 @@
kfree(p_ooo_info->p_isles_mem);
no_isles_mem:
kfree(p_ooo_info);
- return NULL;
+ return -ENOMEM;
}
void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
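
With the handle stored on the hwfn, the OOO bookkeeping follows the same alloc/setup/free lifecycle as the other per-hwfn structures; a sketch of the intended call order:

int rc = qed_ooo_alloc(p_hwfn);	/* stores p_hwfn->p_ooo_info on success */

if (rc)
	return rc;
qed_ooo_setup(p_hwfn);		/* releases all isles, clears CQE history */
/* ... runtime: the buffer put/get helpers operate on p_hwfn ... */
qed_ooo_free(p_hwfn);		/* NULL-safe; frees buffers, resets pointer */
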
@@ -249,19 +250,23 @@
&p_ooo_info->free_buffers_list);
}
-void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
+void qed_ooo_setup(struct qed_hwfn *p_hwfn)
{
- qed_ooo_release_all_isles(p_hwfn, p_ooo_info);
- memset(p_ooo_info->ooo_history.p_cqes, 0,
- p_ooo_info->ooo_history.num_of_cqes *
+ qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+ memset(p_hwfn->p_ooo_info->ooo_history.p_cqes, 0,
+ p_hwfn->p_ooo_info->ooo_history.num_of_cqes *
sizeof(struct ooo_opaque));
- p_ooo_info->ooo_history.head_idx = 0;
+ p_hwfn->p_ooo_info->ooo_history.head_idx = 0;
}
-void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
+void qed_ooo_free(struct qed_hwfn *p_hwfn)
{
+ struct qed_ooo_info *p_ooo_info = p_hwfn->p_ooo_info;
struct qed_ooo_buffer *p_buffer;
+ if (!p_ooo_info)
+ return;
+
qed_ooo_release_all_isles(p_hwfn, p_ooo_info);
while (!list_empty(&p_ooo_info->free_buffers_list)) {
p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
@@ -282,6 +287,7 @@
kfree(p_ooo_info->p_archipelagos_mem);
kfree(p_ooo_info->ooo_history.p_cqes);
kfree(p_ooo_info);
+ p_hwfn->p_ooo_info = NULL;
}
void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
index 791ad0f..e8ed40b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
@@ -88,7 +88,11 @@
struct qed_ooo_info *p_ooo_info,
struct ooo_opaque *p_cqe);
-struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn);
+int qed_ooo_alloc(struct qed_hwfn *p_hwfn);
+
+void qed_ooo_setup(struct qed_hwfn *p_hwfn);
+
+void qed_ooo_free(struct qed_hwfn *p_hwfn);
void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
@@ -97,10 +101,6 @@
void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info);
-void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info);
-
-void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info);
-
void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer);
@@ -140,8 +140,14 @@
struct qed_ooo_info *p_ooo_info,
struct ooo_opaque *p_cqe) {}
-static inline struct qed_ooo_info *qed_ooo_alloc(
- struct qed_hwfn *p_hwfn) { return NULL; }
+static inline int qed_ooo_alloc(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline void qed_ooo_setup(struct qed_hwfn *p_hwfn) {}
+
+static inline void qed_ooo_free(struct qed_hwfn *p_hwfn) {}
static inline void
qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
@@ -152,12 +158,6 @@
struct qed_ooo_info *p_ooo_info)
{}
-static inline void qed_ooo_setup(struct qed_hwfn *p_hwfn,
- struct qed_ooo_info *p_ooo_info) {}
-
-static inline void qed_ooo_free(struct qed_hwfn *p_hwfn,
- struct qed_ooo_info *p_ooo_info) {}
-
static inline void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer) {}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
index 434a164..5a90d69 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
@@ -80,7 +80,7 @@
/* MFW doesn't support resource locking, first PF on the port
* has lock ownership.
*/
- if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines)
+ if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine)
return 0;
DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
@@ -108,7 +108,7 @@
rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &params);
if (rc == -EINVAL) {
/* MFW doesn't support locking, first PF has lock ownership */
- if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines) {
+ if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine) {
rc = 0;
} else {
DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
new file mode 100644
index 0000000..df76e21
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -0,0 +1,1722 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include <linux/qed/qed_rdma_if.h>
+#include "qed_rdma.h"
+#include "qed_roce.h"
+#include "qed_sp.h"
+
+int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 max_count, char *name)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
+
+ bmap->max_count = max_count;
+
+ bmap->bitmap = kcalloc(BITS_TO_LONGS(max_count), sizeof(long),
+ GFP_KERNEL);
+ if (!bmap->bitmap)
+ return -ENOMEM;
+
+ snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
+ return 0;
+}
+
+int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 *id_num)
+{
+ *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
+ if (*id_num >= bmap->max_count)
+ return -EINVAL;
+
+ __set_bit(*id_num, bmap->bitmap);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
+ bmap->name, *id_num);
+
+ return 0;
+}
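+
+/* Illustrative sketch, not part of the driver: the helpers above are meant
+ * to be driven under p_rdma_info->lock, exactly as the callers later in
+ * this file do. The function name below is hypothetical.
+ */
+static inline int qed_rdma_example_id_cycle(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap)
+{
+ u32 id;
+ int rc;
+
+ /* Reserve the lowest free id */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ if (rc)
+ return rc; /* bitmap exhausted */
+
+ /* ... the id now belongs to the caller ... */
+
+ /* Return it to the pool */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, bmap, id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ return 0;
+}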
+
+void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ if (id_num >= bmap->max_count)
+ return;
+
+ __set_bit(id_num, bmap->bitmap);
+}
+
+void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ bool b_acquired;
+
+ if (id_num >= bmap->max_count)
+ return;
+
+ b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
+ if (!b_acquired) {
+ DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
+ bmap->name, id_num);
+ return;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
+ bmap->name, id_num);
+}
+
+int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ if (id_num >= bmap->max_count)
+ return -1;
+
+ return test_bit(id_num, bmap->bitmap);
+}
+
+static bool qed_bmap_is_empty(struct qed_bmap *bmap)
+{
+ return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
+}
+
+u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
+{
+ /* First sb id for RoCE is after all the l2 sb */
+ return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
+}
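+
+/* For example (illustrative numbers): with 8 L2 status blocks per PF,
+ * CNQ 0 lands on absolute status block id 8, CNQ 1 on 9, and so on.
+ */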
+
+static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_info *p_rdma_info;
+ u32 num_cons, num_tasks;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
+
+ /* Allocate a struct with current pf rdma info */
+ p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
+ if (!p_rdma_info)
+ return rc;
+
+ p_hwfn->p_rdma_info = p_rdma_info;
+ p_rdma_info->proto = PROTOCOLID_ROCE;
+
+ num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
+ NULL);
+
+ p_rdma_info->num_qps = num_cons / 2;
+
+ num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
+
+ /* Each MR uses a single task */
+ p_rdma_info->num_mrs = num_tasks;
+
+ /* Queue zone lines are shared between RoCE and L2 in such a way that
+ * they can be used by each without obstructing the other.
+ */
+ p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+ p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
+
+ /* Allocate a struct with device params and fill it */
+ p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
+ if (!p_rdma_info->dev)
+ goto free_rdma_info;
+
+ /* Allocate a struct with port params and fill it */
+ p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
+ if (!p_rdma_info->port)
+ goto free_rdma_dev;
+
+ /* Allocate bit map for pd's */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
+ "PD");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate pd_map, rc = %d\n",
+ rc);
+ goto free_rdma_port;
+ }
+
+ /* Allocate DPI bitmap */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
+ p_hwfn->dpi_count, "DPI");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate DPI bitmap, rc = %d\n", rc);
+ goto free_pd_map;
+ }
+
+ /* Allocate bitmap for cq's. The maximum number of CQs is bounded to
+ * twice the number of QPs.
+ */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
+ p_rdma_info->num_qps * 2, "CQ");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate cq bitmap, rc = %d\n", rc);
+ goto free_dpi_map;
+ }
+
+ /* Allocate bitmap for toggle bit for cq icids
+ * We toggle the bit every time we create or resize cq for a given icid.
+ * The maximum number of CQs is bounded to twice the number of QPs.
+ */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
+ p_rdma_info->num_qps * 2, "Toggle");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate toogle bits, rc = %d\n", rc);
+ goto free_cq_map;
+ }
+
+ /* Allocate bitmap for itids */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
+ p_rdma_info->num_mrs, "MR");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate itids bitmaps, rc = %d\n", rc);
+ goto free_toggle_map;
+ }
+
+ /* Allocate bitmap for cids used for qps. */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
+ "CID");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate cid bitmap, rc = %d\n", rc);
+ goto free_tid_map;
+ }
+
+ /* Allocate bitmap for cids used for responders/requesters. */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
+ "REAL_CID");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate real cid bitmap, rc = %d\n", rc);
+ goto free_cid_map;
+ }
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
+ return 0;
+
+free_cid_map:
+ kfree(p_rdma_info->cid_map.bitmap);
+free_tid_map:
+ kfree(p_rdma_info->tid_map.bitmap);
+free_toggle_map:
+ kfree(p_rdma_info->toggle_bits.bitmap);
+free_cq_map:
+ kfree(p_rdma_info->cq_map.bitmap);
+free_dpi_map:
+ kfree(p_rdma_info->dpi_map.bitmap);
+free_pd_map:
+ kfree(p_rdma_info->pd_map.bitmap);
+free_rdma_port:
+ kfree(p_rdma_info->port);
+free_rdma_dev:
+ kfree(p_rdma_info->dev);
+free_rdma_info:
+ kfree(p_rdma_info);
+
+ return rc;
+}
+
+void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, bool check)
+{
+ int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
+ int last_line = bmap->max_count / (64 * 8);
+ int last_item = last_line * 8 +
+ DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
+ u64 *pmap = (u64 *)bmap->bitmap;
+ int line, item, offset;
+ u8 str_last_line[200] = { 0 };
+
+ if (!weight || !check)
+ goto end;
+
+ DP_NOTICE(p_hwfn,
+ "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
+ bmap->name, bmap->max_count, weight);
+
+ /* print aligned non-zero lines, if any */
+ for (item = 0, line = 0; line < last_line; line++, item += 8)
+ if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
+ DP_NOTICE(p_hwfn,
+ "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ line,
+ pmap[item],
+ pmap[item + 1],
+ pmap[item + 2],
+ pmap[item + 3],
+ pmap[item + 4],
+ pmap[item + 5],
+ pmap[item + 6], pmap[item + 7]);
+
+ /* print last unaligned non-zero line, if any */
+ if ((bmap->max_count % (64 * 8)) &&
+ (bitmap_weight((unsigned long *)&pmap[item],
+ bmap->max_count - item * 64))) {
+ offset = sprintf(str_last_line, "line 0x%04x: ", line);
+ for (; item < last_item; item++)
+ offset += sprintf(str_last_line + offset,
+ "0x%016llx ", pmap[item]);
+ DP_NOTICE(p_hwfn, "%s\n", str_last_line);
+ }
+
+end:
+ kfree(bmap->bitmap);
+ bmap->bitmap = NULL;
+}
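+
+/* Dump-format example (illustrative): with max_count = 1024 the bitmap is
+ * two 512-bit lines; a single leaked id 513 would be reported as weight=1
+ * and printed as "line 0x0001: 0x0000000000000002 0x0000000000000000 ...".
+ */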
+
+static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
+
+ kfree(p_rdma_info->port);
+ kfree(p_rdma_info->dev);
+
+ kfree(p_rdma_info);
+}
+
+static void qed_rdma_free(struct qed_hwfn *p_hwfn)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
+
+ qed_rdma_resc_free(p_hwfn);
+}
+
+static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
+{
+ guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
+ guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
+ guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
+ guid[3] = 0xff;
+ guid[4] = 0xfe;
+ guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
+ guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
+ guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
+}
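+
+/* Worked example (illustrative MAC): 00:0e:1e:aa:bb:cc becomes the EUI-64
+ * GUID 02:0e:1e:ff:fe:aa:bb:cc - the locally-administered bit of the first
+ * octet is flipped (^ 2) and ff:fe is inserted between OUI and device id.
+ */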
+
+static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_events *events;
+
+ events = &p_hwfn->p_rdma_info->events;
+
+ events->unaffiliated_event = params->events->unaffiliated_event;
+ events->affiliated_event = params->events->affiliated_event;
+ events->context = params->events->context;
+}
+
+static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 pci_status_control;
+ u32 num_qps;
+
+ /* Vendor specific information */
+ dev->vendor_id = cdev->vendor_id;
+ dev->vendor_part_id = cdev->device_id;
+ dev->hw_ver = 0;
+ dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
+ (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
+
+ qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
+ dev->node_guid = dev->sys_image_guid;
+
+ dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
+ RDMA_MAX_SGE_PER_RQ_WQE);
+
+ if (cdev->rdma_max_sge)
+ dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
+
+ dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
+
+ dev->max_inline = (cdev->rdma_max_inline) ?
+ min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
+ dev->max_inline;
+
+ dev->max_wqe = QED_RDMA_MAX_WQE;
+ dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);
+
+ /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
+ * it is up-aligned to 16 and then to ILT page size within qed cxt.
+ * This is OK in terms of ILT but we don't want to configure the FW
+ * above its abilities
+ */
+ num_qps = ROCE_MAX_QPS;
+ num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
+ dev->max_qp = num_qps;
+
+ /* CQs use the same icids as QPs, hence they are limited by the
+ * number of icids. There are two icids per QP.
+ */
+ dev->max_cq = num_qps * 2;
+
+ /* The number of mrs is smaller by 1 since the first is reserved */
+ dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
+ dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;
+
+ /* The maximum CQE capacity per CQ supported.
+ * The max number of CQEs is derived from a two-layer PBL,
+ * where 8 is the pointer size in bytes and
+ * 32 is the size of a CQ element in bytes.
+ */
+ if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
+ dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
+ else
+ dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
+
+ dev->max_mw = 0;
+ dev->max_fmr = QED_RDMA_MAX_FMR;
+ dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
+ dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
+ dev->max_pkey = QED_RDMA_MAX_P_KEY;
+
+ dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
+ (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
+ dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
+ RDMA_REQ_RD_ATOMIC_ELM_SIZE;
+ dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
+ p_hwfn->p_rdma_info->num_qps;
+ dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
+ dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
+ dev->max_pd = RDMA_MAX_PDS;
+ dev->max_ah = p_hwfn->p_rdma_info->num_qps;
+ dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);
+
+ /* Set capabilities */
+ dev->dev_caps = 0;
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
+
+ /* Check atomic operations support in PCI configuration space. */
+ pci_read_config_dword(cdev->pdev,
+ cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
+ &pci_status_control);
+
+ if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
+}
+
+static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+
+ port->port_state = p_hwfn->mcp_info->link_output.link_up ?
+ QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
+
+ port->max_msg_size = min_t(u64,
+ (dev->max_mr_mw_fmr_size *
+ p_hwfn->cdev->rdma_max_sge),
+ BIT(31));
+
+ port->pkey_bad_counter = 0;
+}
+
+static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 ll2_ethertype_en;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
+ p_hwfn->b_rdma_enabled_in_prs = false;
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
+
+ p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
+
+ /* We delay writing to this reg until first cid is allocated. See
+ * qed_cxt_dynamic_ilt_alloc function for more details
+ */
+ ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
+ (ll2_ethertype_en | 0x01));
+
+ if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
+ DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
+ return 0;
+}
+
+static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params,
+ struct qed_ptt *p_ptt)
+{
+ struct rdma_init_func_ramrod_data *p_ramrod;
+ struct qed_rdma_cnq_params *p_cnq_pbl_list;
+ struct rdma_init_func_hdr *p_params_header;
+ struct rdma_cnq_params *p_cnq_params;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u32 cnq_id, sb_id;
+ u16 igu_sb_id;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
+
+ /* Save the number of cnqs for the function close ramrod */
+ p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
+
+ p_params_header = &p_ramrod->params_header;
+ p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
+ QED_RDMA_CNQ_RAM);
+ p_params_header->num_cnqs = params->desired_cnq;
+
+ if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
+ p_params_header->cq_ring_mode = 1;
+ else
+ p_params_header->cq_ring_mode = 0;
+
+ for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
+ sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
+ igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
+ p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id);
+ p_cnq_params = &p_ramrod->cnq_params[cnq_id];
+ p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
+
+ p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
+ p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
+
+ DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
+ p_cnq_pbl_list->pbl_ptr);
+
+ /* we assume here that cnq_id and qz_offset are the same */
+ p_cnq_params->queue_zone_num =
+ cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
+ cnq_id);
+ }
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ if (rc)
+ goto out;
+
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
+out:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+
+ /* The first DPI is reserved for the Kernel */
+ __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
+
+ /* Tid 0 will be used as the key for "reserved MR".
+ * The driver should allocate memory for it so it can be loaded,
+ * but no ramrod should be posted for it.
+ */
+ qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
+ if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
+ DP_NOTICE(p_hwfn,
+ "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
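+
+/* Note: both reservations land on index 0 - DPI 0 for the kernel and tid 0
+ * for the reserved MR - so the first qed_rdma_alloc_tid() call is expected
+ * to return RDMA_RESERVED_LKEY.
+ */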
+
+static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_rdma_start_in_params *params)
+{
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
+
+ spin_lock_init(&p_hwfn->p_rdma_info->lock);
+
+ qed_rdma_init_devinfo(p_hwfn, params);
+ qed_rdma_init_port(p_hwfn);
+ qed_rdma_init_events(p_hwfn, params);
+
+ rc = qed_rdma_reserve_lkey(p_hwfn);
+ if (rc)
+ return rc;
+
+ rc = qed_rdma_init_hw(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+
+ qed_roce_setup(p_hwfn);
+
+ return qed_rdma_start_fw(p_hwfn, params, p_ptt);
+}
+
+int qed_rdma_stop(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_close_func_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ struct qed_ptt *p_ptt;
+ u32 ll2_ethertype_en;
+ int rc = -EBUSY;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
+ return rc;
+ }
+
+ /* Disable RoCE search */
+ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
+ p_hwfn->b_rdma_enabled_in_prs = false;
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
+
+ ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
+ (ll2_ethertype_en & 0xFFFE));
+
+ qed_roce_stop(p_hwfn);
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ /* Stop RoCE */
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ goto out;
+
+ p_ramrod = &p_ent->ramrod.rdma_close_func;
+
+ p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
+ p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+out:
+ qed_rdma_free(p_hwfn);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_add_user(void *rdma_cxt,
+ struct qed_rdma_add_user_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ u32 dpi_start_offset;
+ u32 returned_id = 0;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");
+
+ /* Allocate DPI */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
+ &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ out_params->dpi = (u16)returned_id;
+
+ /* Calculate the corresponding DPI address */
+ dpi_start_offset = p_hwfn->dpi_start_offset;
+
+ out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
+ dpi_start_offset +
+ ((out_params->dpi) * p_hwfn->dpi_size));
+
+ out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
+ dpi_start_offset +
+ ((out_params->dpi) * p_hwfn->dpi_size);
+
+ out_params->dpi_size = p_hwfn->dpi_size;
+ out_params->wid_count = p_hwfn->wid_count;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
+ return rc;
+}
+
+static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
+
+ /* Link may have changed */
+ p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
+ QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
+
+ p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
+
+ p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;
+
+ return p_port;
+}
+
+static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");
+
+ /* Return struct with device parameters */
+ return p_hwfn->p_rdma_info->dev;
+}
+
+static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
+{
+ struct qed_hwfn *p_hwfn;
+ u16 qz_num;
+ u32 addr;
+
+ p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
+ DP_NOTICE(p_hwfn,
+ "queue zone offset %d is too large (max is %d)\n",
+ qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
+ return;
+ }
+
+ qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
+ addr = GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
+
+ REG_WR16(p_hwfn, addr, prod);
+
+ /* keep prod updates ordered */
+ wmb();
+}
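+
+/* Example (illustrative values): with queue_zone_base = 64 and qz_offset = 2
+ * the producer is written for queue zone 66, i.e. at USDM RAM offset
+ * USTORM_COMMON_QUEUE_CONS_OFFSET(66).
+ */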
+
+static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
+ struct qed_dev_rdma_info *info)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+
+ memset(info, 0, sizeof(*info));
+
+ info->rdma_type = QED_RDMA_TYPE_ROCE;
+ info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);
+
+ qed_fill_dev_info(cdev, &info->common);
+
+ return 0;
+}
+
+static int qed_rdma_get_sb_start(struct qed_dev *cdev)
+{
+ int feat_num;
+
+ if (cdev->num_hwfns > 1)
+ feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
+ else
+ feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
+ cdev->num_hwfns;
+
+ return feat_num;
+}
+
+static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
+{
+ int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
+ int n_msix = cdev->int_params.rdma_msix_cnt;
+
+ return min_t(int, n_cnq, n_msix);
+}
+
+static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
+{
+ int limit = 0;
+
+ /* Mark the fastpath as free/used */
+ cdev->int_params.fp_initialized = cnt ? true : false;
+
+ if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
+ DP_ERR(cdev,
+ "qed roce supports only MSI-X interrupts (detected %d).\n",
+ cdev->int_params.out.int_mode);
+ return -EINVAL;
+ } else if (cdev->int_params.fp_msix_cnt) {
+ limit = cdev->int_params.rdma_msix_cnt;
+ }
+
+ if (!limit)
+ return -ENOMEM;
+
+ return min_t(int, cnt, limit);
+}
+
+static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
+{
+ memset(info, 0, sizeof(*info));
+
+ if (!cdev->int_params.fp_initialized) {
+ DP_INFO(cdev,
+ "Protocol driver requested interrupt information, but its support is not yet configured\n");
+ return -EINVAL;
+ }
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ int msix_base = cdev->int_params.rdma_msix_base;
+
+ info->msix_cnt = cdev->int_params.rdma_msix_cnt;
+ info->msix = &cdev->int_params.msix_table[msix_base];
+
+ DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
+ info->msix_cnt, msix_base);
+ }
+
+ return 0;
+}
+
+static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ u32 returned_id;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");
+
+ /* Allocates an unused protection domain */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->pd_map, &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ *pd = (u16)returned_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
+ return rc;
+}
+
+static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);
+
+ /* Returns a previously allocated protection domain for reuse */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static enum qed_rdma_toggle_bit
+qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
+{
+ struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
+ enum qed_rdma_toggle_bit toggle_bit;
+ u32 bmap_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);
+
+ /* The function toggles the bit related to the given icid
+ * and returns the new toggle bit's value.
+ */
+ bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
+
+ spin_lock_bh(&p_info->lock);
+ toggle_bit = !test_and_change_bit(bmap_id,
+ p_info->toggle_bits.bitmap);
+ spin_unlock_bh(&p_info->lock);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
+ toggle_bit);
+
+ return toggle_bit;
+}
+
+static int qed_rdma_create_cq(void *rdma_cxt,
+ struct qed_rdma_create_cq_in_params *params,
+ u16 *icid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
+ struct rdma_create_cq_ramrod_data *p_ramrod;
+ enum qed_rdma_toggle_bit toggle_bit;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u32 returned_id, start_cid;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
+ params->cq_handle_hi, params->cq_handle_lo);
+
+ /* Allocate icid */
+ spin_lock_bh(&p_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
+ spin_unlock_bh(&p_info->lock);
+
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
+ return rc;
+ }
+
+ start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
+ p_info->proto);
+ *icid = returned_id + start_cid;
+
+ /* Check if icid requires a page allocation */
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
+ if (rc)
+ goto err;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = *icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ /* Send create CQ ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_CREATE_CQ,
+ p_info->proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_create_cq;
+
+ p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
+ p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
+ p_ramrod->dpi = cpu_to_le16(params->dpi);
+ p_ramrod->is_two_level_pbl = params->pbl_two_level;
+ p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
+ DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
+ p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
+ p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
+ params->cnq_id;
+ p_ramrod->int_timeout = params->int_timeout;
+
+ /* toggle the bit for every resize or create cq for a given icid */
+ toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
+
+ p_ramrod->toggle_bit = toggle_bit;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc) {
+ /* restore toggle bit */
+ qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
+ goto err;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
+ return rc;
+
+err:
+ /* release allocated icid */
+ spin_lock_bh(&p_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
+ spin_unlock_bh(&p_info->lock);
+ DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
+
+ return rc;
+}
+
+static int
+qed_rdma_destroy_cq(void *rdma_cxt,
+ struct qed_rdma_destroy_cq_in_params *in_params,
+ struct qed_rdma_destroy_cq_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_destroy_cq_output_params *p_ramrod_res;
+ struct rdma_destroy_cq_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ enum protocol_type proto;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
+
+ p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ &ramrod_res_phys, GFP_KERNEL);
+ if (!p_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed destroy cq failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = in_params->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+ proto = p_hwfn->p_rdma_info->proto;
+ /* Send destroy CQ ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_DESTROY_CQ,
+ proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ /* Free icid */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+
+ qed_bmap_release_id(p_hwfn,
+ &p_hwfn->p_rdma_info->cq_map,
+ (in_params->icid -
+ qed_cxt_get_proto_cid_start(p_hwfn, proto)));
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
+ return rc;
+
+err:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ return rc;
+}
+
+void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
+{
+ p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
+ p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
+ p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
+}
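+
+/* Packing example (illustrative MAC): 00:0e:1e:aa:bb:cc is stored as the
+ * three little-endian 16-bit words 0x000e, 0x1eaa and 0xbbcc.
+ */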
+
+static int qed_rdma_query_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ /* The following fields are filled in from qp and not FW as they can't
+ * be modified by FW
+ */
+ out_params->mtu = qp->mtu;
+ out_params->dest_qp = qp->dest_qp;
+ out_params->incoming_atomic_en = qp->incoming_atomic_en;
+ out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
+ out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
+ out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
+ out_params->dgid = qp->dgid;
+ out_params->flow_label = qp->flow_label;
+ out_params->hop_limit_ttl = qp->hop_limit_ttl;
+ out_params->traffic_class_tos = qp->traffic_class_tos;
+ out_params->timeout = qp->ack_timeout;
+ out_params->rnr_retry = qp->rnr_retry_cnt;
+ out_params->retry_cnt = qp->retry_cnt;
+ out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
+ out_params->pkey_index = 0;
+ out_params->max_rd_atomic = qp->max_rd_atomic_req;
+ out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
+ out_params->sqd_async = qp->sqd_async;
+
+ rc = qed_roce_query_qp(p_hwfn, qp, out_params);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ rc = qed_roce_destroy_qp(p_hwfn, qp);
+
+ /* free qp params struct */
+ kfree(qp);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
+ return rc;
+}
+
+static struct qed_rdma_qp *
+qed_rdma_create_qp(void *rdma_cxt,
+ struct qed_rdma_create_qp_in_params *in_params,
+ struct qed_rdma_create_qp_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_qp *qp;
+ u8 max_stats_queues;
+ int rc;
+
+ if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
+ DP_ERR(p_hwfn->cdev,
+ "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
+ rdma_cxt, in_params, out_params);
+ return NULL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "qed rdma create qp called with qp_handle = %08x%08x\n",
+ in_params->qp_handle_hi, in_params->qp_handle_lo);
+
+ /* Some sanity checks... */
+ max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
+ if (in_params->stats_queue >= max_stats_queues) {
+ DP_ERR(p_hwfn->cdev,
+ "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
+ in_params->stats_queue, max_stats_queues);
+ return NULL;
+ }
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return NULL;
+
+ rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
+ qp->qpid = ((0xFF << 16) | qp->icid);
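+ /* e.g. (illustrative) icid 0x0012 yields qpid 0x00ff0012 */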
+
+ DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid);
+
+ if (rc) {
+ kfree(qp);
+ return NULL;
+ }
+
+ qp->cur_state = QED_ROCE_QP_STATE_RESET;
+ qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
+ qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
+ qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
+ qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
+ qp->use_srq = in_params->use_srq;
+ qp->signal_all = in_params->signal_all;
+ qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
+ qp->pd = in_params->pd;
+ qp->dpi = in_params->dpi;
+ qp->sq_cq_id = in_params->sq_cq_id;
+ qp->sq_num_pages = in_params->sq_num_pages;
+ qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
+ qp->rq_cq_id = in_params->rq_cq_id;
+ qp->rq_num_pages = in_params->rq_num_pages;
+ qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
+ qp->srq_id = in_params->srq_id;
+ qp->req_offloaded = false;
+ qp->resp_offloaded = false;
+ qp->e2e_flow_control_en = !qp->use_srq;
+ qp->stats_queue = in_params->stats_queue;
+
+ out_params->icid = qp->icid;
+ out_params->qp_id = qp->qpid;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
+ return qp;
+}
+
+static int qed_rdma_modify_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_modify_qp_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ enum qed_roce_qp_state prev_state;
+ int rc = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
+ qp->icid, params->new_state);
+
+
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
+ qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
+ qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
+ qp->incoming_atomic_en = params->incoming_atomic_en;
+ }
+
+ /* Update QP structure with the updated values */
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
+ qp->roce_mode = params->roce_mode;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
+ qp->pkey = params->pkey;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
+ qp->e2e_flow_control_en = params->e2e_flow_control_en;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
+ qp->dest_qp = params->dest_qp;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
+ /* Indicates that the following parameters have changed:
+ * Traffic class, flow label, hop limit, source GID,
+ * destination GID, loopback indicator
+ */
+ qp->traffic_class_tos = params->traffic_class_tos;
+ qp->flow_label = params->flow_label;
+ qp->hop_limit_ttl = params->hop_limit_ttl;
+
+ qp->sgid = params->sgid;
+ qp->dgid = params->dgid;
+ qp->udp_src_port = 0;
+ qp->vlan_id = params->vlan_id;
+ qp->mtu = params->mtu;
+ qp->lb_indication = params->lb_indication;
+ memcpy((u8 *)&qp->remote_mac_addr[0],
+ (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
+ if (params->use_local_mac) {
+ memcpy((u8 *)&qp->local_mac_addr[0],
+ (u8 *)&params->local_mac_addr[0], ETH_ALEN);
+ } else {
+ memcpy((u8 *)&qp->local_mac_addr[0],
+ (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
+ }
+ }
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
+ qp->rq_psn = params->rq_psn;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
+ qp->sq_psn = params->sq_psn;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
+ qp->max_rd_atomic_req = params->max_rd_atomic_req;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
+ qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
+ qp->ack_timeout = params->ack_timeout;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
+ qp->retry_cnt = params->retry_cnt;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
+ qp->rnr_retry_cnt = params->rnr_retry_cnt;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
+ qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
+
+ qp->sqd_async = params->sqd_async;
+
+ prev_state = qp->cur_state;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
+ qp->cur_state = params->new_state;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
+ qp->cur_state);
+ }
+
+ rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
+ return rc;
+}
+
+static int
+qed_rdma_register_tid(void *rdma_cxt,
+ struct qed_rdma_register_tid_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_register_tid_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ enum rdma_tid_type tid_type;
+ u8 fw_return_code;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ if (p_hwfn->p_rdma_info->last_tid < params->itid)
+ p_hwfn->p_rdma_info->last_tid = params->itid;
+
+ p_ramrod = &p_ent->ramrod.rdma_register_tid;
+
+ p_ramrod->flags = 0;
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
+ params->pbl_two_level);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
+
+ /* Don't initialize D/C field, as it may override other bits. */
+ if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
+ params->page_size_log - 12);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
+ params->remote_read);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
+ params->remote_write);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
+ params->remote_atomic);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
+ params->local_write);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
+ params->mw_bind);
+
+ SET_FIELD(p_ramrod->flags1,
+ RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
+ params->pbl_page_size_log - 12);
+
+ SET_FIELD(p_ramrod->flags2,
+ RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);
+
+ switch (params->tid_type) {
+ case QED_RDMA_TID_REGISTERED_MR:
+ tid_type = RDMA_TID_REGISTERED_MR;
+ break;
+ case QED_RDMA_TID_FMR:
+ tid_type = RDMA_TID_FMR;
+ break;
+ case QED_RDMA_TID_MW_TYPE1:
+ tid_type = RDMA_TID_MW_TYPE1;
+ break;
+ case QED_RDMA_TID_MW_TYPE2A:
+ tid_type = RDMA_TID_MW_TYPE2A;
+ break;
+ default:
+ rc = -EINVAL;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+ SET_FIELD(p_ramrod->flags1,
+ RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);
+
+ p_ramrod->itid = cpu_to_le32(params->itid);
+ p_ramrod->key = params->key;
+ p_ramrod->pd = cpu_to_le16(params->pd);
+ p_ramrod->length_hi = (u8)(params->length >> 32);
+ p_ramrod->length_lo = DMA_LO_LE(params->length);
+ if (params->zbva) {
+ /* Lower 32 bits of the registered MR address.
+ * In case of zero based MR, will hold FBO
+ */
+ p_ramrod->va.hi = 0;
+ p_ramrod->va.lo = cpu_to_le32(params->fbo);
+ } else {
+ DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
+ }
+ DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
+
+ /* DIF */
+ if (params->dif_enabled) {
+ SET_FIELD(p_ramrod->flags2,
+ RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
+ DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
+ params->dif_error_addr);
+ DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
+ }
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc)
+ return rc;
+
+ if (fw_return_code != RDMA_RETURN_OK) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_deregister_tid_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ struct qed_ptt *p_ptt;
+ u8 fw_return_code;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
+ p_ramrod->itid = cpu_to_le32(itid);
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+ return -EINVAL;
+ } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
+ /* Bit indicating that the TID is in use and a nig drain is
+ * required before sending the ramrod again
+ */
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ rc = -EBUSY;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to acquire PTT\n");
+ return rc;
+ }
+
+ rc = qed_mcp_drain(p_hwfn, p_ptt);
+ if (rc) {
+ qed_ptt_release(p_hwfn, p_ptt);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Drain failed\n");
+ return rc;
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ /* Resend the ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_DEREGISTER_MR,
+ p_hwfn->p_rdma_info->proto,
+ &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to init sp-element\n");
+ return rc;
+ }
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Ramrod failed\n");
+ return rc;
+ }
+
+ if (fw_return_code != RDMA_RETURN_OK) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
+ fw_return_code);
+ return rc;
+ }
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
+ return rc;
+}
+
+static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
+{
+ return QED_LEADING_HWFN(cdev);
+}
+
+bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
+{
+ bool result;
+
+ /* if rdma info has not been allocated, naturally there are no qps */
+ if (!p_hwfn->p_rdma_info)
+ return false;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ if (!p_hwfn->p_rdma_info->cid_map.bitmap)
+ result = false;
+ else
+ result = !qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ return result;
+}
+
+void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 val;
+
+ val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
+ DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
+ "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
+ val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
+}
+
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ p_hwfn->db_bar_no_edpm = true;
+
+ qed_rdma_dpm_conf(p_hwfn, p_ptt);
+}
+
+static int qed_rdma_start(void *rdma_cxt,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_ptt *p_ptt;
+ int rc = -EBUSY;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "desired_cnq = %08x\n", params->desired_cnq);
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ goto err;
+
+ rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
+ if (rc)
+ goto err1;
+
+ rc = qed_rdma_setup(p_hwfn, p_ptt, params);
+ if (rc)
+ goto err2;
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+
+err2:
+ qed_rdma_free(p_hwfn);
+err1:
+ qed_ptt_release(p_hwfn, p_ptt);
+err:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_init(struct qed_dev *cdev,
+ struct qed_rdma_start_in_params *params)
+{
+ return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
+}
+
+static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
+ u8 *old_mac_address,
+ u8 *new_mac_address)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt;
+ int rc = 0;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(cdev,
+ "qed roce ll2 mac filter set: failed to acquire PTT\n");
+ return -EINVAL;
+ }
+
+ if (old_mac_address)
+ qed_llh_remove_mac_filter(p_hwfn, p_ptt, old_mac_address);
+ if (new_mac_address)
+ rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, new_mac_address);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ if (rc)
+ DP_ERR(cdev,
+ "qed roce ll2 mac filter set: failed to add MAC filter\n");
+
+ return rc;
+}
+
+static const struct qed_rdma_ops qed_rdma_ops_pass = {
+ .common = &qed_common_ops_pass,
+ .fill_dev_info = &qed_fill_rdma_dev_info,
+ .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
+ .rdma_init = &qed_rdma_init,
+ .rdma_add_user = &qed_rdma_add_user,
+ .rdma_remove_user = &qed_rdma_remove_user,
+ .rdma_stop = &qed_rdma_stop,
+ .rdma_query_port = &qed_rdma_query_port,
+ .rdma_query_device = &qed_rdma_query_device,
+ .rdma_get_start_sb = &qed_rdma_get_sb_start,
+ .rdma_get_rdma_int = &qed_rdma_get_int,
+ .rdma_set_rdma_int = &qed_rdma_set_int,
+ .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
+ .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
+ .rdma_alloc_pd = &qed_rdma_alloc_pd,
+ .rdma_dealloc_pd = &qed_rdma_free_pd,
+ .rdma_create_cq = &qed_rdma_create_cq,
+ .rdma_destroy_cq = &qed_rdma_destroy_cq,
+ .rdma_create_qp = &qed_rdma_create_qp,
+ .rdma_modify_qp = &qed_rdma_modify_qp,
+ .rdma_query_qp = &qed_rdma_query_qp,
+ .rdma_destroy_qp = &qed_rdma_destroy_qp,
+ .rdma_alloc_tid = &qed_rdma_alloc_tid,
+ .rdma_free_tid = &qed_rdma_free_tid,
+ .rdma_register_tid = &qed_rdma_register_tid,
+ .rdma_deregister_tid = &qed_rdma_deregister_tid,
+ .ll2_acquire_connection = &qed_ll2_acquire_connection,
+ .ll2_establish_connection = &qed_ll2_establish_connection,
+ .ll2_terminate_connection = &qed_ll2_terminate_connection,
+ .ll2_release_connection = &qed_ll2_release_connection,
+ .ll2_post_rx_buffer = &qed_ll2_post_rx_buffer,
+ .ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet,
+ .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
+ .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
+ .ll2_get_stats = &qed_ll2_get_stats,
+};
+
+const struct qed_rdma_ops *qed_get_rdma_ops(void)
+{
+ return &qed_rdma_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_rdma_ops);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
new file mode 100644
index 0000000..d91e5c4
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
@@ -0,0 +1,201 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _QED_RDMA_H
+#define _QED_RDMA_H
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_rdma_if.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_roce.h"
+
+#define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS)
+#define QED_RDMA_MAX_P_KEY (1)
+#define QED_RDMA_MAX_WQE (0x7FFF)
+#define QED_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF)
+#define QED_RDMA_PAGE_SIZE_CAPS (0xFFFFF000)
+#define QED_RDMA_ACK_DELAY (15)
+#define QED_RDMA_MAX_MR_SIZE (0x10000000000ULL)
+#define QED_RDMA_MAX_CQS (RDMA_MAX_CQS)
+#define QED_RDMA_MAX_MRS (RDMA_MAX_TIDS)
+/* Add 1 for header element */
+#define QED_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1)
+#define QED_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE)
+#define QED_RDMA_SRQ_WQE_ELEM_SIZE (16)
+#define QED_RDMA_MAX_SRQS (32 * 1024)
+
+#define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1)
+#define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1)
+
+enum qed_rdma_toggle_bit {
+ QED_RDMA_TOGGLE_BIT_CLEAR = 0,
+ QED_RDMA_TOGGLE_BIT_SET = 1
+};
+
+#define QED_RDMA_MAX_BMAP_NAME (10)
+struct qed_bmap {
+ unsigned long *bitmap;
+ u32 max_count;
+ char name[QED_RDMA_MAX_BMAP_NAME];
+};
+
+struct qed_rdma_info {
+ /* spin lock to protect bitmaps */
+ spinlock_t lock;
+
+ struct qed_bmap cq_map;
+ struct qed_bmap pd_map;
+ struct qed_bmap tid_map;
+ struct qed_bmap qp_map;
+ struct qed_bmap srq_map;
+ struct qed_bmap cid_map;
+ struct qed_bmap real_cid_map;
+ struct qed_bmap dpi_map;
+ struct qed_bmap toggle_bits;
+ struct qed_rdma_events events;
+ struct qed_rdma_device *dev;
+ struct qed_rdma_port *port;
+ u32 last_tid;
+ u8 num_cnqs;
+ u32 num_qps;
+ u32 num_mrs;
+ u16 queue_zone_base;
+ u16 max_queue_zones;
+ enum protocol_type proto;
+};
+
+struct qed_rdma_qp {
+ struct regpair qp_handle;
+ struct regpair qp_handle_async;
+ u32 qpid;
+ u16 icid;
+ enum qed_roce_qp_state cur_state;
+ bool use_srq;
+ bool signal_all;
+ bool fmr_and_reserved_lkey;
+
+ bool incoming_rdma_read_en;
+ bool incoming_rdma_write_en;
+ bool incoming_atomic_en;
+ bool e2e_flow_control_en;
+
+ u16 pd;
+ u16 pkey;
+ u32 dest_qp;
+ u16 mtu;
+ u16 srq_id;
+ u8 traffic_class_tos;
+ u8 hop_limit_ttl;
+ u16 dpi;
+ u32 flow_label;
+ bool lb_indication;
+ u16 vlan_id;
+ u32 ack_timeout;
+ u8 retry_cnt;
+ u8 rnr_retry_cnt;
+ u8 min_rnr_nak_timer;
+ bool sqd_async;
+ union qed_gid sgid;
+ union qed_gid dgid;
+ enum roce_mode roce_mode;
+ u16 udp_src_port;
+ u8 stats_queue;
+
+ /* requester */
+ u8 max_rd_atomic_req;
+ u32 sq_psn;
+ u16 sq_cq_id;
+ u16 sq_num_pages;
+ dma_addr_t sq_pbl_ptr;
+ void *orq;
+ dma_addr_t orq_phys_addr;
+ u8 orq_num_pages;
+ bool req_offloaded;
+
+ /* responder */
+ u8 max_rd_atomic_resp;
+ u32 rq_psn;
+ u16 rq_cq_id;
+ u16 rq_num_pages;
+ dma_addr_t rq_pbl_ptr;
+ void *irq;
+ dma_addr_t irq_phys_addr;
+ u8 irq_num_pages;
+ bool resp_offloaded;
+ u32 cq_prod;
+
+ u8 remote_mac_addr[6];
+ u8 local_mac_addr[6];
+
+ void *shared_queue;
+ dma_addr_t shared_queue_phys_addr;
+};
+
+#if IS_ENABLED(CONFIG_QED_RDMA)
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+#else
+static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt) {}
+static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt) {}
+#endif
+
+int
+qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 max_count, char *name);
+
+void
+qed_rdma_bmap_free(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, bool check);
+
+int
+qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 *id_num);
+
+void
+qed_bmap_set_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num);
+
+void
+qed_bmap_release_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num);
+
+int
+qed_bmap_test_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num);
+
+void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac);
+
+bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 1ae73b2..0cdb433 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -242,6 +242,8 @@
0x50196cUL
#define NIG_REG_LLH_CLS_TYPE_DUALMODE \
0x501964UL
+#define NIG_REG_LLH_FUNC_TAG_EN 0x5019b0UL
+#define NIG_REG_LLH_FUNC_TAG_VALUE 0x5019d0UL
#define NIG_REG_LLH_FUNC_FILTER_VALUE \
0x501a00UL
#define NIG_REG_LLH_FUNC_FILTER_VALUE_SIZE \
@@ -558,6 +560,7 @@
0x2aae60UL
#define PGLUE_B_REG_PF_BAR1_SIZE \
0x2aae64UL
+#define PGLUE_B_REG_VF_BAR1_SIZE 0x2aae68UL
#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
#define PRS_REG_GRE_PROTOCOL 0x1f0734UL
#define PRS_REG_VXLAN_PORT 0x1f0738UL
@@ -592,15 +595,15 @@
#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
#define QM_REG_WFQVPWEIGHT 0x2fa000UL
-#define PGLCS_REG_DBG_SELECT \
+#define PGLCS_REG_DBG_SELECT_K2 \
0x001d14UL
-#define PGLCS_REG_DBG_DWORD_ENABLE \
+#define PGLCS_REG_DBG_DWORD_ENABLE_K2 \
0x001d18UL
-#define PGLCS_REG_DBG_SHIFT \
+#define PGLCS_REG_DBG_SHIFT_K2 \
0x001d1cUL
-#define PGLCS_REG_DBG_FORCE_VALID \
+#define PGLCS_REG_DBG_FORCE_VALID_K2 \
0x001d20UL
-#define PGLCS_REG_DBG_FORCE_FRAME \
+#define PGLCS_REG_DBG_FORCE_FRAME_K2 \
0x001d24UL
#define MISC_REG_RESET_PL_PDA_VMAIN_1 \
0x008070UL
@@ -612,7 +615,7 @@
0x009050UL
#define MISCS_REG_RESET_PL_HV \
0x009060UL
-#define MISCS_REG_RESET_PL_HV_2 \
+#define MISCS_REG_RESET_PL_HV_2_K2 \
0x009150UL
#define DMAE_REG_DBG_SELECT \
0x00c510UL
@@ -644,15 +647,15 @@
0x0500b0UL
#define GRC_REG_DBG_FORCE_FRAME \
0x0500b4UL
-#define UMAC_REG_DBG_SELECT \
+#define UMAC_REG_DBG_SELECT_K2 \
0x051094UL
-#define UMAC_REG_DBG_DWORD_ENABLE \
+#define UMAC_REG_DBG_DWORD_ENABLE_K2 \
0x051098UL
-#define UMAC_REG_DBG_SHIFT \
+#define UMAC_REG_DBG_SHIFT_K2 \
0x05109cUL
-#define UMAC_REG_DBG_FORCE_VALID \
+#define UMAC_REG_DBG_FORCE_VALID_K2 \
0x0510a0UL
-#define UMAC_REG_DBG_FORCE_FRAME \
+#define UMAC_REG_DBG_FORCE_FRAME_K2 \
0x0510a4UL
#define MCP2_REG_DBG_SELECT \
0x052400UL
@@ -924,15 +927,15 @@
0x4c160cUL
#define XYLD_REG_DBG_FORCE_FRAME \
0x4c1610UL
-#define YULD_REG_DBG_SELECT \
+#define YULD_REG_DBG_SELECT_BB_K2 \
0x4c9600UL
-#define YULD_REG_DBG_DWORD_ENABLE \
+#define YULD_REG_DBG_DWORD_ENABLE_BB_K2 \
0x4c9604UL
-#define YULD_REG_DBG_SHIFT \
+#define YULD_REG_DBG_SHIFT_BB_K2 \
0x4c9608UL
-#define YULD_REG_DBG_FORCE_VALID \
+#define YULD_REG_DBG_FORCE_VALID_BB_K2 \
0x4c960cUL
-#define YULD_REG_DBG_FORCE_FRAME \
+#define YULD_REG_DBG_FORCE_FRAME_BB_K2 \
0x4c9610UL
#define TMLD_REG_DBG_SELECT \
0x4d1600UL
@@ -994,35 +997,35 @@
0x580710UL
#define CDU_REG_DBG_FORCE_FRAME \
0x580714UL
-#define WOL_REG_DBG_SELECT \
+#define WOL_REG_DBG_SELECT_K2 \
0x600140UL
-#define WOL_REG_DBG_DWORD_ENABLE \
+#define WOL_REG_DBG_DWORD_ENABLE_K2 \
0x600144UL
-#define WOL_REG_DBG_SHIFT \
+#define WOL_REG_DBG_SHIFT_K2 \
0x600148UL
-#define WOL_REG_DBG_FORCE_VALID \
+#define WOL_REG_DBG_FORCE_VALID_K2 \
0x60014cUL
-#define WOL_REG_DBG_FORCE_FRAME \
+#define WOL_REG_DBG_FORCE_FRAME_K2 \
0x600150UL
-#define BMBN_REG_DBG_SELECT \
+#define BMBN_REG_DBG_SELECT_K2 \
0x610140UL
-#define BMBN_REG_DBG_DWORD_ENABLE \
+#define BMBN_REG_DBG_DWORD_ENABLE_K2 \
0x610144UL
-#define BMBN_REG_DBG_SHIFT \
+#define BMBN_REG_DBG_SHIFT_K2 \
0x610148UL
-#define BMBN_REG_DBG_FORCE_VALID \
+#define BMBN_REG_DBG_FORCE_VALID_K2 \
0x61014cUL
-#define BMBN_REG_DBG_FORCE_FRAME \
+#define BMBN_REG_DBG_FORCE_FRAME_K2 \
0x610150UL
-#define NWM_REG_DBG_SELECT \
+#define NWM_REG_DBG_SELECT_K2 \
0x8000ecUL
-#define NWM_REG_DBG_DWORD_ENABLE \
+#define NWM_REG_DBG_DWORD_ENABLE_K2 \
0x8000f0UL
-#define NWM_REG_DBG_SHIFT \
+#define NWM_REG_DBG_SHIFT_K2 \
0x8000f4UL
-#define NWM_REG_DBG_FORCE_VALID \
+#define NWM_REG_DBG_FORCE_VALID_K2 \
0x8000f8UL
-#define NWM_REG_DBG_FORCE_FRAME \
+#define NWM_REG_DBG_FORCE_FRAME_K2 \
0x8000fcUL
#define PBF_REG_DBG_SELECT \
0xd80060UL
@@ -1244,35 +1247,35 @@
0x1901534UL
#define USEM_REG_DBG_FORCE_FRAME \
0x1901538UL
-#define NWS_REG_DBG_SELECT \
+#define NWS_REG_DBG_SELECT_K2 \
0x700128UL
-#define NWS_REG_DBG_DWORD_ENABLE \
+#define NWS_REG_DBG_DWORD_ENABLE_K2 \
0x70012cUL
-#define NWS_REG_DBG_SHIFT \
+#define NWS_REG_DBG_SHIFT_K2 \
0x700130UL
-#define NWS_REG_DBG_FORCE_VALID \
+#define NWS_REG_DBG_FORCE_VALID_K2 \
0x700134UL
-#define NWS_REG_DBG_FORCE_FRAME \
+#define NWS_REG_DBG_FORCE_FRAME_K2 \
0x700138UL
-#define MS_REG_DBG_SELECT \
+#define MS_REG_DBG_SELECT_K2 \
0x6a0228UL
-#define MS_REG_DBG_DWORD_ENABLE \
+#define MS_REG_DBG_DWORD_ENABLE_K2 \
0x6a022cUL
-#define MS_REG_DBG_SHIFT \
+#define MS_REG_DBG_SHIFT_K2 \
0x6a0230UL
-#define MS_REG_DBG_FORCE_VALID \
+#define MS_REG_DBG_FORCE_VALID_K2 \
0x6a0234UL
-#define MS_REG_DBG_FORCE_FRAME \
+#define MS_REG_DBG_FORCE_FRAME_K2 \
0x6a0238UL
-#define PCIE_REG_DBG_COMMON_SELECT \
+#define PCIE_REG_DBG_COMMON_SELECT_K2 \
0x054398UL
-#define PCIE_REG_DBG_COMMON_DWORD_ENABLE \
+#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2 \
0x05439cUL
-#define PCIE_REG_DBG_COMMON_SHIFT \
+#define PCIE_REG_DBG_COMMON_SHIFT_K2 \
0x0543a0UL
-#define PCIE_REG_DBG_COMMON_FORCE_VALID \
+#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2 \
0x0543a4UL
-#define PCIE_REG_DBG_COMMON_FORCE_FRAME \
+#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2 \
0x0543a8UL
#define MISC_REG_RESET_PL_UA \
0x008050UL
@@ -1328,85 +1331,85 @@
0x128170cUL
#define UCM_REG_SM_TASK_CTX \
0x1281710UL
-#define XSEM_REG_SLOW_DBG_EMPTY \
+#define XSEM_REG_SLOW_DBG_EMPTY_BB_K2 \
0x1401140UL
#define XSEM_REG_SYNC_DBG_EMPTY \
0x1401160UL
-#define XSEM_REG_SLOW_DBG_ACTIVE \
+#define XSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
0x1401400UL
-#define XSEM_REG_SLOW_DBG_MODE \
+#define XSEM_REG_SLOW_DBG_MODE_BB_K2 \
0x1401404UL
-#define XSEM_REG_DBG_FRAME_MODE \
+#define XSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1401408UL
-#define XSEM_REG_DBG_MODE1_CFG \
+#define XSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1401420UL
#define XSEM_REG_FAST_MEMORY \
0x1440000UL
#define YSEM_REG_SYNC_DBG_EMPTY \
0x1501160UL
-#define YSEM_REG_SLOW_DBG_ACTIVE \
+#define YSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
0x1501400UL
-#define YSEM_REG_SLOW_DBG_MODE \
+#define YSEM_REG_SLOW_DBG_MODE_BB_K2 \
0x1501404UL
-#define YSEM_REG_DBG_FRAME_MODE \
+#define YSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1501408UL
-#define YSEM_REG_DBG_MODE1_CFG \
+#define YSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1501420UL
#define YSEM_REG_FAST_MEMORY \
0x1540000UL
-#define PSEM_REG_SLOW_DBG_EMPTY \
+#define PSEM_REG_SLOW_DBG_EMPTY_BB_K2 \
0x1601140UL
#define PSEM_REG_SYNC_DBG_EMPTY \
0x1601160UL
-#define PSEM_REG_SLOW_DBG_ACTIVE \
+#define PSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
0x1601400UL
-#define PSEM_REG_SLOW_DBG_MODE \
+#define PSEM_REG_SLOW_DBG_MODE_BB_K2 \
0x1601404UL
-#define PSEM_REG_DBG_FRAME_MODE \
+#define PSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1601408UL
-#define PSEM_REG_DBG_MODE1_CFG \
+#define PSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1601420UL
#define PSEM_REG_FAST_MEMORY \
0x1640000UL
-#define TSEM_REG_SLOW_DBG_EMPTY \
+#define TSEM_REG_SLOW_DBG_EMPTY_BB_K2 \
0x1701140UL
#define TSEM_REG_SYNC_DBG_EMPTY \
0x1701160UL
-#define TSEM_REG_SLOW_DBG_ACTIVE \
+#define TSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
0x1701400UL
-#define TSEM_REG_SLOW_DBG_MODE \
+#define TSEM_REG_SLOW_DBG_MODE_BB_K2 \
0x1701404UL
-#define TSEM_REG_DBG_FRAME_MODE \
+#define TSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1701408UL
-#define TSEM_REG_DBG_MODE1_CFG \
+#define TSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1701420UL
#define TSEM_REG_FAST_MEMORY \
0x1740000UL
-#define MSEM_REG_SLOW_DBG_EMPTY \
+#define MSEM_REG_SLOW_DBG_EMPTY_BB_K2 \
0x1801140UL
#define MSEM_REG_SYNC_DBG_EMPTY \
0x1801160UL
-#define MSEM_REG_SLOW_DBG_ACTIVE \
+#define MSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
0x1801400UL
-#define MSEM_REG_SLOW_DBG_MODE \
+#define MSEM_REG_SLOW_DBG_MODE_BB_K2 \
0x1801404UL
-#define MSEM_REG_DBG_FRAME_MODE \
+#define MSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1801408UL
-#define MSEM_REG_DBG_MODE1_CFG \
+#define MSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1801420UL
#define MSEM_REG_FAST_MEMORY \
0x1840000UL
-#define USEM_REG_SLOW_DBG_EMPTY \
+#define USEM_REG_SLOW_DBG_EMPTY_BB_K2 \
0x1901140UL
#define USEM_REG_SYNC_DBG_EMPTY \
0x1901160UL
-#define USEM_REG_SLOW_DBG_ACTIVE \
+#define USEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
0x1901400UL
-#define USEM_REG_SLOW_DBG_MODE \
+#define USEM_REG_SLOW_DBG_MODE_BB_K2 \
0x1901404UL
-#define USEM_REG_DBG_FRAME_MODE \
+#define USEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1901408UL
-#define USEM_REG_DBG_MODE1_CFG \
+#define USEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1901420UL
#define USEM_REG_FAST_MEMORY \
0x1940000UL
@@ -1430,7 +1433,7 @@
0x340800UL
#define BRB_REG_BIG_RAM_DATA \
0x341500UL
-#define SEM_FAST_REG_STALL_0 \
+#define SEM_FAST_REG_STALL_0_BB_K2 \
0x000488UL
#define SEM_FAST_REG_STALLED \
0x000494UL
@@ -1480,37 +1483,37 @@
4
#define MISC_REG_BLOCK_256B_EN \
0x008c14UL
-#define NWS_REG_NWS_CMU \
+#define NWS_REG_NWS_CMU_K2 \
0x720000UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \
0x000680UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \
0x000684UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \
0x0006c0UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \
0x0006c4UL
-#define MS_REG_MS_CMU \
+#define MS_REG_MS_CMU_K2 \
0x6a4000UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \
0x000208UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132 \
- 0x000210UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \
0x00020cUL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \
+ 0x000210UL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \
0x000214UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \
0x000208UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \
0x00020cUL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \
0x000210UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \
0x000214UL
-#define PHY_PCIE_REG_PHY0 \
+#define PHY_PCIE_REG_PHY0_K2 \
0x620000UL
-#define PHY_PCIE_REG_PHY1 \
+#define PHY_PCIE_REG_PHY1_K2 \
0x624000UL
#define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
#define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL
@@ -1557,9 +1560,16 @@
#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2 0x2aaf9cUL
#define PGLUE_B_REG_PGL_ADDR_F0_F0_K2 0x2aafa0UL
#define PGLUE_B_REG_PGL_ADDR_F4_F0_K2 0x2aafa4UL
+#define PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE 0x2aae30UL
#define NIG_REG_TSGEN_FREECNT_UPDATE_K2 0x509008UL
#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL
+#define NIG_REG_TX_EDPM_CTRL 0x501f0cUL
+#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN (0x1 << 0)
+#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN_SHIFT 0
+#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN (0xff << 1)
+#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT 1
+
#define PRS_REG_SEARCH_GFT 0x1f11bcUL
#define PRS_REG_CM_HDR_GFT 0x1f11c8UL
#define PRS_REG_GFT_CAM 0x1f1100UL
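
Usage note (illustrative sketch, not part of the patch): the new
NIG_REG_TX_EDPM_CTRL field macros pair a mask with a shift. A minimal
read-modify-write sketch of a hypothetical call site, using the driver's
existing qed_rd()/qed_wr() accessors:

	u32 val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_EDPM_CTRL);

	val |= NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN;		/* global EDPM enable */
	val &= ~NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN;
	val |= 0xff << NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT; /* all TCs */
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_EDPM_CTRL, val);
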
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 56289d7..e53adc3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -35,12 +35,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
-#include <linux/etherdevice.h>
-#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
#include <linux/io.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -49,10 +44,6 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
-#include <linux/tcp.h>
-#include <linux/bitops.h>
-#include <linux/qed/qed_roce_if.h>
-#include <linux/qed/qed_roce_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
@@ -62,18 +53,21 @@
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
-#include "qed_sp.h"
+#include <linux/qed/qed_rdma_if.h>
+#include "qed_rdma.h"
#include "qed_roce.h"
-#include "qed_ll2.h"
+#include "qed_sp.h"
static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
-void qed_roce_async_event(struct qed_hwfn *p_hwfn,
- u8 fw_event_code, union rdma_eqe_data *rdma_data)
+static int
+qed_roce_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code,
+ u16 echo, union event_ring_data *data, u8 fw_return_code)
{
if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
u16 icid =
- (u16)le32_to_cpu(rdma_data->rdma_destroy_qp_data.cid);
+ (u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid);
/* icid release in this async event can occur only if the icid
* was offloaded to the FW. In case it wasn't offloaded this is
@@ -85,290 +79,15 @@
events->affiliated_event(p_hwfn->p_rdma_info->events.context,
fw_event_code,
- &rdma_data->async_handle);
+ (void *)&data->rdma_data.async_handle);
}
-}
-
-static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
- struct qed_bmap *bmap, u32 max_count, char *name)
-{
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
-
- bmap->max_count = max_count;
-
- bmap->bitmap = kzalloc(BITS_TO_LONGS(max_count) * sizeof(long),
- GFP_KERNEL);
- if (!bmap->bitmap) {
- DP_NOTICE(p_hwfn,
- "qed bmap alloc failed: cannot allocate memory (bitmap)\n");
- return -ENOMEM;
- }
-
- snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
- return 0;
-}
-
-static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
- struct qed_bmap *bmap, u32 *id_num)
-{
- *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
- if (*id_num >= bmap->max_count)
- return -EINVAL;
-
- __set_bit(*id_num, bmap->bitmap);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
- bmap->name, *id_num);
return 0;
}
-static void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
- struct qed_bmap *bmap, u32 id_num)
-{
- if (id_num >= bmap->max_count)
- return;
-
- __set_bit(id_num, bmap->bitmap);
-}
-
-static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
- struct qed_bmap *bmap, u32 id_num)
-{
- bool b_acquired;
-
- if (id_num >= bmap->max_count)
- return;
-
- b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
- if (!b_acquired) {
- DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
- bmap->name, id_num);
- return;
- }
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
- bmap->name, id_num);
-}
-
-static int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
- struct qed_bmap *bmap, u32 id_num)
-{
- if (id_num >= bmap->max_count)
- return -1;
-
- return test_bit(id_num, bmap->bitmap);
-}
-
-static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
-{
- /* First sb id for RoCE is after all the l2 sb */
- return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
-}
-
-static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_rdma_start_in_params *params)
-{
- struct qed_rdma_info *p_rdma_info;
- u32 num_cons, num_tasks;
- int rc = -ENOMEM;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
-
- /* Allocate a struct with current pf rdma info */
- p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
- if (!p_rdma_info) {
- DP_NOTICE(p_hwfn,
- "qed rdma alloc failed: cannot allocate memory (rdma info). rc = %d\n",
- rc);
- return rc;
- }
-
- p_hwfn->p_rdma_info = p_rdma_info;
- p_rdma_info->proto = PROTOCOLID_ROCE;
-
- num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
- NULL);
-
- p_rdma_info->num_qps = num_cons / 2;
-
- num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
-
- /* Each MR uses a single task */
- p_rdma_info->num_mrs = num_tasks;
-
- /* Queue zone lines are shared between RoCE and L2 in such a way that
- * they can be used by each without obstructing the other.
- */
- p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
- p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
-
- /* Allocate a struct with device params and fill it */
- p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
- if (!p_rdma_info->dev) {
- DP_NOTICE(p_hwfn,
- "qed rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n",
- rc);
- goto free_rdma_info;
- }
-
- /* Allocate a struct with port params and fill it */
- p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
- if (!p_rdma_info->port) {
- DP_NOTICE(p_hwfn,
- "qed rdma alloc failed: cannot allocate memory (rdma info port). rc = %d\n",
- rc);
- goto free_rdma_dev;
- }
-
- /* Allocate bit map for pd's */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
- "PD");
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate pd_map, rc = %d\n",
- rc);
- goto free_rdma_port;
- }
-
- /* Allocate DPI bitmap */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
- p_hwfn->dpi_count, "DPI");
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate DPI bitmap, rc = %d\n", rc);
- goto free_pd_map;
- }
-
- /* Allocate bitmap for cq's. The maximum number of CQs is bounded to
- * twice the number of QPs.
- */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
- p_rdma_info->num_qps * 2, "CQ");
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate cq bitmap, rc = %d\n", rc);
- goto free_dpi_map;
- }
-
- /* Allocate bitmap for toggle bit for cq icids
- * We toggle the bit every time we create or resize cq for a given icid.
- * The maximum number of CQs is bounded to twice the number of QPs.
- */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
- p_rdma_info->num_qps * 2, "Toggle");
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate toogle bits, rc = %d\n", rc);
- goto free_cq_map;
- }
-
- /* Allocate bitmap for itids */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
- p_rdma_info->num_mrs, "MR");
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate itids bitmaps, rc = %d\n", rc);
- goto free_toggle_map;
- }
-
- /* Allocate bitmap for cids used for qps. */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
- "CID");
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate cid bitmap, rc = %d\n", rc);
- goto free_tid_map;
- }
-
- /* Allocate bitmap for cids used for responders/requesters. */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
- "REAL_CID");
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate real cid bitmap, rc = %d\n", rc);
- goto free_cid_map;
- }
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
- return 0;
-
-free_cid_map:
- kfree(p_rdma_info->cid_map.bitmap);
-free_tid_map:
- kfree(p_rdma_info->tid_map.bitmap);
-free_toggle_map:
- kfree(p_rdma_info->toggle_bits.bitmap);
-free_cq_map:
- kfree(p_rdma_info->cq_map.bitmap);
-free_dpi_map:
- kfree(p_rdma_info->dpi_map.bitmap);
-free_pd_map:
- kfree(p_rdma_info->pd_map.bitmap);
-free_rdma_port:
- kfree(p_rdma_info->port);
-free_rdma_dev:
- kfree(p_rdma_info->dev);
-free_rdma_info:
- kfree(p_rdma_info);
-
- return rc;
-}
-
-static void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
- struct qed_bmap *bmap, bool check)
-{
- int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
- int last_line = bmap->max_count / (64 * 8);
- int last_item = last_line * 8 +
- DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
- u64 *pmap = (u64 *)bmap->bitmap;
- int line, item, offset;
- u8 str_last_line[200] = { 0 };
-
- if (!weight || !check)
- goto end;
-
- DP_NOTICE(p_hwfn,
- "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
- bmap->name, bmap->max_count, weight);
-
- /* print aligned non-zero lines, if any */
- for (item = 0, line = 0; line < last_line; line++, item += 8)
- if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
- DP_NOTICE(p_hwfn,
- "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
- line,
- pmap[item],
- pmap[item + 1],
- pmap[item + 2],
- pmap[item + 3],
- pmap[item + 4],
- pmap[item + 5],
- pmap[item + 6], pmap[item + 7]);
-
- /* print last unaligned non-zero line, if any */
- if ((bmap->max_count % (64 * 8)) &&
- (bitmap_weight((unsigned long *)&pmap[item],
- bmap->max_count - item * 64))) {
- offset = sprintf(str_last_line, "line 0x%04x: ", line);
- for (; item < last_item; item++)
- offset += sprintf(str_last_line + offset,
- "0x%016llx ", pmap[item]);
- DP_NOTICE(p_hwfn, "%s\n", str_last_line);
- }
-
-end:
- kfree(bmap->bitmap);
- bmap->bitmap = NULL;
-}
-
-static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
+void qed_roce_stop(struct qed_hwfn *p_hwfn)
{
struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
- struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
int wait_count = 0;
/* when destroying a RoCE QP the control is returned to the user after
@@ -383,780 +102,7 @@
break;
}
}
-
- qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
- qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
- qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
- qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
- qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
- qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
-
- kfree(p_rdma_info->port);
- kfree(p_rdma_info->dev);
-
- kfree(p_rdma_info);
-}
-
-static void qed_rdma_free(struct qed_hwfn *p_hwfn)
-{
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
-
- qed_rdma_resc_free(p_hwfn);
-}
-
-static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
-{
- guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
- guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
- guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
- guid[3] = 0xff;
- guid[4] = 0xfe;
- guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
- guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
- guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
-}
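
(Worked example, using a hypothetical port MAC of 00:0e:1e:11:22:33: the code
above yields the EUI-64-style GUID 02:0e:1e:ff:fe:11:22:33. The xor with 2
flips the universal/local bit of the first octet, and ff:fe is inserted
between the OUI and the NIC-specific half of the MAC.)
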
-
-static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
- struct qed_rdma_start_in_params *params)
-{
- struct qed_rdma_events *events;
-
- events = &p_hwfn->p_rdma_info->events;
-
- events->unaffiliated_event = params->events->unaffiliated_event;
- events->affiliated_event = params->events->affiliated_event;
- events->context = params->events->context;
-}
-
-static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
- struct qed_rdma_start_in_params *params)
-{
- struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
- struct qed_dev *cdev = p_hwfn->cdev;
- u32 pci_status_control;
- u32 num_qps;
-
- /* Vendor specific information */
- dev->vendor_id = cdev->vendor_id;
- dev->vendor_part_id = cdev->device_id;
- dev->hw_ver = 0;
- dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
- (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
-
- qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
- dev->node_guid = dev->sys_image_guid;
-
- dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
- RDMA_MAX_SGE_PER_RQ_WQE);
-
- if (cdev->rdma_max_sge)
- dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
-
- dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
-
- dev->max_inline = (cdev->rdma_max_inline) ?
- min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
- dev->max_inline;
-
- dev->max_wqe = QED_RDMA_MAX_WQE;
- dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);
-
- /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
- * it is up-aligned to 16 and then to ILT page size within qed cxt.
- * This is OK in terms of ILT but we don't want to configure the FW
- * above its abilities
- */
- num_qps = ROCE_MAX_QPS;
- num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
- dev->max_qp = num_qps;
-
- /* CQs use the same icids that QPs use, hence they are limited by the
- * number of icids. There are two icids per QP.
- */
- dev->max_cq = num_qps * 2;
-
- /* The number of mrs is smaller by 1 since the first is reserved */
- dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
- dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;
-
- /* The maximum CQE capacity per CQ supported.
- * max number of cqes will be in two layer pbl,
- * 8 is the pointer size in bytes
- * 32 is the size of cq element in bytes
- */
- if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
- dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
- else
- dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
-
- dev->max_mw = 0;
- dev->max_fmr = QED_RDMA_MAX_FMR;
- dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
- dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
- dev->max_pkey = QED_RDMA_MAX_P_KEY;
-
- dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
- (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
- dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
- RDMA_REQ_RD_ATOMIC_ELM_SIZE;
- dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
- p_hwfn->p_rdma_info->num_qps;
- dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
- dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
- dev->max_pd = RDMA_MAX_PDS;
- dev->max_ah = p_hwfn->p_rdma_info->num_qps;
- dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);
-
- /* Set capabilities */
- dev->dev_caps = 0;
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
-
- /* Check atomic operations support in PCI configuration space. */
- pci_read_config_dword(cdev->pdev,
- cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
- &pci_status_control);
-
- if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
- SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
-}
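
(Worked example for the PBL-derived limits above, assuming 4 KiB pages: a
two-level PBL of 8-byte pointers addresses (4096 / 8) * (4096 / 8) = 262144
pages, so max_mr_mw_fmr_size = 262144 * 4096 bytes = 1 GiB.)
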
-
-static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
-{
- struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
- struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
-
- port->port_state = p_hwfn->mcp_info->link_output.link_up ?
- QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
-
- port->max_msg_size = min_t(u64,
- (dev->max_mr_mw_fmr_size *
- p_hwfn->cdev->rdma_max_sge),
- BIT(31));
-
- port->pkey_bad_counter = 0;
-}
-
-static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
-{
- u32 ll2_ethertype_en;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
- p_hwfn->b_rdma_enabled_in_prs = false;
-
- qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
-
- p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
-
- /* We delay writing to this reg until first cid is allocated. See
- * qed_cxt_dynamic_ilt_alloc function for more details
- */
- ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
- qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
- (ll2_ethertype_en | 0x01));
-
- if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
- DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
- return -EINVAL;
- }
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
- return 0;
-}
-
-static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
- struct qed_rdma_start_in_params *params,
- struct qed_ptt *p_ptt)
-{
- struct rdma_init_func_ramrod_data *p_ramrod;
- struct qed_rdma_cnq_params *p_cnq_pbl_list;
- struct rdma_init_func_hdr *p_params_header;
- struct rdma_cnq_params *p_cnq_params;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- u32 cnq_id, sb_id;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
-
- /* Save the number of cnqs for the function close ramrod */
- p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
- p_hwfn->p_rdma_info->proto, &init_data);
- if (rc)
- return rc;
-
- p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
-
- p_params_header = &p_ramrod->params_header;
- p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
- QED_RDMA_CNQ_RAM);
- p_params_header->num_cnqs = params->desired_cnq;
-
- if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
- p_params_header->cq_ring_mode = 1;
- else
- p_params_header->cq_ring_mode = 0;
-
- for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
- sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
- p_cnq_params = &p_ramrod->cnq_params[cnq_id];
- p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
- p_cnq_params->sb_num =
- cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id);
-
- p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
- p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
-
- DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
- p_cnq_pbl_list->pbl_ptr);
-
- /* we assume here that cnq_id and qz_offset are the same */
- p_cnq_params->queue_zone_num =
- cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
- cnq_id);
- }
-
- return qed_spq_post(p_hwfn, p_ent, NULL);
-}
-
-static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
-
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- rc = qed_rdma_bmap_alloc_id(p_hwfn,
- &p_hwfn->p_rdma_info->tid_map, itid);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
- if (rc)
- goto out;
-
- rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
-out:
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
- return rc;
-}
-
-static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
-{
- struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
-
- /* The first DPI is reserved for the Kernel */
- __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
-
- /* Tid 0 will be used as the key for "reserved MR".
- * The driver should allocate memory for it so it can be loaded but no
- * ramrod should be passed on it.
- */
- qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
- if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
- DP_NOTICE(p_hwfn,
- "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_rdma_start_in_params *params)
-{
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
-
- spin_lock_init(&p_hwfn->p_rdma_info->lock);
-
- qed_rdma_init_devinfo(p_hwfn, params);
- qed_rdma_init_port(p_hwfn);
- qed_rdma_init_events(p_hwfn, params);
-
- rc = qed_rdma_reserve_lkey(p_hwfn);
- if (rc)
- return rc;
-
- rc = qed_rdma_init_hw(p_hwfn, p_ptt);
- if (rc)
- return rc;
-
- return qed_rdma_start_fw(p_hwfn, params, p_ptt);
-}
-
-static int qed_rdma_stop(void *rdma_cxt)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct rdma_close_func_ramrod_data *p_ramrod;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- struct qed_ptt *p_ptt;
- u32 ll2_ethertype_en;
- int rc = -EBUSY;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");
-
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
- return rc;
- }
-
- /* Disable RoCE search */
- qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
- p_hwfn->b_rdma_enabled_in_prs = false;
-
- qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
-
- ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
-
- qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
- (ll2_ethertype_en & 0xFFFE));
-
- qed_ptt_release(p_hwfn, p_ptt);
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- /* Stop RoCE */
- rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
- p_hwfn->p_rdma_info->proto, &init_data);
- if (rc)
- goto out;
-
- p_ramrod = &p_ent->ramrod.rdma_close_func;
-
- p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
- p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);
-
- rc = qed_spq_post(p_hwfn, p_ent, NULL);
-
-out:
- qed_rdma_free(p_hwfn);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
- return rc;
-}
-
-static int qed_rdma_add_user(void *rdma_cxt,
- struct qed_rdma_add_user_out_params *out_params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- u32 dpi_start_offset;
- u32 returned_id = 0;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");
-
- /* Allocate DPI */
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
- &returned_id);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-
- out_params->dpi = (u16)returned_id;
-
- /* Calculate the corresponding DPI address */
- dpi_start_offset = p_hwfn->dpi_start_offset;
-
- out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
- dpi_start_offset +
- ((out_params->dpi) * p_hwfn->dpi_size));
-
- out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
- dpi_start_offset +
- ((out_params->dpi) * p_hwfn->dpi_size);
-
- out_params->dpi_size = p_hwfn->dpi_size;
- out_params->wid_count = p_hwfn->wid_count;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
- return rc;
-}
-
-static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
-
- /* Link may have changed */
- p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
- QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
-
- p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
-
- p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;
-
- return p_port;
-}
-
-static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");
-
- /* Return struct with device parameters */
- return p_hwfn->p_rdma_info->dev;
-}
-
-static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
-
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-}
-
-static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
-{
- struct qed_hwfn *p_hwfn;
- u16 qz_num;
- u32 addr;
-
- p_hwfn = (struct qed_hwfn *)rdma_cxt;
-
- if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
- DP_NOTICE(p_hwfn,
- "queue zone offset %d is too large (max is %d)\n",
- qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
- return;
- }
-
- qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
- addr = GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
-
- REG_WR16(p_hwfn, addr, prod);
-
- /* keep prod updates ordered */
- wmb();
-}
-
-static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
- struct qed_dev_rdma_info *info)
-{
- struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
-
- memset(info, 0, sizeof(*info));
-
- info->rdma_type = QED_RDMA_TYPE_ROCE;
- info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);
-
- qed_fill_dev_info(cdev, &info->common);
-
- return 0;
-}
-
-static int qed_rdma_get_sb_start(struct qed_dev *cdev)
-{
- int feat_num;
-
- if (cdev->num_hwfns > 1)
- feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
- else
- feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
- cdev->num_hwfns;
-
- return feat_num;
-}
-
-static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
-{
- int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
- int n_msix = cdev->int_params.rdma_msix_cnt;
-
- return min_t(int, n_cnq, n_msix);
-}
-
-static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
-{
- int limit = 0;
-
- /* Mark the fastpath as free/used */
- cdev->int_params.fp_initialized = cnt ? true : false;
-
- if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
- DP_ERR(cdev,
- "qed roce supports only MSI-X interrupts (detected %d).\n",
- cdev->int_params.out.int_mode);
- return -EINVAL;
- } else if (cdev->int_params.fp_msix_cnt) {
- limit = cdev->int_params.rdma_msix_cnt;
- }
-
- if (!limit)
- return -ENOMEM;
-
- return min_t(int, cnt, limit);
-}
-
-static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
-{
- memset(info, 0, sizeof(*info));
-
- if (!cdev->int_params.fp_initialized) {
- DP_INFO(cdev,
- "Protocol driver requested interrupt information, but its support is not yet configured\n");
- return -EINVAL;
- }
-
- if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
- int msix_base = cdev->int_params.rdma_msix_base;
-
- info->msix_cnt = cdev->int_params.rdma_msix_cnt;
- info->msix = &cdev->int_params.msix_table[msix_base];
-
- DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
- info->msix_cnt, msix_base);
- }
-
- return 0;
-}
-
-static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- u32 returned_id;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");
-
- /* Allocates an unused protection domain */
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- rc = qed_rdma_bmap_alloc_id(p_hwfn,
- &p_hwfn->p_rdma_info->pd_map, &returned_id);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-
- *pd = (u16)returned_id;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
- return rc;
-}
-
-static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);
-
- /* Returns a previously allocated protection domain for reuse */
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-}
-
-static enum qed_rdma_toggle_bit
-qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
-{
- struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
- enum qed_rdma_toggle_bit toggle_bit;
- u32 bmap_id;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);
-
- /* the function toggles the bit that is related to a given icid
- * and returns the new toggle bit's value
- */
- bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
-
- spin_lock_bh(&p_info->lock);
- toggle_bit = !test_and_change_bit(bmap_id,
- p_info->toggle_bits.bitmap);
- spin_unlock_bh(&p_info->lock);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
- toggle_bit);
-
- return toggle_bit;
-}
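
(Note on the return value above: test_and_change_bit() flips the bit and
returns its old value, so negating it yields the bit's new state. For example,
if the toggle bit for an icid was 0, the call flips it to 1 and the function
returns QED_RDMA_TOGGLE_BIT_SET.)
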
-
-static int qed_rdma_create_cq(void *rdma_cxt,
- struct qed_rdma_create_cq_in_params *params,
- u16 *icid)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
- struct rdma_create_cq_ramrod_data *p_ramrod;
- enum qed_rdma_toggle_bit toggle_bit;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- u32 returned_id, start_cid;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
- params->cq_handle_hi, params->cq_handle_lo);
-
- /* Allocate icid */
- spin_lock_bh(&p_info->lock);
- rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
- spin_unlock_bh(&p_info->lock);
-
- if (rc) {
- DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
- return rc;
- }
-
- start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
- p_info->proto);
- *icid = returned_id + start_cid;
-
- /* Check if icid requires a page allocation */
- rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
- if (rc)
- goto err;
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.cid = *icid;
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- /* Send create CQ ramrod */
- rc = qed_sp_init_request(p_hwfn, &p_ent,
- RDMA_RAMROD_CREATE_CQ,
- p_info->proto, &init_data);
- if (rc)
- goto err;
-
- p_ramrod = &p_ent->ramrod.rdma_create_cq;
-
- p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
- p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
- p_ramrod->dpi = cpu_to_le16(params->dpi);
- p_ramrod->is_two_level_pbl = params->pbl_two_level;
- p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
- DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
- p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
- p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
- params->cnq_id;
- p_ramrod->int_timeout = params->int_timeout;
-
- /* toggle the bit for every resize or create cq for a given icid */
- toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
-
- p_ramrod->toggle_bit = toggle_bit;
-
- rc = qed_spq_post(p_hwfn, p_ent, NULL);
- if (rc) {
- /* restore toggle bit */
- qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
- goto err;
- }
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
- return rc;
-
-err:
- /* release allocated icid */
- spin_lock_bh(&p_info->lock);
- qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
- spin_unlock_bh(&p_info->lock);
- DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
-
- return rc;
-}
-
-static int
-qed_rdma_destroy_cq(void *rdma_cxt,
- struct qed_rdma_destroy_cq_in_params *in_params,
- struct qed_rdma_destroy_cq_out_params *out_params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct rdma_destroy_cq_output_params *p_ramrod_res;
- struct rdma_destroy_cq_ramrod_data *p_ramrod;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- dma_addr_t ramrod_res_phys;
- int rc = -ENOMEM;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
-
- p_ramrod_res =
- (struct rdma_destroy_cq_output_params *)
- dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_destroy_cq_output_params),
- &ramrod_res_phys, GFP_KERNEL);
- if (!p_ramrod_res) {
- DP_NOTICE(p_hwfn,
- "qed destroy cq failed: cannot allocate memory (ramrod)\n");
- return rc;
- }
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.cid = in_params->icid;
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- /* Send destroy CQ ramrod */
- rc = qed_sp_init_request(p_hwfn, &p_ent,
- RDMA_RAMROD_DESTROY_CQ,
- p_hwfn->p_rdma_info->proto, &init_data);
- if (rc)
- goto err;
-
- p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
- DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
-
- rc = qed_spq_post(p_hwfn, p_ent, NULL);
- if (rc)
- goto err;
-
- out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);
-
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_destroy_cq_output_params),
- p_ramrod_res, ramrod_res_phys);
-
- /* Free icid */
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
-
- qed_bmap_release_id(p_hwfn,
- &p_hwfn->p_rdma_info->cq_map,
- (in_params->icid -
- qed_cxt_get_proto_cid_start(p_hwfn,
- p_hwfn->
- p_rdma_info->proto)));
-
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
- return rc;
-
-err: dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_destroy_cq_output_params),
- p_ramrod_res, ramrod_res_phys);
-
- return rc;
-}
-
-static void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
-{
- p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
- p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
- p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
}
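
(For reference, qed_rdma_set_fw_mac(), now declared in qed_rdma.h, packs a
6-byte MAC into three little-endian 16-bit words. Worked example with a
hypothetical MAC aa:bb:cc:dd:ee:ff: p_fw_mac[] becomes { cpu_to_le16(0xaabb),
cpu_to_le16(0xccdd), cpu_to_le16(0xeeff) }.)
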
static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
@@ -1210,7 +156,7 @@
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
-static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
+int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
u32 responder_icid;
@@ -1870,9 +816,9 @@
return rc;
}
-static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
- struct qed_rdma_qp *qp,
- struct qed_rdma_query_qp_out_params *out_params)
+int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
{
struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
struct roce_query_qp_req_output_params *p_req_ramrod_res;
@@ -2011,7 +957,7 @@
return rc;
}
-static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
u32 num_invalidated_mw = 0;
u32 num_bound_mw = 0;
@@ -2050,138 +996,10 @@
return 0;
}
-static int qed_rdma_query_qp(void *rdma_cxt,
- struct qed_rdma_qp *qp,
- struct qed_rdma_query_qp_out_params *out_params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
-
- /* The following fields are filled in from qp and not FW as they can't
- * be modified by FW
- */
- out_params->mtu = qp->mtu;
- out_params->dest_qp = qp->dest_qp;
- out_params->incoming_atomic_en = qp->incoming_atomic_en;
- out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
- out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
- out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
- out_params->dgid = qp->dgid;
- out_params->flow_label = qp->flow_label;
- out_params->hop_limit_ttl = qp->hop_limit_ttl;
- out_params->traffic_class_tos = qp->traffic_class_tos;
- out_params->timeout = qp->ack_timeout;
- out_params->rnr_retry = qp->rnr_retry_cnt;
- out_params->retry_cnt = qp->retry_cnt;
- out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
- out_params->pkey_index = 0;
- out_params->max_rd_atomic = qp->max_rd_atomic_req;
- out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
- out_params->sqd_async = qp->sqd_async;
-
- rc = qed_roce_query_qp(p_hwfn, qp, out_params);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
- return rc;
-}
-
-static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- int rc = 0;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
-
- rc = qed_roce_destroy_qp(p_hwfn, qp);
-
- /* free qp params struct */
- kfree(qp);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
- return rc;
-}
-
-static struct qed_rdma_qp *
-qed_rdma_create_qp(void *rdma_cxt,
- struct qed_rdma_create_qp_in_params *in_params,
- struct qed_rdma_create_qp_out_params *out_params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct qed_rdma_qp *qp;
- u8 max_stats_queues;
- int rc;
-
- if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
- DP_ERR(p_hwfn->cdev,
- "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
- rdma_cxt, in_params, out_params);
- return NULL;
- }
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "qed rdma create qp called with qp_handle = %08x%08x\n",
- in_params->qp_handle_hi, in_params->qp_handle_lo);
-
- /* Some sanity checks... */
- max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
- if (in_params->stats_queue >= max_stats_queues) {
- DP_ERR(p_hwfn->cdev,
- "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
- in_params->stats_queue, max_stats_queues);
- return NULL;
- }
-
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp) {
- DP_NOTICE(p_hwfn, "Failed to allocate qed_rdma_qp\n");
- return NULL;
- }
-
- rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
- qp->qpid = ((0xFF << 16) | qp->icid);
-
- DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid);
-
- if (rc) {
- kfree(qp);
- return NULL;
- }
-
- qp->cur_state = QED_ROCE_QP_STATE_RESET;
- qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
- qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
- qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
- qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
- qp->use_srq = in_params->use_srq;
- qp->signal_all = in_params->signal_all;
- qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
- qp->pd = in_params->pd;
- qp->dpi = in_params->dpi;
- qp->sq_cq_id = in_params->sq_cq_id;
- qp->sq_num_pages = in_params->sq_num_pages;
- qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
- qp->rq_cq_id = in_params->rq_cq_id;
- qp->rq_num_pages = in_params->rq_num_pages;
- qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
- qp->srq_id = in_params->srq_id;
- qp->req_offloaded = false;
- qp->resp_offloaded = false;
- qp->e2e_flow_control_en = qp->use_srq ? false : true;
- qp->stats_queue = in_params->stats_queue;
-
- out_params->icid = qp->icid;
- out_params->qp_id = qp->qpid;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
- return qp;
-}
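
(Worked example for the QP id composition above: with a hypothetical icid of
0x0012, qp->qpid = (0xFF << 16) | 0x0012 = 0x00FF0012.)
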
-
-static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
- struct qed_rdma_qp *qp,
- enum qed_roce_qp_state prev_state,
- struct qed_rdma_modify_qp_in_params *params)
+int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ enum qed_roce_qp_state prev_state,
+ struct qed_rdma_modify_qp_in_params *params)
{
u32 num_invalidated_mw = 0, num_bound_mw = 0;
int rc = 0;
@@ -2286,331 +1104,6 @@
return rc;
}
-static int qed_rdma_modify_qp(void *rdma_cxt,
- struct qed_rdma_qp *qp,
- struct qed_rdma_modify_qp_in_params *params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- enum qed_roce_qp_state prev_state;
- int rc = 0;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
- qp->icid, params->new_state);
-
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
- return rc;
- }
-
- if (GET_FIELD(params->modify_flags,
- QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
- qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
- qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
- qp->incoming_atomic_en = params->incoming_atomic_en;
- }
-
- /* Update QP structure with the updated values */
- if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
- qp->roce_mode = params->roce_mode;
- if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
- qp->pkey = params->pkey;
- if (GET_FIELD(params->modify_flags,
- QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
- qp->e2e_flow_control_en = params->e2e_flow_control_en;
- if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
- qp->dest_qp = params->dest_qp;
- if (GET_FIELD(params->modify_flags,
- QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
- /* Indicates that the following parameters have changed:
- * Traffic class, flow label, hop limit, source GID,
- * destination GID, loopback indicator
- */
- qp->traffic_class_tos = params->traffic_class_tos;
- qp->flow_label = params->flow_label;
- qp->hop_limit_ttl = params->hop_limit_ttl;
-
- qp->sgid = params->sgid;
- qp->dgid = params->dgid;
- qp->udp_src_port = 0;
- qp->vlan_id = params->vlan_id;
- qp->mtu = params->mtu;
- qp->lb_indication = params->lb_indication;
- memcpy((u8 *)&qp->remote_mac_addr[0],
- (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
- if (params->use_local_mac) {
- memcpy((u8 *)&qp->local_mac_addr[0],
- (u8 *)&params->local_mac_addr[0], ETH_ALEN);
- } else {
- memcpy((u8 *)&qp->local_mac_addr[0],
- (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
- }
- }
- if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
- qp->rq_psn = params->rq_psn;
- if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
- qp->sq_psn = params->sq_psn;
- if (GET_FIELD(params->modify_flags,
- QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
- qp->max_rd_atomic_req = params->max_rd_atomic_req;
- if (GET_FIELD(params->modify_flags,
- QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
- qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
- if (GET_FIELD(params->modify_flags,
- QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
- qp->ack_timeout = params->ack_timeout;
- if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
- qp->retry_cnt = params->retry_cnt;
- if (GET_FIELD(params->modify_flags,
- QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
- qp->rnr_retry_cnt = params->rnr_retry_cnt;
- if (GET_FIELD(params->modify_flags,
- QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
- qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
-
- qp->sqd_async = params->sqd_async;
-
- prev_state = qp->cur_state;
- if (GET_FIELD(params->modify_flags,
- QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
- qp->cur_state = params->new_state;
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
- qp->cur_state);
- }
-
- rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
- return rc;
-}
-
-static int
-qed_rdma_register_tid(void *rdma_cxt,
- struct qed_rdma_register_tid_in_params *params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct rdma_register_tid_ramrod_data *p_ramrod;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- enum rdma_tid_type tid_type;
- u8 fw_return_code;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
- p_hwfn->p_rdma_info->proto, &init_data);
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
- return rc;
- }
-
- if (p_hwfn->p_rdma_info->last_tid < params->itid)
- p_hwfn->p_rdma_info->last_tid = params->itid;
-
- p_ramrod = &p_ent->ramrod.rdma_register_tid;
-
- p_ramrod->flags = 0;
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
- params->pbl_two_level);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
-
- /* Don't initialize D/C field, as it may override other bits. */
- if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
- params->page_size_log - 12);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID,
- p_hwfn->p_rdma_info->last_tid);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
- params->remote_read);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
- params->remote_write);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
- params->remote_atomic);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
- params->local_write);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
- params->mw_bind);
-
- SET_FIELD(p_ramrod->flags1,
- RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
- params->pbl_page_size_log - 12);
-
- SET_FIELD(p_ramrod->flags2,
- RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);
-
- switch (params->tid_type) {
- case QED_RDMA_TID_REGISTERED_MR:
- tid_type = RDMA_TID_REGISTERED_MR;
- break;
- case QED_RDMA_TID_FMR:
- tid_type = RDMA_TID_FMR;
- break;
- case QED_RDMA_TID_MW_TYPE1:
- tid_type = RDMA_TID_MW_TYPE1;
- break;
- case QED_RDMA_TID_MW_TYPE2A:
- tid_type = RDMA_TID_MW_TYPE2A;
- break;
- default:
- rc = -EINVAL;
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
- return rc;
- }
- SET_FIELD(p_ramrod->flags1,
- RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);
-
- p_ramrod->itid = cpu_to_le32(params->itid);
- p_ramrod->key = params->key;
- p_ramrod->pd = cpu_to_le16(params->pd);
- p_ramrod->length_hi = (u8)(params->length >> 32);
- p_ramrod->length_lo = DMA_LO_LE(params->length);
- if (params->zbva) {
- /* Lower 32 bits of the registered MR address.
- * In case of zero based MR, will hold FBO
- */
- p_ramrod->va.hi = 0;
- p_ramrod->va.lo = cpu_to_le32(params->fbo);
- } else {
- DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
- }
- DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
-
- /* DIF */
- if (params->dif_enabled) {
- SET_FIELD(p_ramrod->flags2,
- RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
- DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
- params->dif_error_addr);
- DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
- }
-
- rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
- if (rc)
- return rc;
-
- if (fw_return_code != RDMA_RETURN_OK) {
- DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
- return -EINVAL;
- }
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
- return rc;
-}
-
-static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct rdma_deregister_tid_ramrod_data *p_ramrod;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- struct qed_ptt *p_ptt;
- u8 fw_return_code;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
- p_hwfn->p_rdma_info->proto, &init_data);
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
- return rc;
- }
-
- p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
- p_ramrod->itid = cpu_to_le32(itid);
-
- rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
- return rc;
- }
-
- if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
- DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
- return -EINVAL;
- } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
- /* Bit indicating that the TID is in use and a nig drain is
- * required before sending the ramrod again
- */
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt) {
- rc = -EBUSY;
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to acquire PTT\n");
- return rc;
- }
-
- rc = qed_mcp_drain(p_hwfn, p_ptt);
- if (rc) {
- qed_ptt_release(p_hwfn, p_ptt);
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Drain failed\n");
- return rc;
- }
-
- qed_ptt_release(p_hwfn, p_ptt);
-
- /* Resend the ramrod */
- rc = qed_sp_init_request(p_hwfn, &p_ent,
- RDMA_RAMROD_DEREGISTER_MR,
- p_hwfn->p_rdma_info->proto,
- &init_data);
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to init sp-element\n");
- return rc;
- }
-
- rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
- if (rc) {
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Ramrod failed\n");
- return rc;
- }
-
- if (fw_return_code != RDMA_RETURN_OK) {
- DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
- fw_return_code);
- return rc;
- }
- }
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
- return rc;
-}
-
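The deregister flow above handles RDMA_RETURN_NIG_DRAIN_REQ by draining via the MCP and resending the ramrod exactly once. A minimal standalone sketch of that retry pattern; send_deregister_ramrod() and nig_drain() are hypothetical stand-ins, not driver API:

enum fw_ret { FW_OK = 0, FW_NIG_DRAIN_REQ = 1, FW_BAD_STATE = 2 };

static int ramrod_calls; /* lets the stub report "drain needed" once */

/* Hypothetical stand-in for posting the deregister ramrod. */
static enum fw_ret send_deregister_ramrod(unsigned int itid)
{
	(void)itid;
	return ramrod_calls++ == 0 ? FW_NIG_DRAIN_REQ : FW_OK;
}

/* Hypothetical stand-in for qed_mcp_drain(). */
static int nig_drain(void)
{
	return 0;
}

static int deregister_tid(unsigned int itid)
{
	enum fw_ret ret = send_deregister_ramrod(itid);

	if (ret == FW_BAD_STATE)
		return -1;
	if (ret == FW_NIG_DRAIN_REQ) {
		/* TID still in use: drain in-flight traffic, retry once. */
		if (nig_drain())
			return -1;
		if (send_deregister_ramrod(itid) != FW_OK)
			return -1;
	}
	return 0;
}

int main(void)
{
	return deregister_tid(7); /* returns 0 after the drain-and-retry */
}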
static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
@@ -2636,414 +1129,23 @@
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
-static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
+void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- return QED_LEADING_HWFN(cdev);
-}
+ u8 val;
-static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
-{
- u32 val;
-
- val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;
-
- qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
- DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
- "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
- val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
-}
-
-void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
-{
- p_hwfn->db_bar_no_edpm = true;
+ /* if any QPs are already active, we want to disable DPM, since their
+ * context information contains information from before the latest DCBx
+ * update. Otherwise enable it.
+ */
+ val = qed_rdma_allocated_qps(p_hwfn) ? true : false;
+ p_hwfn->dcbx_no_edpm = (u8)val;
qed_rdma_dpm_conf(p_hwfn, p_ptt);
}
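qed_roce_dpm_dcbx() above gates EDPM on whether any QPs are still live, since their contexts were built against the previous DCBx settings. A compact standalone model of that decision; struct hwfn_model and allocated_qps() are illustrative, not driver symbols:

#include <stdbool.h>
#include <stdio.h>

struct hwfn_model {
	bool dcbx_no_edpm;   /* QP contexts predate the latest DCBx update */
	bool db_bar_no_edpm; /* doorbell BAR can't support EDPM */
};

/* Hypothetical stand-in for qed_rdma_allocated_qps(). */
static unsigned int allocated_qps(void)
{
	return 0;
}

static void dpm_dcbx_update(struct hwfn_model *h)
{
	h->dcbx_no_edpm = allocated_qps() > 0;
	/* EDPM stays on only if neither condition forbids it. */
	printf("DPM_EN = %d\n", !(h->dcbx_no_edpm || h->db_bar_no_edpm));
}

int main(void)
{
	struct hwfn_model h = { false, false };

	dpm_dcbx_update(&h); /* prints "DPM_EN = 1" while no QPs live */
	return 0;
}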
-static int qed_rdma_start(void *rdma_cxt,
- struct qed_rdma_start_in_params *params)
+int qed_roce_setup(struct qed_hwfn *p_hwfn)
{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct qed_ptt *p_ptt;
- int rc = -EBUSY;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "desired_cnq = %08x\n", params->desired_cnq);
-
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt)
- goto err;
-
- rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
- if (rc)
- goto err1;
-
- rc = qed_rdma_setup(p_hwfn, p_ptt, params);
- if (rc)
- goto err2;
-
- qed_ptt_release(p_hwfn, p_ptt);
-
- return rc;
-
-err2:
- qed_rdma_free(p_hwfn);
-err1:
- qed_ptt_release(p_hwfn, p_ptt);
-err:
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
- return rc;
+ return qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
+ qed_roce_async_event);
}
-static int qed_rdma_init(struct qed_dev *cdev,
- struct qed_rdma_start_in_params *params)
-{
- return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
-}
-
-static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);
-
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-}
-
-void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment, bool b_last_packet)
-{
- struct qed_roce_ll2_packet *packet = cookie;
- struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
-
- roce_ll2->cbs.tx_cb(roce_ll2->cb_cookie, packet);
-}
-
-void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment, bool b_last_packet)
-{
- qed_ll2b_complete_tx_gsi_packet(p_hwfn, connection_handle,
- cookie, first_frag_addr,
- b_last_fragment, b_last_packet);
-}
-
-void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t rx_buf_addr,
- u16 data_length,
- u8 data_length_error,
- u16 parse_flags,
- u16 vlan,
- u32 src_mac_addr_hi,
- u16 src_mac_addr_lo, bool b_last_packet)
-{
- struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
- struct qed_roce_ll2_rx_params params;
- struct qed_dev *cdev = p_hwfn->cdev;
- struct qed_roce_ll2_packet pkt;
-
- DP_VERBOSE(cdev,
- QED_MSG_LL2,
- "roce ll2 rx complete: bus_addr=%p, len=%d, data_len_err=%d\n",
- (void *)(uintptr_t)rx_buf_addr,
- data_length, data_length_error);
-
- memset(&pkt, 0, sizeof(pkt));
- pkt.n_seg = 1;
- pkt.payload[0].baddr = rx_buf_addr;
- pkt.payload[0].len = data_length;
-
- memset(&params, 0, sizeof(params));
- params.vlan_id = vlan;
- *((u32 *)&params.smac[0]) = ntohl(src_mac_addr_hi);
- *((u16 *)&params.smac[4]) = ntohs(src_mac_addr_lo);
-
- if (data_length_error) {
- DP_ERR(cdev,
- "roce ll2 rx complete: data length error %d, length=%d\n",
- data_length_error, data_length);
- params.rc = -EINVAL;
- }
-
- roce_ll2->cbs.rx_cb(roce_ll2->cb_cookie, &pkt, &params);
-}
-
-static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
- u8 *old_mac_address,
- u8 *new_mac_address)
-{
- struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
- struct qed_ptt *p_ptt;
- int rc = 0;
-
- if (!hwfn->ll2 || hwfn->ll2->handle == QED_LL2_UNUSED_HANDLE) {
- DP_ERR(cdev,
- "qed roce mac filter failed - roce_info/ll2 NULL\n");
- return -EINVAL;
- }
-
- p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
- if (!p_ptt) {
- DP_ERR(cdev,
- "qed roce ll2 mac filter set: failed to acquire PTT\n");
- return -EINVAL;
- }
-
- mutex_lock(&hwfn->ll2->lock);
- if (old_mac_address)
- qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
- old_mac_address);
- if (new_mac_address)
- rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
- new_mac_address);
- mutex_unlock(&hwfn->ll2->lock);
-
- qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
-
- if (rc)
- DP_ERR(cdev,
- "qed roce ll2 mac filter set: failed to add mac filter\n");
-
- return rc;
-}
-
-static int qed_roce_ll2_start(struct qed_dev *cdev,
- struct qed_roce_ll2_params *params)
-{
- struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
- struct qed_roce_ll2_info *roce_ll2;
- struct qed_ll2_conn ll2_params;
- int rc;
-
- if (!params) {
- DP_ERR(cdev, "qed roce ll2 start: failed due to NULL params\n");
- return -EINVAL;
- }
- if (!params->cbs.tx_cb || !params->cbs.rx_cb) {
- DP_ERR(cdev,
- "qed roce ll2 start: failed due to NULL tx/rx. tx_cb=%p, rx_cb=%p\n",
- params->cbs.tx_cb, params->cbs.rx_cb);
- return -EINVAL;
- }
- if (!is_valid_ether_addr(params->mac_address)) {
- DP_ERR(cdev,
- "qed roce ll2 start: failed due to invalid Ethernet address %pM\n",
- params->mac_address);
- return -EINVAL;
- }
-
- /* Initialize */
- roce_ll2 = kzalloc(sizeof(*roce_ll2), GFP_ATOMIC);
- if (!roce_ll2) {
- DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n");
- return -ENOMEM;
- }
- roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
- roce_ll2->cbs = params->cbs;
- roce_ll2->cb_cookie = params->cb_cookie;
- mutex_init(&roce_ll2->lock);
-
- memset(&ll2_params, 0, sizeof(ll2_params));
- ll2_params.conn_type = QED_LL2_TYPE_ROCE;
- ll2_params.mtu = params->mtu;
- ll2_params.rx_drop_ttl0_flg = true;
- ll2_params.rx_vlan_removal_en = false;
- ll2_params.tx_dest = CORE_TX_DEST_NW;
- ll2_params.ai_err_packet_too_big = LL2_DROP_PACKET;
- ll2_params.ai_err_no_buf = LL2_DROP_PACKET;
- ll2_params.gsi_enable = true;
-
- rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_params,
- params->max_rx_buffers,
- params->max_tx_buffers,
- &roce_ll2->handle);
- if (rc) {
- DP_ERR(cdev,
- "qed roce ll2 start: failed to acquire LL2 connection (rc=%d)\n",
- rc);
- goto err;
- }
-
- rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
- roce_ll2->handle);
- if (rc) {
- DP_ERR(cdev,
- "qed roce ll2 start: failed to establish LL2 connection (rc=%d)\n",
- rc);
- goto err1;
- }
-
- hwfn->ll2 = roce_ll2;
-
- rc = qed_roce_ll2_set_mac_filter(cdev, NULL, params->mac_address);
- if (rc) {
- hwfn->ll2 = NULL;
- goto err2;
- }
- ether_addr_copy(roce_ll2->mac_address, params->mac_address);
-
- return 0;
-
-err2:
- qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
-err1:
- qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
-err:
- kfree(roce_ll2);
- return rc;
-}
-
-static int qed_roce_ll2_stop(struct qed_dev *cdev)
-{
- struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
- struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
- int rc;
-
- if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
- DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
- return -EINVAL;
- }
-
- /* remove LL2 MAC address filter */
- rc = qed_roce_ll2_set_mac_filter(cdev, roce_ll2->mac_address, NULL);
- eth_zero_addr(roce_ll2->mac_address);
-
- rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
- roce_ll2->handle);
- if (rc)
- DP_ERR(cdev,
- "qed roce ll2 stop: failed to terminate LL2 connection (rc=%d)\n",
- rc);
-
- qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
-
- roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
-
- kfree(roce_ll2);
-
- return rc;
-}
-
-static int qed_roce_ll2_tx(struct qed_dev *cdev,
- struct qed_roce_ll2_packet *pkt,
- struct qed_roce_ll2_tx_params *params)
-{
- struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
- struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
- enum qed_ll2_roce_flavor_type qed_roce_flavor;
- u8 flags = 0;
- int rc;
- int i;
-
- if (!pkt || !params) {
- DP_ERR(cdev,
- "roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
- cdev, pkt, params);
- return -EINVAL;
- }
-
- qed_roce_flavor = (pkt->roce_mode == ROCE_V1) ? QED_LL2_ROCE
- : QED_LL2_RROCE;
-
- if (pkt->roce_mode == ROCE_V2_IPV4)
- flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
-
- /* Tx header */
- rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
- 1 + pkt->n_seg, 0, flags, 0,
- QED_LL2_TX_DEST_NW,
- qed_roce_flavor, pkt->header.baddr,
- pkt->header.len, pkt, 1);
- if (rc) {
- DP_ERR(cdev, "roce ll2 tx: header failed (rc=%d)\n", rc);
- return QED_ROCE_TX_HEAD_FAILURE;
- }
-
- /* Tx payload */
- for (i = 0; i < pkt->n_seg; i++) {
- rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
- roce_ll2->handle,
- pkt->payload[i].baddr,
- pkt->payload[i].len);
- if (rc) {
- /* If this fails there is not much to do here: a partial
- * packet has already been posted and we can't free the
- * memory, so we will need to wait for completion.
- */
- DP_ERR(cdev,
- "roce ll2 tx: payload failed (rc=%d)\n", rc);
- return QED_ROCE_TX_FRAG_FAILURE;
- }
- }
-
- return 0;
-}
-
-static int qed_roce_ll2_post_rx_buffer(struct qed_dev *cdev,
- struct qed_roce_ll2_buffer *buf,
- u64 cookie, u8 notify_fw)
-{
- return qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
- QED_LEADING_HWFN(cdev)->ll2->handle,
- buf->baddr, buf->len,
- (void *)(uintptr_t)cookie, notify_fw);
-}
-
-static int qed_roce_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
-{
- struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
- struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
-
- return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
- roce_ll2->handle, stats);
-}
-
-static const struct qed_rdma_ops qed_rdma_ops_pass = {
- .common = &qed_common_ops_pass,
- .fill_dev_info = &qed_fill_rdma_dev_info,
- .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
- .rdma_init = &qed_rdma_init,
- .rdma_add_user = &qed_rdma_add_user,
- .rdma_remove_user = &qed_rdma_remove_user,
- .rdma_stop = &qed_rdma_stop,
- .rdma_query_port = &qed_rdma_query_port,
- .rdma_query_device = &qed_rdma_query_device,
- .rdma_get_start_sb = &qed_rdma_get_sb_start,
- .rdma_get_rdma_int = &qed_rdma_get_int,
- .rdma_set_rdma_int = &qed_rdma_set_int,
- .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
- .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
- .rdma_alloc_pd = &qed_rdma_alloc_pd,
- .rdma_dealloc_pd = &qed_rdma_free_pd,
- .rdma_create_cq = &qed_rdma_create_cq,
- .rdma_destroy_cq = &qed_rdma_destroy_cq,
- .rdma_create_qp = &qed_rdma_create_qp,
- .rdma_modify_qp = &qed_rdma_modify_qp,
- .rdma_query_qp = &qed_rdma_query_qp,
- .rdma_destroy_qp = &qed_rdma_destroy_qp,
- .rdma_alloc_tid = &qed_rdma_alloc_tid,
- .rdma_free_tid = &qed_rdma_free_tid,
- .rdma_register_tid = &qed_rdma_register_tid,
- .rdma_deregister_tid = &qed_rdma_deregister_tid,
- .roce_ll2_start = &qed_roce_ll2_start,
- .roce_ll2_stop = &qed_roce_ll2_stop,
- .roce_ll2_tx = &qed_roce_ll2_tx,
- .roce_ll2_post_rx_buffer = &qed_roce_ll2_post_rx_buffer,
- .roce_ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
- .roce_ll2_stats = &qed_roce_ll2_stats,
-};
-
-const struct qed_rdma_ops *qed_get_rdma_ops(void)
-{
- return &qed_rdma_ops_pass;
-}
-EXPORT_SYMBOL(qed_get_rdma_ops);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
index 9742af5..f801f39 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -32,191 +32,28 @@
#ifndef _QED_ROCE_H
#define _QED_ROCE_H
#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/qed/qed_if.h>
-#include <linux/qed/qed_roce_if.h>
-#include "qed.h"
-#include "qed_dev_api.h"
-#include "qed_hsi.h"
-#include "qed_ll2.h"
-
-#define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS)
-#define QED_RDMA_MAX_P_KEY (1)
-#define QED_RDMA_MAX_WQE (0x7FFF)
-#define QED_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF)
-#define QED_RDMA_PAGE_SIZE_CAPS (0xFFFFF000)
-#define QED_RDMA_ACK_DELAY (15)
-#define QED_RDMA_MAX_MR_SIZE (0x10000000000ULL)
-#define QED_RDMA_MAX_CQS (RDMA_MAX_CQS)
-#define QED_RDMA_MAX_MRS (RDMA_MAX_TIDS)
-/* Add 1 for header element */
-#define QED_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1)
-#define QED_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE)
-#define QED_RDMA_SRQ_WQE_ELEM_SIZE (16)
-#define QED_RDMA_MAX_SRQS (32 * 1024)
-
-#define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1)
-#define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1)
-
-enum qed_rdma_toggle_bit {
- QED_RDMA_TOGGLE_BIT_CLEAR = 0,
- QED_RDMA_TOGGLE_BIT_SET = 1
-};
-
-#define QED_RDMA_MAX_BMAP_NAME (10)
-struct qed_bmap {
- unsigned long *bitmap;
- u32 max_count;
- char name[QED_RDMA_MAX_BMAP_NAME];
-};
-
-struct qed_rdma_info {
- /* spin lock to protect bitmaps */
- spinlock_t lock;
-
- struct qed_bmap cq_map;
- struct qed_bmap pd_map;
- struct qed_bmap tid_map;
- struct qed_bmap qp_map;
- struct qed_bmap srq_map;
- struct qed_bmap cid_map;
- struct qed_bmap real_cid_map;
- struct qed_bmap dpi_map;
- struct qed_bmap toggle_bits;
- struct qed_rdma_events events;
- struct qed_rdma_device *dev;
- struct qed_rdma_port *port;
- u32 last_tid;
- u8 num_cnqs;
- u32 num_qps;
- u32 num_mrs;
- u16 queue_zone_base;
- u16 max_queue_zones;
- enum protocol_type proto;
-};
-
-struct qed_rdma_qp {
- struct regpair qp_handle;
- struct regpair qp_handle_async;
- u32 qpid;
- u16 icid;
- enum qed_roce_qp_state cur_state;
- bool use_srq;
- bool signal_all;
- bool fmr_and_reserved_lkey;
-
- bool incoming_rdma_read_en;
- bool incoming_rdma_write_en;
- bool incoming_atomic_en;
- bool e2e_flow_control_en;
-
- u16 pd;
- u16 pkey;
- u32 dest_qp;
- u16 mtu;
- u16 srq_id;
- u8 traffic_class_tos;
- u8 hop_limit_ttl;
- u16 dpi;
- u32 flow_label;
- bool lb_indication;
- u16 vlan_id;
- u32 ack_timeout;
- u8 retry_cnt;
- u8 rnr_retry_cnt;
- u8 min_rnr_nak_timer;
- bool sqd_async;
- union qed_gid sgid;
- union qed_gid dgid;
- enum roce_mode roce_mode;
- u16 udp_src_port;
- u8 stats_queue;
-
- /* requester */
- u8 max_rd_atomic_req;
- u32 sq_psn;
- u16 sq_cq_id;
- u16 sq_num_pages;
- dma_addr_t sq_pbl_ptr;
- void *orq;
- dma_addr_t orq_phys_addr;
- u8 orq_num_pages;
- bool req_offloaded;
-
- /* responder */
- u8 max_rd_atomic_resp;
- u32 rq_psn;
- u16 rq_cq_id;
- u16 rq_num_pages;
- dma_addr_t rq_pbl_ptr;
- void *irq;
- dma_addr_t irq_phys_addr;
- u8 irq_num_pages;
- bool resp_offloaded;
- u32 cq_prod;
-
- u8 remote_mac_addr[6];
- u8 local_mac_addr[6];
-
- void *shared_queue;
- dma_addr_t shared_queue_phys_addr;
-};
#if IS_ENABLED(CONFIG_QED_RDMA)
-void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-void qed_roce_async_event(struct qed_hwfn *p_hwfn,
- u8 fw_event_code, union rdma_eqe_data *rdma_data);
-void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment, bool b_last_packet);
-void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment, bool b_last_packet);
-void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t rx_buf_addr,
- u16 data_length,
- u8 data_length_error,
- u16 parse_flags,
- u16 vlan,
- u32 src_mac_addr_hi,
- u16 src_mac_addr_lo, bool b_last_packet);
+void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#else
-static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
-static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn,
- u8 fw_event_code,
- union rdma_eqe_data *rdma_data) {}
-static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment,
- bool b_last_packet) {}
-static inline void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment,
- bool b_last_packet) {}
-static inline void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t rx_buf_addr,
- u16 data_length,
- u8 data_length_error,
- u16 parse_flags,
- u16 vlan,
- u32 src_mac_addr_hi,
- u16 src_mac_addr_lo,
- bool b_last_packet) {}
+static inline void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt) {}
#endif
+
+int qed_roce_setup(struct qed_hwfn *p_hwfn);
+void qed_roce_stop(struct qed_hwfn *p_hwfn);
+int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid);
+int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp);
+
+int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params);
+
+int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ enum qed_roce_qp_state prev_state,
+ struct qed_rdma_modify_qp_in_params *params);
+
#endif
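The header keeps the IS_ENABLED()/static-inline stub idiom seen above, so callers compile unchanged whether or not CONFIG_QED_RDMA is set. The same idiom in miniature; CONFIG_FEATURE_X and do_feature() are made-up names:

/* feature.h -- illustrative only; the driver uses IS_ENABLED() from
 * <linux/kconfig.h>, modelled here with plain #if defined().
 */
#if defined(CONFIG_FEATURE_X)
void do_feature(int arg);               /* real version, linked in */
#else
static inline void do_feature(int arg)  /* empty stub compiles away */
{
	(void)arg;
}
#endif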
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 3357bbe..56c95fb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -120,6 +120,7 @@
struct iscsi_spe_func_dstry iscsi_destroy;
struct iscsi_spe_conn_offload iscsi_conn_offload;
struct iscsi_conn_update_ramrod_params iscsi_conn_update;
+ struct iscsi_spe_conn_mac_update iscsi_conn_mac_update;
struct iscsi_spe_conn_termination iscsi_conn_terminate;
struct vf_start_ramrod_data vf_start;
@@ -173,6 +174,22 @@
struct qed_chain chain;
};
+typedef int
+(*qed_spq_async_comp_cb)(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ u16 echo,
+ union event_ring_data *data,
+ u8 fw_return_code);
+
+int
+qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
+ enum protocol_type protocol_id,
+ qed_spq_async_comp_cb cb);
+
+void
+qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
+ enum protocol_type protocol_id);
+
struct qed_spq {
spinlock_t lock; /* SPQ lock */
@@ -202,6 +219,7 @@
u32 comp_count;
u32 cid;
+ qed_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE];
};
/**
@@ -270,28 +288,23 @@
* @param p_hwfn
* @param num_elem number of elements in the eq
*
- * @return struct qed_eq* - a newly allocated structure; NULL upon error.
+ * @return int
*/
-struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
- u16 num_elem);
+int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem);
/**
- * @brief qed_eq_setup - Reset the SPQ to its start state.
+ * @brief qed_eq_setup - Reset the EQ to its start state.
*
* @param p_hwfn
- * @param p_eq
*/
-void qed_eq_setup(struct qed_hwfn *p_hwfn,
- struct qed_eq *p_eq);
+void qed_eq_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_eq_deallocate - deallocates the given EQ struct.
+ * @brief qed_eq_free - deallocates the given EQ struct.
*
* @param p_hwfn
- * @param p_eq
*/
-void qed_eq_free(struct qed_hwfn *p_hwfn,
- struct qed_eq *p_eq);
+void qed_eq_free(struct qed_hwfn *p_hwfn);
/**
* @brief qed_eq_prod_update - update the FW with default EQ producer
@@ -342,28 +355,23 @@
*
* @param p_hwfn
*
- * @return struct qed_eq* - a newly allocated structure; NULL upon error.
+ * @return int
*/
-struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn);
+int qed_consq_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_consq_setup - Reset the ConsQ to its start
- * state.
+ * @brief qed_consq_setup - Reset the ConsQ to its start state.
*
* @param p_hwfn
- * @param p_eq
*/
-void qed_consq_setup(struct qed_hwfn *p_hwfn,
- struct qed_consq *p_consq);
+void qed_consq_setup(struct qed_hwfn *p_hwfn);
/**
* @brief qed_consq_free - deallocates the given ConsQ struct.
*
* @param p_hwfn
- * @param p_eq
*/
-void qed_consq_free(struct qed_hwfn *p_hwfn,
- struct qed_consq *p_consq);
+void qed_consq_free(struct qed_hwfn *p_hwfn);
/**
* @file
@@ -401,6 +409,7 @@
* to the internal RAM of the UStorm by the Function Start Ramrod.
*
* @param p_hwfn
+ * @param p_ptt
* @param p_tunn
* @param mode
* @param allow_npar_tx_switch
@@ -409,6 +418,7 @@
*/
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
struct qed_tunnel_info *p_tunn,
enum qed_mf_mode mode, bool allow_npar_tx_switch);
@@ -426,6 +436,15 @@
int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
/**
+ * @brief qed_sp_pf_update_stag - Update firmware of new outer tag
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn);
+
+/**
* @brief qed_sp_pf_stop - PF Function Stop Ramrod
*
* This ramrod is sent to close a Physical Function (PF). It is the last ramrod
@@ -442,6 +461,7 @@
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
struct qed_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index bc3694e..46d0c3c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -185,22 +185,20 @@
}
static void
-__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
struct qed_tunn_update_type *tun_type)
{
*p_tunn_cls = tun_type->tun_cls;
-
- if (tun_type->b_mode_enabled)
- *p_enable_tx_clas = 1;
}
static void
-qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
struct qed_tunn_update_type *tun_type,
- u8 *p_update_port, __le16 *p_port,
+ u8 *p_update_port,
+ __le16 *p_port,
struct qed_tunn_update_udp_port *p_udp_port)
{
- __qed_set_ramrod_tunnel_param(p_tunn_cls, p_enable_tx_clas, tun_type);
+ __qed_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
if (p_udp_port->b_update_port) {
*p_update_port = 1;
*p_port = cpu_to_le16(p_udp_port->port);
@@ -219,33 +217,27 @@
qed_set_tunn_ports(p_tun, p_src);
qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
- &p_tunn_cfg->tx_enable_vxlan,
&p_tun->vxlan,
&p_tunn_cfg->set_vxlan_udp_port_flg,
&p_tunn_cfg->vxlan_udp_port,
&p_tun->vxlan_port);
qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
- &p_tunn_cfg->tx_enable_l2geneve,
&p_tun->l2_geneve,
&p_tunn_cfg->set_geneve_udp_port_flg,
&p_tunn_cfg->geneve_udp_port,
&p_tun->geneve_port);
__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
- &p_tunn_cfg->tx_enable_ipgeneve,
&p_tun->ip_geneve);
__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
- &p_tunn_cfg->tx_enable_l2gre,
&p_tun->l2_gre);
__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
- &p_tunn_cfg->tx_enable_ipgre,
&p_tun->ip_gre);
p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
- p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls;
}
static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
@@ -261,17 +253,18 @@
}
static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
struct qed_tunnel_info *p_tunn)
{
if (p_tunn->vxlan_port.b_update_port)
- qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ qed_set_vxlan_dest_port(p_hwfn, p_ptt,
p_tunn->vxlan_port.port);
if (p_tunn->geneve_port.b_update_port)
- qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ qed_set_geneve_dest_port(p_hwfn, p_ptt,
p_tunn->geneve_port.port);
- qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
+ qed_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
}
static void
@@ -289,33 +282,29 @@
qed_set_tunn_ports(p_tun, p_src);
qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
- &p_tunn_cfg->tx_enable_vxlan,
&p_tun->vxlan,
&p_tunn_cfg->set_vxlan_udp_port_flg,
&p_tunn_cfg->vxlan_udp_port,
&p_tun->vxlan_port);
qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
- &p_tunn_cfg->tx_enable_l2geneve,
&p_tun->l2_geneve,
&p_tunn_cfg->set_geneve_udp_port_flg,
&p_tunn_cfg->geneve_udp_port,
&p_tun->geneve_port);
__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
- &p_tunn_cfg->tx_enable_ipgeneve,
&p_tun->ip_geneve);
__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
- &p_tunn_cfg->tx_enable_l2gre,
&p_tun->l2_gre);
__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
- &p_tunn_cfg->tx_enable_ipgre,
&p_tun->ip_gre);
}
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
struct qed_tunnel_info *p_tunn,
enum qed_mf_mode mode, bool allow_npar_tx_switch)
{
@@ -412,7 +401,8 @@
rc = qed_spq_post(p_hwfn, p_ent, NULL);
if (p_tunn)
- qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel);
+ qed_set_hw_tunn_mode_port(p_hwfn, p_ptt,
+ &p_hwfn->cdev->tunnel);
return rc;
}
@@ -443,6 +433,7 @@
/* Set pf update ramrod command params */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
struct qed_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data)
@@ -477,7 +468,7 @@
if (rc)
return rc;
- qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel);
+ qed_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->cdev->tunnel);
return rc;
}
@@ -523,3 +514,27 @@
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+
+int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_CB;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
+ p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index f6423a1..be48d9a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -54,7 +54,7 @@
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
-#include "qed_roce.h"
+#include "qed_rdma.h"
/***************************************************************************
* Structures & Definitions
@@ -302,32 +302,16 @@
qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
- switch (p_eqe->protocol_id) {
-#if IS_ENABLED(CONFIG_QED_RDMA)
- case PROTOCOLID_ROCE:
- qed_roce_async_event(p_hwfn, p_eqe->opcode,
- &p_eqe->data.rdma_data);
- return 0;
-#endif
- case PROTOCOLID_COMMON:
- return qed_sriov_eqe_event(p_hwfn,
- p_eqe->opcode,
- p_eqe->echo, &p_eqe->data);
- case PROTOCOLID_ISCSI:
- if (!IS_ENABLED(CONFIG_QED_ISCSI))
- return -EINVAL;
+ qed_spq_async_comp_cb cb;
- if (p_hwfn->p_iscsi_info->event_cb) {
- struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
+ if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
+ return -EINVAL;
- return p_iscsi->event_cb(p_iscsi->event_context,
- p_eqe->opcode, &p_eqe->data);
- } else {
- DP_NOTICE(p_hwfn,
- "iSCSI async completion is not set\n");
- return -EINVAL;
- }
- default:
+ cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
+ if (cb) {
+ return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
+ &p_eqe->data, p_eqe->fw_return_code);
+ } else {
DP_NOTICE(p_hwfn,
"Unknown Async completion for protocol: %d\n",
p_eqe->protocol_id);
@@ -335,6 +319,28 @@
}
}
+int
+qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
+ enum protocol_type protocol_id,
+ qed_spq_async_comp_cb cb)
+{
+ if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+ return -EINVAL;
+
+ p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
+ return 0;
+}
+
+void
+qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
+ enum protocol_type protocol_id)
+{
+ if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+ return;
+
+ p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
+}
+
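The rework above replaces the hard-coded protocol switch in qed_async_event_completion() with a table of per-protocol callbacks, registered at setup and unregistered at teardown. A self-contained model of that dispatch; the types and names below are illustrative, not the driver's:

#include <stdio.h>

#define MAX_PROTO 4

typedef int (*async_cb)(unsigned char opcode, unsigned short echo);

static async_cb cb_table[MAX_PROTO];

static int register_cb(unsigned int proto, async_cb cb)
{
	if (proto >= MAX_PROTO)
		return -1;
	cb_table[proto] = cb;
	return 0;
}

static void unregister_cb(unsigned int proto)
{
	if (proto < MAX_PROTO)
		cb_table[proto] = NULL;
}

static int dispatch(unsigned int proto, unsigned char opcode,
		    unsigned short echo)
{
	if (proto >= MAX_PROTO || !cb_table[proto]) {
		fprintf(stderr, "Unknown async completion, protocol %u\n",
			proto);
		return -1;
	}
	return cb_table[proto](opcode, echo);
}

static int roce_handler(unsigned char opcode, unsigned short echo)
{
	printf("ROCE event: opcode=%u echo=%u\n", opcode, echo);
	return 0;
}

int main(void)
{
	register_cb(1, roce_handler);  /* as qed_roce_setup() would */
	dispatch(1, 3, 42);            /* routed to roce_handler() */
	unregister_cb(1);
	return 0;
}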
/***************************************************************************
* EQ API
***************************************************************************/
@@ -403,14 +409,14 @@
return rc;
}
-struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
+int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
struct qed_eq *p_eq;
/* Allocate EQ struct */
p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
if (!p_eq)
- return NULL;
+ return -ENOMEM;
/* Allocate and initialize EQ chain */
if (qed_chain_alloc(p_hwfn->cdev,
@@ -419,31 +425,35 @@
QED_CHAIN_CNT_TYPE_U16,
num_elem,
sizeof(union event_ring_element),
- &p_eq->chain))
+ &p_eq->chain, NULL))
goto eq_allocate_fail;
/* register EQ completion on the SP SB */
qed_int_register_cb(p_hwfn, qed_eq_completion,
p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
- return p_eq;
+ p_hwfn->p_eq = p_eq;
+ return 0;
eq_allocate_fail:
- qed_eq_free(p_hwfn, p_eq);
- return NULL;
-}
-
-void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
-{
- qed_chain_reset(&p_eq->chain);
-}
-
-void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
-{
- if (!p_eq)
- return;
- qed_chain_free(p_hwfn->cdev, &p_eq->chain);
kfree(p_eq);
+ return -ENOMEM;
+}
+
+void qed_eq_setup(struct qed_hwfn *p_hwfn)
+{
+ qed_chain_reset(&p_hwfn->p_eq->chain);
+}
+
+void qed_eq_free(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_eq)
+ return;
+
+ qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);
+
+ kfree(p_hwfn->p_eq);
+ p_hwfn->p_eq = NULL;
}
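qed_eq_alloc()/qed_eq_free() (and the ConsQ pair below) now return an int and park the object on the hwfn instead of handing a pointer back to the caller. The ownership pattern reduced to its core; struct owner and struct eq are illustrative:

#include <stdlib.h>

struct eq { int dummy; };
struct owner { struct eq *p_eq; };

static int eq_alloc(struct owner *o)
{
	struct eq *p = calloc(1, sizeof(*p));

	if (!p)
		return -1;   /* -ENOMEM in the driver */
	o->p_eq = p;         /* the owner holds the only reference */
	return 0;
}

static void eq_free(struct owner *o)
{
	if (!o->p_eq)
		return;      /* safe to call on any error path */
	free(o->p_eq);
	o->p_eq = NULL;      /* no dangling pointer, no double free */
}

int main(void)
{
	struct owner o = { 0 };

	if (eq_alloc(&o))
		return 1;
	eq_free(&o);
	return 0;
}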
/***************************************************************************
@@ -543,7 +553,7 @@
QED_CHAIN_CNT_TYPE_U16,
0, /* N/A when the mode is SINGLE */
sizeof(struct slow_path_element),
- &p_spq->chain))
+ &p_spq->chain, NULL))
goto spq_allocate_fail;
/* allocate and fill the SPQ elements (incl. ramrod data list) */
@@ -583,8 +593,8 @@
}
qed_chain_free(p_hwfn->cdev, &p_spq->chain);
- ;
kfree(p_spq);
+ p_hwfn->p_spq = NULL;
}
int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
@@ -934,14 +944,14 @@
return rc;
}
-struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
+int qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_consq *p_consq;
/* Allocate ConsQ struct */
p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
if (!p_consq)
- return NULL;
+ return -ENOMEM;
/* Allocate and initialize the ConsQ chain */
if (qed_chain_alloc(p_hwfn->cdev,
@@ -949,25 +959,29 @@
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
QED_CHAIN_PAGE_SIZE / 0x80,
- 0x80, &p_consq->chain))
+ 0x80, &p_consq->chain, NULL))
goto consq_allocate_fail;
- return p_consq;
+ p_hwfn->p_consq = p_consq;
+ return 0;
consq_allocate_fail:
- qed_consq_free(p_hwfn, p_consq);
- return NULL;
-}
-
-void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
-{
- qed_chain_reset(&p_consq->chain);
-}
-
-void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
-{
- if (!p_consq)
- return;
- qed_chain_free(p_hwfn->cdev, &p_consq->chain);
kfree(p_consq);
+ return -ENOMEM;
+}
+
+void qed_consq_setup(struct qed_hwfn *p_hwfn)
+{
+ qed_chain_reset(&p_hwfn->p_consq->chain);
+}
+
+void qed_consq_free(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_consq)
+ return;
+
+ qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);
+
+ kfree(p_hwfn->p_consq);
+ p_hwfn->p_consq = NULL;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index f5ed54d..2cfd3bd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -44,6 +44,26 @@
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
+static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data, u8 fw_return_code);
+
+
+static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
+{
+ u8 legacy = 0;
+
+ if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN)
+ legacy |= QED_QCID_LEGACY_VF_RX_PROD;
+
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+ legacy |= QED_QCID_LEGACY_VF_CID;
+
+ return legacy;
+}
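qed_vf_calculate_legacy() above folds both backward-compatibility quirks into one bitmask, so later code tests bits instead of re-deriving the conditions. In miniature, with illustrative flag names:

#include <stdbool.h>

#define LEGACY_RX_PROD (1 << 0) /* producer is in the legacy location */
#define LEGACY_CID     (1 << 1) /* VF can't address per-qzone CIDs */

static unsigned char calc_legacy(bool old_hsi, bool supports_qids)
{
	unsigned char legacy = 0;

	if (old_hsi)
		legacy |= LEGACY_RX_PROD;
	if (!supports_qids)
		legacy |= LEGACY_CID;
	return legacy;
}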
/* IOV ramrods */
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
@@ -178,6 +198,19 @@
return vf;
}
+static struct qed_queue_cid *
+qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue)
+{
+ int i;
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx)
+ return p_queue->cids[i].p_cid;
+ }
+
+ return NULL;
+}
+
enum qed_iov_validate_q_mode {
QED_IOV_VALIDATE_Q_NA,
QED_IOV_VALIDATE_Q_ENABLE,
@@ -190,12 +223,24 @@
enum qed_iov_validate_q_mode mode,
bool b_is_tx)
{
+ int i;
+
if (mode == QED_IOV_VALIDATE_Q_NA)
return true;
- if ((b_is_tx && p_vf->vf_queues[qid].p_tx_cid) ||
- (!b_is_tx && p_vf->vf_queues[qid].p_rx_cid))
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ struct qed_vf_queue_cid *p_qcid;
+
+ p_qcid = &p_vf->vf_queues[qid].cids[i];
+
+ if (!p_qcid->p_cid)
+ continue;
+
+ if (p_qcid->b_is_tx != b_is_tx)
+ continue;
+
return mode == QED_IOV_VALIDATE_Q_ENABLE;
+ }
/* In case we haven't found any valid CID, it's disabled */
return mode == QED_IOV_VALIDATE_Q_DISABLE;
@@ -378,33 +423,6 @@
return 0;
}
-static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
-{
- struct qed_igu_block *p_sb;
- u16 sb_id;
- u32 val;
-
- if (!p_hwfn->hw_info.p_igu_info) {
- DP_ERR(p_hwfn,
- "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
- return;
- }
-
- for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
- sb_id++) {
- p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
- if ((p_sb->status & QED_IGU_STATUS_FREE) &&
- !(p_sb->status & QED_IGU_STATUS_PF)) {
- val = qed_rd(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + sb_id * 4);
- SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
- qed_wr(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
- }
- }
-}
-
static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
@@ -552,20 +570,24 @@
p_hwfn->pf_iov_info = p_sriov;
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
+ qed_sriov_eqe_event);
+
return qed_iov_allocate_vfdb(p_hwfn);
}
-void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
return;
qed_iov_setup_vfdb(p_hwfn);
- qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}
void qed_iov_free(struct qed_hwfn *p_hwfn)
{
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
+
if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
qed_iov_free_vfdb(p_hwfn);
kfree(p_hwfn->pf_iov_info);
@@ -747,6 +769,35 @@
qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}
+static int
+qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs)
+{
+ u8 current_max = 0;
+ int i;
+
+ /* For AH onward, the configuration is per-PF. Find the maximum
+ * across all currently enabled child VFs and set the number to that.
+ */
+ if (!QED_IS_BB(p_hwfn->cdev)) {
+ qed_for_each_vf(p_hwfn, i) {
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true);
+ if (!p_vf)
+ continue;
+
+ current_max = max_t(u8, current_max, p_vf->num_sbs);
+ }
+ }
+
+ if (num_sbs > current_max)
+ return qed_mcp_config_vf_msix(p_hwfn, p_ptt,
+ abs_vf_id, num_sbs);
+
+ return 0;
+}
+
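On AH and later the MSI-X SB configuration is per-PF, so the helper above only reconfigures when a VF requests more SBs than any sibling already has. The decision rule in isolation; the names are illustrative:

/* Highest SB count among the currently enabled sibling VFs. */
static unsigned char current_max_sbs(const unsigned char *vf_sbs, int n)
{
	unsigned char max = 0;
	int i;

	for (i = 0; i < n; i++)
		if (vf_sbs[i] > max)
			max = vf_sbs[i];
	return max;
}

/* Reconfigure only when the request exceeds every sibling VF. */
static int needs_reconfig(const unsigned char *vf_sbs, int n,
			  unsigned char num_sbs)
{
	return num_sbs > current_max_sbs(vf_sbs, n);
}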
static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
@@ -771,7 +822,8 @@
qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
- rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
+ rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt,
+ vf->abs_vf_id, vf->num_sbs);
if (rc)
return rc;
@@ -838,45 +890,36 @@
struct qed_ptt *p_ptt,
struct qed_vf_info *vf, u16 num_rx_queues)
{
- struct qed_igu_block *igu_blocks;
- int qid = 0, igu_id = 0;
+ struct qed_igu_block *p_block;
+ struct cau_sb_entry sb_entry;
+ int qid = 0;
u32 val = 0;
- igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
-
- if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
- num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
- p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
+ if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
+ num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
+ p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
- while ((qid < num_rx_queues) &&
- (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
- if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
- struct cau_sb_entry sb_entry;
+ for (qid = 0; qid < num_rx_queues; qid++) {
+ p_block = qed_get_igu_free_sb(p_hwfn, false);
+ vf->igu_sbs[qid] = p_block->igu_sb_id;
+ p_block->status &= ~QED_IGU_STATUS_FREE;
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
- vf->igu_sbs[qid] = (u16)igu_id;
- igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;
+ qed_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY +
+ sizeof(u32) * p_block->igu_sb_id, val);
- SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
-
- qed_wr(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
- val);
-
- /* Configure igu sb in CAU which were marked valid */
- qed_init_cau_sb_entry(p_hwfn, &sb_entry,
- p_hwfn->rel_pf_id,
- vf->abs_vf_id, 1);
- qed_dmae_host2grc(p_hwfn, p_ptt,
- (u64)(uintptr_t)&sb_entry,
- CAU_REG_SB_VAR_MEMORY +
- igu_id * sizeof(u64), 2, 0);
- qid++;
- }
- igu_id++;
+ /* Configure the IGU SB in CAU, now that it is marked valid */
+ qed_init_cau_sb_entry(p_hwfn, &sb_entry,
+ p_hwfn->rel_pf_id, vf->abs_vf_id, 1);
+ qed_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ p_block->igu_sb_id * sizeof(u64), 2, 0);
}
vf->num_sbs = (u8) num_rx_queues;
@@ -901,10 +944,8 @@
SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
qed_wr(p_hwfn, p_ptt, addr, val);
- p_info->igu_map.igu_blocks[igu_id].status |=
- QED_IGU_STATUS_FREE;
-
- p_hwfn->hw_info.p_igu_info->free_blks++;
+ p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;
+ p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
}
vf->num_sbs = 0;
@@ -1028,20 +1069,15 @@
vf->num_txqs = num_of_vf_avaiable_chains;
for (i = 0; i < vf->num_rxqs; i++) {
- struct qed_vf_q_info *p_queue = &vf->vf_queues[i];
+ struct qed_vf_queue *p_queue = &vf->vf_queues[i];
p_queue->fw_rx_qid = p_params->req_rx_queue[i];
p_queue->fw_tx_qid = p_params->req_tx_queue[i];
- /* CIDs are per-VF, so no problem having them 0-based. */
- p_queue->fw_cid = i;
-
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
- "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x] CID %04x\n",
- vf->relative_vf_id,
- i, vf->igu_sbs[i],
- p_queue->fw_rx_qid,
- p_queue->fw_tx_qid, p_queue->fw_cid);
+ "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
+ vf->relative_vf_id, i, vf->igu_sbs[i],
+ p_queue->fw_rx_qid, p_queue->fw_tx_qid);
}
/* Update the link configuration in bulletin */
@@ -1328,7 +1364,7 @@
static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf)
{
- u32 i;
+ u32 i, j;
p_vf->vf_bulletin = 0;
p_vf->vport_instance = 0;
@@ -1341,16 +1377,15 @@
p_vf->num_active_rxqs = 0;
for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
- struct qed_vf_q_info *p_queue = &p_vf->vf_queues[i];
+ struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
- if (p_queue->p_rx_cid) {
- qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
- p_queue->p_rx_cid = NULL;
- }
+ for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
+ if (!p_queue->cids[j].p_cid)
+ continue;
- if (p_queue->p_tx_cid) {
- qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
- p_queue->p_tx_cid = NULL;
+ qed_eth_queue_cid_release(p_hwfn,
+ p_queue->cids[j].p_cid);
+ p_queue->cids[j].p_cid = NULL;
}
}
@@ -1359,13 +1394,67 @@
qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}
+/* Returns either 0, or log(size) */
+static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);
+
+ if (val)
+ return val + 11;
+ return 0;
+}
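qed_iov_vf_db_bar_size() returns log2 of the BAR size in bytes, or 0 when no BAR exists; the +11 suggests the register counts powers of two above 2 KiB. Recovering bytes from that encoding, an assumption worked through rather than a documented formula:

/* Assumed encoding: reg value v != 0 maps to a BAR of 1 << (v + 11)
 * bytes, e.g. v = 1 -> 4 KiB, v = 5 -> 64 KiB.
 */
static unsigned long long db_bar_bytes(unsigned int reg_val)
{
	if (!reg_val)
		return 0; /* no doorbell BAR mapped */
	return 1ULL << (reg_val + 11);
}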
+
+static void
+qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
+ u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) -
+ qed_db_addr_vf(0, DQ_DEMS_LEGACY);
+ u32 bar_size;
+
+ p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons);
+
+ /* If the VF didn't bother asking for QIDs then don't bother limiting
+ * the number of CIDs; the VF doesn't care about the number, and
+ * limiting it would likely just cause an additional acquisition.
+ */
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+ return;
+
+ /* If the doorbell BAR was mapped by the VF, limit the VF's CIDs to
+ * an amount that guarantees the doorbells for all CIDs fall within
+ * the BAR. Otherwise, make sure the regview window is sufficient.
+ */
+ if (p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
+ bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
+ if (bar_size)
+ bar_size = 1 << bar_size;
+
+ if (p_hwfn->cdev->num_hwfns > 1)
+ bar_size /= 2;
+ } else {
+ bar_size = PXP_VF_BAR0_DQ_LENGTH;
+ }
+
+ if (bar_size / db_size < 256)
+ p_resp->num_cids = min_t(u8, p_resp->num_cids,
+ (u8)(bar_size / db_size));
+}
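The CID cap above is plain arithmetic: the number of doorbells that fit in the BAR (halved on dual-hwfn devices) bounds the CIDs granted, and ratios of 256 or more never bind because the count is a u8. A worked standalone version with illustrative sizes:

#include <stdio.h>

static unsigned int cap_cids(unsigned int requested,
			     unsigned long long bar_size,
			     unsigned int db_size, int num_hwfns)
{
	unsigned long long fit;

	if (num_hwfns > 1)
		bar_size /= 2;      /* each engine gets half the BAR */

	fit = bar_size / db_size;   /* doorbells that fit in the BAR */
	if (fit < 256 && fit < requested)
		requested = (unsigned int)fit;
	return requested;
}

int main(void)
{
	/* A 4 KiB BAR with 32-byte doorbells fits 128 CIDs: 255 -> 128. */
	printf("%u\n", cap_cids(255, 4096, 32, 1));
	return 0;
}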
+
static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *p_vf,
struct vf_pf_resc_request *p_req,
struct pf_vf_resc *p_resp)
{
- int i;
+ u8 i;
/* Queue related information */
p_resp->num_rxqs = p_vf->num_rxqs;
@@ -1383,7 +1472,7 @@
for (i = 0; i < p_resp->num_rxqs; i++) {
qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
(u16 *)&p_resp->hw_qid[i]);
- p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
+ p_resp->cid[i] = i;
}
/* Filter related information */
@@ -1392,6 +1481,8 @@
p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
p_req->num_vlan_filters);
+ qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
+
/* This isn't really needed/enforced, but some legacy VFs might depend
* on the correct filling of this field.
*/
@@ -1403,10 +1494,11 @@
p_resp->num_sbs < p_req->num_sbs ||
p_resp->num_mac_filters < p_req->num_mac_filters ||
p_resp->num_vlan_filters < p_req->num_vlan_filters ||
- p_resp->num_mc_filters < p_req->num_mc_filters) {
+ p_resp->num_mc_filters < p_req->num_mc_filters ||
+ p_resp->num_cids < p_req->num_cids) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
- "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n",
+ "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
p_vf->abs_vf_id,
p_req->num_rxqs,
p_resp->num_rxqs,
@@ -1418,7 +1510,9 @@
p_resp->num_mac_filters,
p_req->num_vlan_filters,
p_resp->num_vlan_filters,
- p_req->num_mc_filters, p_resp->num_mc_filters);
+ p_req->num_mc_filters,
+ p_resp->num_mc_filters,
+ p_req->num_cids, p_resp->num_cids);
/* Some legacy OSes are incapable of correctly handling this
* failure.
@@ -1534,6 +1628,15 @@
if (p_hwfn->cdev->num_hwfns > 1)
pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
+ /* Share our ability to use multiple queue-ids only with VFs
+ * that request it.
+ */
+ if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
+ pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
+
+ /* Share the sizes of the bars with VF */
+ resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
+
qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
@@ -1758,9 +1861,11 @@
/* Update all the Rx queues */
for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
- struct qed_queue_cid *p_cid;
+ struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
+ struct qed_queue_cid *p_cid = NULL;
- p_cid = p_vf->vf_queues[i].p_rx_cid;
+ /* There can be at most one Rx queue per qzone. Find it */
+ p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
if (!p_cid)
continue;
@@ -1951,16 +2056,55 @@
qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
+static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, bool b_is_tx)
+{
+ struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
+ struct vfpf_qid_tlv *p_qid_tlv;
+
+ /* Search for the qid if the VF indicated it's going to provide it */
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
+ if (b_is_tx)
+ return QED_IOV_LEGACY_QID_TX;
+ else
+ return QED_IOV_LEGACY_QID_RX;
+ }
+
+ p_qid_tlv = (struct vfpf_qid_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+ CHANNEL_TLV_QID);
+ if (!p_qid_tlv) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%2x]: Failed to provide qid\n",
+ p_vf->relative_vf_id);
+
+ return QED_IOV_QID_INVALID;
+ }
+
+ if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%02x]: Provided qid out-of-bounds %02x\n",
+ p_vf->relative_vf_id, p_qid_tlv->qid);
+ return QED_IOV_QID_INVALID;
+ }
+
+ return p_qid_tlv->qid;
+}
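qed_iov_vf_mbx_qid() above makes the qid TLV mandatory for VFs that advertise QUEUE_QIDS, while legacy VFs fall back to fixed per-direction indices. The lookup policy modelled standalone; the constants are illustrative:

#define QID_LEGACY_RX 0
#define QID_LEGACY_TX 1
#define QID_INVALID   0xff
#define MAX_PER_QZONE 4

/* tlv_qid < 0 models "no qid TLV found in the request". */
static unsigned char resolve_qid(int vf_supports_qids, int is_tx,
				 int tlv_qid)
{
	if (!vf_supports_qids)
		return is_tx ? QID_LEGACY_TX : QID_LEGACY_RX;
	if (tlv_qid < 0 || tlv_qid >= MAX_PER_QZONE)
		return QID_INVALID; /* missing or out-of-bounds TLV */
	return (unsigned char)tlv_qid;
}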
+
static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
struct qed_queue_start_common_params params;
+ struct qed_queue_cid_vf_params vf_params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
- struct qed_vf_q_info *p_queue;
+ u8 qid_usage_idx, vf_legacy = 0;
struct vfpf_start_rxq_tlv *req;
- bool b_legacy_vf = false;
+ struct qed_vf_queue *p_queue;
+ struct qed_queue_cid *p_cid;
+ struct qed_sb_info sb_dummy;
int rc;
req = &mbx->req_virt->start_rxq;
@@ -1970,53 +2114,64 @@
!qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
- /* Acquire a new queue-cid */
- p_queue = &vf->vf_queues[req->rx_qid];
+ qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
+ if (qid_usage_idx == QED_IOV_QID_INVALID)
+ goto out;
+ p_queue = &vf->vf_queues[req->rx_qid];
+ if (p_queue->cids[qid_usage_idx].p_cid)
+ goto out;
+
+ vf_legacy = qed_vf_calculate_legacy(vf);
+
+ /* Acquire a new queue-cid */
memset(&params, 0, sizeof(params));
params.queue_id = p_queue->fw_rx_qid;
params.vport_id = vf->vport_id;
params.stats_id = vf->abs_vf_id + 0x10;
- params.sb = req->hw_sb;
+ /* Since IGU index is passed via sb_info, construct a dummy one */
+ memset(&sb_dummy, 0, sizeof(sb_dummy));
+ sb_dummy.igu_sb_id = req->hw_sb;
+ params.p_sb = &sb_dummy;
params.sb_idx = req->sb_index;
- p_queue->p_rx_cid = _qed_eth_queue_to_cid(p_hwfn,
- vf->opaque_fid,
- p_queue->fw_cid,
- req->rx_qid, ¶ms);
- if (!p_queue->p_rx_cid)
+ memset(&vf_params, 0, sizeof(vf_params));
+ vf_params.vfid = vf->relative_vf_id;
+ vf_params.vf_qid = (u8)req->rx_qid;
+ vf_params.vf_legacy = vf_legacy;
+ vf_params.qid_usage_idx = qid_usage_idx;
+ p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+ &params, true, &vf_params);
+ if (!p_cid)
goto out;
/* Legacy VFs have their Producers in a different location, which they
* calculate on their own and clean the producer prior to this.
*/
- if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
- ETH_HSI_VER_NO_PKT_LEN_TUNN) {
- b_legacy_vf = true;
- } else {
+ if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD))
REG_WR(p_hwfn,
GTT_BAR0_MAP_REG_MSDM_RAM +
MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
0);
- }
- p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf;
- rc = qed_eth_rxq_start_ramrod(p_hwfn,
- p_queue->p_rx_cid,
+ rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
req->bd_max_bytes,
req->rxq_addr,
req->cqe_pbl_addr, req->cqe_pbl_size);
if (rc) {
status = PFVF_STATUS_FAILURE;
- qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
- p_queue->p_rx_cid = NULL;
+ qed_eth_queue_cid_release(p_hwfn, p_cid);
} else {
+ p_queue->cids[qid_usage_idx].p_cid = p_cid;
+ p_queue->cids[qid_usage_idx].b_is_tx = false;
status = PFVF_STATUS_SUCCESS;
vf->num_active_rxqs++;
}
out:
- qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
+ qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
+ !!(vf_legacy &
+ QED_QCID_LEGACY_VF_RX_PROD));
}
static void
@@ -2209,7 +2364,7 @@
if (b_update_required) {
u16 geneve_port;
- rc = qed_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
QED_SPQ_MODE_EBLOCK, NULL);
if (rc)
status = PFVF_STATUS_FAILURE;
@@ -2235,7 +2390,8 @@
static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct qed_vf_info *p_vf, u8 status)
+ struct qed_vf_info *p_vf,
+ u32 cid, u8 status)
{
struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
struct pfvf_start_queue_resp_tlv *p_tlv;
@@ -2263,12 +2419,8 @@
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
- if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
- u16 qid = mbx->req_virt->start_txq.tx_qid;
-
- p_tlv->offset = qed_db_addr_vf(p_vf->vf_queues[qid].fw_cid,
- DQ_DEMS_LEGACY);
- }
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
+ p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}
@@ -2278,10 +2430,15 @@
struct qed_vf_info *vf)
{
struct qed_queue_start_common_params params;
+ struct qed_queue_cid_vf_params vf_params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
struct vfpf_start_txq_tlv *req;
- struct qed_vf_q_info *p_queue;
+ struct qed_vf_queue *p_queue;
+ struct qed_queue_cid *p_cid;
+ struct qed_sb_info sb_dummy;
+ u8 qid_usage_idx, vf_legacy;
+ u32 cid = 0;
int rc;
u16 pq;
@@ -2289,89 +2446,126 @@
req = &mbx->req_virt->start_txq;
if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
- QED_IOV_VALIDATE_Q_DISABLE) ||
+ QED_IOV_VALIDATE_Q_NA) ||
!qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
- /* Acquire a new queue-cid */
- p_queue = &vf->vf_queues[req->tx_qid];
+ qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
+ if (qid_usage_idx == QED_IOV_QID_INVALID)
+ goto out;
+ p_queue = &vf->vf_queues[req->tx_qid];
+ if (p_queue->cids[qid_usage_idx].p_cid)
+ goto out;
+
+ vf_legacy = qed_vf_calculate_legacy(vf);
+
+ /* Acquire a new queue-cid */
params.queue_id = p_queue->fw_tx_qid;
params.vport_id = vf->vport_id;
params.stats_id = vf->abs_vf_id + 0x10;
- params.sb = req->hw_sb;
+
+ /* Since IGU index is passed via sb_info, construct a dummy one */
+ memset(&sb_dummy, 0, sizeof(sb_dummy));
+ sb_dummy.igu_sb_id = req->hw_sb;
+ params.p_sb = &sb_dummy;
params.sb_idx = req->sb_index;
- p_queue->p_tx_cid = _qed_eth_queue_to_cid(p_hwfn,
- vf->opaque_fid,
- p_queue->fw_cid,
- req->tx_qid, ¶ms);
- if (!p_queue->p_tx_cid)
+ memset(&vf_params, 0, sizeof(vf_params));
+ vf_params.vfid = vf->relative_vf_id;
+ vf_params.vf_qid = (u8)req->tx_qid;
+ vf_params.vf_legacy = vf_legacy;
+ vf_params.qid_usage_idx = qid_usage_idx;
+
+ p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+ &params, false, &vf_params);
+ if (!p_cid)
goto out;
pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
- rc = qed_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
+ rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
req->pbl_addr, req->pbl_size, pq);
if (rc) {
status = PFVF_STATUS_FAILURE;
- qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
- p_queue->p_tx_cid = NULL;
+ qed_eth_queue_cid_release(p_hwfn, p_cid);
} else {
status = PFVF_STATUS_SUCCESS;
+ p_queue->cids[qid_usage_idx].p_cid = p_cid;
+ p_queue->cids[qid_usage_idx].b_is_tx = true;
+ cid = p_cid->cid;
}
out:
- qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
+ qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status);
}
static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf,
- u16 rxq_id, bool cqe_completion)
+ u16 rxq_id,
+ u8 qid_usage_idx, bool cqe_completion)
{
- struct qed_vf_q_info *p_queue;
+ struct qed_vf_queue *p_queue;
int rc = 0;
- if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id,
- QED_IOV_VALIDATE_Q_ENABLE)) {
+ if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
- "VF[%d] Tried Closing Rx 0x%04x which is inactive\n",
- vf->relative_vf_id, rxq_id);
+ "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
+ vf->relative_vf_id, rxq_id, qid_usage_idx);
return -EINVAL;
}
p_queue = &vf->vf_queues[rxq_id];
+ /* We've validated the index and the existence of the active RXQ -
+ * now we need to make sure that it's using the correct qid.
+ */
+ if (!p_queue->cids[qid_usage_idx].p_cid ||
+ p_queue->cids[qid_usage_idx].b_is_tx) {
+ struct qed_queue_cid *p_cid;
+
+ p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
+ vf->relative_vf_id,
+ rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx);
+ return -EINVAL;
+ }
+
+ /* Now that we know we have a valid Rx-queue - close it */
rc = qed_eth_rx_queue_stop(p_hwfn,
- p_queue->p_rx_cid,
+ p_queue->cids[qid_usage_idx].p_cid,
false, cqe_completion);
if (rc)
return rc;
- p_queue->p_rx_cid = NULL;
+ p_queue->cids[qid_usage_idx].p_cid = NULL;
vf->num_active_rxqs--;
return 0;
}
static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
- struct qed_vf_info *vf, u16 txq_id)
+ struct qed_vf_info *vf,
+ u16 txq_id, u8 qid_usage_idx)
{
- struct qed_vf_q_info *p_queue;
+ struct qed_vf_queue *p_queue;
int rc = 0;
- if (!qed_iov_validate_txq(p_hwfn, vf, txq_id,
- QED_IOV_VALIDATE_Q_ENABLE))
+ if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA))
return -EINVAL;
p_queue = &vf->vf_queues[txq_id];
+ if (!p_queue->cids[qid_usage_idx].p_cid ||
+ !p_queue->cids[qid_usage_idx].b_is_tx)
+ return -EINVAL;
- rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid);
+ rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid);
if (rc)
return rc;
- p_queue->p_tx_cid = NULL;
-
+ p_queue->cids[qid_usage_idx].p_cid = NULL;
return 0;
}
@@ -2383,6 +2577,7 @@
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_FAILURE;
struct vfpf_stop_rxqs_tlv *req;
+ u8 qid_usage_idx;
int rc;
/* There has never been an official driver that used this interface
@@ -2398,8 +2593,13 @@
goto out;
}
+ /* Find which qid-index is associated with the queue */
+ qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
+ if (qid_usage_idx == QED_IOV_QID_INVALID)
+ goto out;
+
rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
- req->cqe_completion);
+ qid_usage_idx, req->cqe_completion);
if (!rc)
status = PFVF_STATUS_SUCCESS;
out:
@@ -2415,6 +2615,7 @@
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_FAILURE;
struct vfpf_stop_txqs_tlv *req;
+ u8 qid_usage_idx;
int rc;
/* There has never been an official driver that used this interface
@@ -2429,7 +2630,13 @@
status = PFVF_STATUS_NOT_SUPPORTED;
goto out;
}
- rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid);
+
+ /* Find which qid-index is associated with the queue */
+ qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
+ if (qid_usage_idx == QED_IOV_QID_INVALID)
+ goto out;
+
+ rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx);
if (!rc)
status = PFVF_STATUS_SUCCESS;
@@ -2449,7 +2656,7 @@
u8 status = PFVF_STATUS_FAILURE;
u8 complete_event_flg;
u8 complete_cqe_flg;
- u16 qid;
+ u8 qid_usage_idx;
int rc;
u8 i;
@@ -2457,19 +2664,42 @@
complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
- /* Validate inputs */
- for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++)
+ qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
+ if (qid_usage_idx == QED_IOV_QID_INVALID)
+ goto out;
+
+ /* There shouldn't exist a VF that uses queue-qids yet uses this
+ * API with multiple Rx queues. Validate this.
+ */
+ if ((vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] supports QIDs but sends multiple queues\n",
+ vf->relative_vf_id);
+ goto out;
+ }
+
+ /* Validate inputs - for the legacy case this is still true since
+ * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
+ */
+ for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
if (!qed_iov_validate_rxq(p_hwfn, vf, i,
- QED_IOV_VALIDATE_Q_ENABLE)) {
- DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
- vf->relative_vf_id, req->rx_qid, req->num_rxqs);
+ QED_IOV_VALIDATE_Q_NA) ||
+ !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
+ vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+ vf->relative_vf_id, req->rx_qid,
+ req->num_rxqs);
goto out;
}
+ }
/* Prepare the handlers */
for (i = 0; i < req->num_rxqs; i++) {
- qid = req->rx_qid + i;
- handlers[i] = vf->vf_queues[qid].p_rx_cid;
+ u16 qid = req->rx_qid + i;
+
+ handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
}
rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
@@ -2683,6 +2913,8 @@
(1 << p_rss_tlv->rss_table_size_log));
for (i = 0; i < table_size; i++) {
+ struct qed_queue_cid *p_cid;
+
q_idx = p_rss_tlv->rss_ind_table[i];
if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
QED_IOV_VALIDATE_Q_ENABLE)) {
@@ -2694,7 +2926,8 @@
goto out;
}
- p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
+ p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
+ p_rss->rss_ind_table[i] = p_cid;
}
p_data->rss_params = p_rss;
@@ -3610,8 +3843,10 @@
}
}
-int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
- u8 opcode, __le16 echo, union event_ring_data *data)
+static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data, u8 fw_return_code)
{
switch (opcode) {
case COMMON_EVENT_VF_PF_CHANNEL:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index 81a497c..c2e44bc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -149,12 +149,21 @@
struct vfpf_first_tlv first_tlv;
};
-struct qed_vf_q_info {
+#define QED_IOV_LEGACY_QID_RX (0)
+#define QED_IOV_LEGACY_QID_TX (1)
+#define QED_IOV_QID_INVALID (0xFE)
+
+struct qed_vf_queue_cid {
+ bool b_is_tx;
+ struct qed_queue_cid *p_cid;
+};
+
+/* Describes a qzone associated with the VF */
+struct qed_vf_queue {
u16 fw_rx_qid;
- struct qed_queue_cid *p_rx_cid;
u16 fw_tx_qid;
- struct qed_queue_cid *p_tx_cid;
- u8 fw_cid;
+
+ struct qed_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
};
enum vf_state {
@@ -212,7 +221,8 @@
u8 num_mac_filters;
u8 num_vlan_filters;
- struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF];
+
+ struct qed_vf_queue vf_queues[QED_MAX_VF_CHAINS_PER_PF];
u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
u8 num_active_rxqs;
struct qed_public_vf_info p_vf_info;
@@ -316,9 +326,8 @@
* @brief qed_iov_setup - setup sriov related resources
*
* @param p_hwfn
- * @param p_ptt
*/
-void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_iov_setup(struct qed_hwfn *p_hwfn);
/**
* @brief qed_iov_free - free sriov related resources
@@ -335,17 +344,6 @@
void qed_iov_free_hw_info(struct qed_dev *cdev);
/**
- * @brief qed_sriov_eqe_event - handle async sriov event arrived on eqe.
- *
- * @param p_hwfn
- * @param opcode
- * @param echo
- * @param data
- */
-int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
- u8 opcode, __le16 echo, union event_ring_data *data);
-
-/**
* @brief Mark structs of vfs that have been FLR-ed.
*
* @param p_hwfn
@@ -397,7 +395,7 @@
return 0;
}
-static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+static inline void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
}
@@ -409,13 +407,6 @@
{
}
-static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
- u8 opcode,
- __le16 echo, union event_ring_data *data)
-{
- return -EINVAL;
-}
-
static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
u32 *disabled_vfs)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 11d71e5..1926d1e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -153,6 +153,77 @@
return rc;
}
+static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_qid_tlv *p_qid_tlv;
+
+ /* Only add QIDs for the queue if it was negotiated with PF */
+ if (!(p_iov->acquire_resp.pfdev_info.capabilities &
+ PFVF_ACQUIRE_CAP_QUEUE_QIDS))
+ return;
+
+ p_qid_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
+ p_qid_tlv->qid = p_cid->qid_usage_idx;
+}
+
+int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
+ struct vfpf_first_tlv *req;
+ u32 size;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
+ rc = -EAGAIN;
+
+ qed_vf_pf_req_end(p_hwfn, rc);
+ if (!b_final)
+ return rc;
+
+ p_hwfn->b_int_enabled = 0;
+
+ if (p_iov->vf2pf_request)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union vfpf_tlvs),
+ p_iov->vf2pf_request,
+ p_iov->vf2pf_request_phys);
+ if (p_iov->pf2vf_reply)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union pfvf_tlvs),
+ p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
+
+ if (p_iov->bulletin.p_virt) {
+ size = sizeof(struct qed_bulletin_content);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ size,
+ p_iov->bulletin.p_virt, p_iov->bulletin.phys);
+ }
+
+ kfree(p_hwfn->vf_iov_info);
+ p_hwfn->vf_iov_info = NULL;
+
+ return rc;
+}
+
+int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
+{
+ return _qed_vf_pf_release(p_hwfn, true);
+}
+
#define VF_ACQUIRE_THRESH 3
static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
struct vf_pf_resc_request *p_req,
@@ -160,7 +231,7 @@
{
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
- "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n",
+ "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n",
p_req->num_rxqs,
p_resp->num_rxqs,
p_req->num_rxqs,
@@ -171,7 +242,8 @@
p_resp->num_mac_filters,
p_req->num_vlan_filters,
p_resp->num_vlan_filters,
- p_req->num_mc_filters, p_resp->num_mc_filters);
+ p_req->num_mc_filters,
+ p_resp->num_mc_filters, p_req->num_cids, p_resp->num_cids);
/* humble our request */
p_req->num_txqs = p_resp->num_txqs;
@@ -180,6 +252,7 @@
p_req->num_mac_filters = p_resp->num_mac_filters;
p_req->num_vlan_filters = p_resp->num_vlan_filters;
p_req->num_mc_filters = p_resp->num_mc_filters;
+ p_req->num_cids = p_resp->num_cids;
}
static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
@@ -204,6 +277,7 @@
p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
+ p_resc->num_cids = QED_ETH_VF_DEFAULT_NUM_CIDS;
req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
req->vfdev_info.fw_major = FW_MAJOR_VERSION;
@@ -216,6 +290,13 @@
/* Fill capability field with any non-deprecated config we support */
req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
+ /* If we've mapped the doorbell bar, try using queue qids */
+ if (p_iov->b_doorbell_bar) {
+ req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR |
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS;
+ p_resc->num_cids = QED_ETH_VF_MAX_NUM_CIDS;
+ }
+
/* pf 2 vf bulletin board address */
req->bulletin_addr = p_iov->bulletin.phys;
req->bulletin_size = p_iov->bulletin.size;
@@ -307,6 +388,13 @@
if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
p_iov->b_pre_fp_hsi = true;
+ /* In case PF doesn't support multi-queue Tx, update the number of
+ * CIDs to reflect the number of queues [older PFs didn't fill that
+ * field].
+ */
+ if (!(resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_QUEUE_QIDS))
+ resp->resc.num_cids = resp->resc.num_rxqs + resp->resc.num_txqs;
+
/* Update bulletin board size with response from PF */
p_iov->bulletin.size = resp->bulletin_size;
@@ -338,10 +426,27 @@
return rc;
}
+u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
+{
+ u32 bar_size;
+
+ /* Regview size is fixed */
+ if (bar_id == BAR_ID_0)
+ return 1 << 17;
+
+ /* Doorbell is received from PF */
+ bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size;
+ if (bar_size)
+ return 1 << bar_size;
+ return 0;
+}
+
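qed_vf_hw_bar_size() above decodes a log2-encoded size: the PF reports log(size) in pfdev_info.bar_size, with 0 meaning the field was never filled (legacy PF). A tiny standalone sketch of that decode under the same convention; the helper name is illustrative:

#include <stdint.h>

/* Sketch: log2-encoded bar size, 0 meaning "not reported". */
static uint32_t decode_bar_size(uint8_t log2_size)
{
	return log2_size ? (uint32_t)1 << log2_size : 0;
}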
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
+ struct qed_hwfn *p_lead = QED_LEADING_HWFN(p_hwfn->cdev);
struct qed_vf_iov *p_iov;
u32 reg;
+ int rc;
/* Set number of hwfns - might be overridden once leading hwfn learns
* actual configuration from PF.
@@ -349,10 +454,6 @@
if (IS_LEAD_HWFN(p_hwfn))
p_hwfn->cdev->num_hwfns = 1;
- /* Set the doorbell bar. Assumption: regview is set */
- p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
- PXP_VF_BAR0_START_DQ;
-
reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);
@@ -364,6 +465,30 @@
if (!p_iov)
return -ENOMEM;
+ /* Doorbells are tricky; the upper layer has already set the hwfn
+ * doorbell value, but there are several incompatibility scenarios
+ * where that would be incorrect and we'd need to override it.
+ */
+ if (!p_hwfn->doorbells) {
+ p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
+ PXP_VF_BAR0_START_DQ;
+ } else if (p_hwfn == p_lead) {
+ /* For the leading hw-function, the value is always correct, but we
+ * still need to handle the scenario where a legacy PF does not
+ * support 100g mapped bars later on.
+ */
+ p_iov->b_doorbell_bar = true;
+ } else {
+ /* here, value would be correct ONLY if the leading hwfn
+ * received indication that mapped-bars are supported.
+ */
+ if (p_lead->vf_iov_info->b_doorbell_bar)
+ p_iov->b_doorbell_bar = true;
+ else
+ p_hwfn->doorbells = (u8 __iomem *)
+ p_hwfn->regview + PXP_VF_BAR0_START_DQ;
+ }
+
/* Allocate vf2pf msg */
p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(union vfpf_tlvs),
@@ -403,7 +528,33 @@
p_hwfn->hw_info.personality = QED_PCI_ETH;
- return qed_vf_pf_acquire(p_hwfn);
+ rc = qed_vf_pf_acquire(p_hwfn);
+
+ /* If the VF is 100g and uses a mapped bar that the PF is too old to
+ * support, acquisition would succeed - but the VF would have no way
+ * of knowing the size of the doorbell bar configured in HW, and thus
+ * would not know how to split it for the 2nd hw-function.
+ * In this case we re-try without indicating the mapped doorbell.
+ */
+ if (!rc && p_iov->b_doorbell_bar &&
+ !qed_vf_hw_bar_size(p_hwfn, BAR_ID_1) &&
+ (p_hwfn->cdev->num_hwfns > 1)) {
+ rc = _qed_vf_pf_release(p_hwfn, false);
+ if (rc)
+ return rc;
+
+ p_iov->b_doorbell_bar = false;
+ p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
+ PXP_VF_BAR0_START_DQ;
+ rc = qed_vf_pf_acquire(p_hwfn);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n",
+ p_hwfn->regview, p_hwfn->doorbells, p_hwfn->cdev->doorbells);
+
+ return rc;
free_vf2pf_request:
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
@@ -588,8 +739,8 @@
req->cqe_pbl_addr = cqe_pbl_addr;
req->cqe_pbl_size = cqe_pbl_size;
req->rxq_addr = bd_chain_phys_addr;
- req->hw_sb = p_cid->rel.sb;
- req->sb_index = p_cid->rel.sb_idx;
+ req->hw_sb = p_cid->sb_igu_id;
+ req->sb_index = p_cid->sb_idx;
req->bd_max_bytes = bd_max_bytes;
req->stat_id = -1;
@@ -609,6 +760,9 @@
__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
(u32 *)(&init_prod_val));
}
+
+ qed_vf_pf_add_qid(p_hwfn, p_cid);
+
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
@@ -657,6 +811,8 @@
req->num_rxqs = 1;
req->cqe_completion = cqe_completion;
+ qed_vf_pf_add_qid(p_hwfn, p_cid);
+
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
@@ -697,8 +853,10 @@
/* Tx */
req->pbl_addr = pbl_addr;
req->pbl_size = pbl_size;
- req->hw_sb = p_cid->rel.sb;
- req->sb_index = p_cid->rel.sb_idx;
+ req->hw_sb = p_cid->sb_igu_id;
+ req->sb_index = p_cid->sb_idx;
+
+ qed_vf_pf_add_qid(p_hwfn, p_cid);
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
@@ -728,8 +886,8 @@
}
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
- "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
- qid, *pp_doorbell, resp->offset);
+ "Txq[0x%02x.%02x]: doorbell at %p [offset 0x%08x]\n",
+ qid, p_cid->qid_usage_idx, *pp_doorbell, resp->offset);
exit:
qed_vf_pf_req_end(p_hwfn, rc);
@@ -749,6 +907,8 @@
req->tx_qid = p_cid->rel.queue_id;
req->num_txqs = 1;
+ qed_vf_pf_add_qid(p_hwfn, p_cid);
+
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
@@ -792,9 +952,12 @@
req->only_untagged = only_untagged;
/* status blocks */
- for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
- if (p_hwfn->sbs_info[i])
- req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;
+ for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) {
+ struct qed_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i];
+
+ if (p_sb)
+ req->sb_addr[i] = p_sb->sb_phys;
+ }
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
@@ -1095,54 +1258,6 @@
return rc;
}
-int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
-{
- struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
- struct pfvf_def_resp_tlv *resp;
- struct vfpf_first_tlv *req;
- u32 size;
- int rc;
-
- /* clear mailbox and prep first tlv */
- req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
-
- /* add list termination tlv */
- qed_add_tlv(p_hwfn, &p_iov->offset,
- CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
-
- resp = &p_iov->pf2vf_reply->default_resp;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
-
- if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
- rc = -EAGAIN;
-
- qed_vf_pf_req_end(p_hwfn, rc);
-
- p_hwfn->b_int_enabled = 0;
-
- if (p_iov->vf2pf_request)
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(union vfpf_tlvs),
- p_iov->vf2pf_request,
- p_iov->vf2pf_request_phys);
- if (p_iov->pf2vf_reply)
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(union pfvf_tlvs),
- p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
-
- if (p_iov->bulletin.p_virt) {
- size = sizeof(struct qed_bulletin_content);
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- size,
- p_iov->bulletin.p_virt, p_iov->bulletin.phys);
- }
-
- kfree(p_hwfn->vf_iov_info);
- p_hwfn->vf_iov_info = NULL;
-
- return rc;
-}
-
void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
struct qed_filter_mcast *p_filter_cmd)
{
@@ -1240,6 +1355,24 @@
return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}
+void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
+ u16 sb_id, struct qed_sb_info *p_sb)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+
+ if (!p_iov) {
+ DP_NOTICE(p_hwfn, "vf_iov_info isn't initialized\n");
+ return;
+ }
+
+ if (sb_id >= PFVF_MAX_SBS_PER_VF) {
+ DP_NOTICE(p_hwfn, "Can't configure SB %04x\n", sb_id);
+ return;
+ }
+
+ p_iov->sbs_info[sb_id] = p_sb;
+}
+
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
@@ -1342,6 +1475,16 @@
*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}
+void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs)
+{
+ *num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs;
+}
+
+void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids)
+{
+ *num_cids = p_hwfn->vf_iov_info->acquire_resp.resc.num_cids;
+}
+
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
memcpy(port_mac,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 34ac70b..34d9b88 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -46,7 +46,8 @@
u8 num_mac_filters;
u8 num_vlan_filters;
u8 num_mc_filters;
- u16 padding;
+ u8 num_cids;
+ u8 padding;
};
struct hw_sb_info {
@@ -113,6 +114,17 @@
struct vf_pf_vfdev_info {
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
+ /* A requirement for supporting multi-Tx queues on a single queue-zone:
+ * the VF would pass qids as additional information whenever passing
+ * queue references.
+ */
+#define VFPF_ACQUIRE_CAP_QUEUE_QIDS BIT(2)
+
+ /* The VF is using the physical bar. While this is mostly internal
+ * to the VF, it might affect the number of CIDs supported, assuming
+ * QUEUE_QIDS is set.
+ */
+#define VFPF_ACQUIRE_CAP_PHYSICAL_BAR BIT(3)
u64 capabilities;
u8 fw_major;
u8 fw_minor;
@@ -185,6 +197,9 @@
*/
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE BIT(2)
+ /* PF expects queues to be received with additional qids */
+#define PFVF_ACQUIRE_CAP_QUEUE_QIDS BIT(3)
+
u16 db_size;
u8 indices_per_sb;
u8 os_type;
@@ -193,7 +208,8 @@
u16 chip_rev;
u8 dev_type;
- u8 padding;
+ /* Doorbell bar size configured in HW: log(size) or 0 */
+ u8 bar_size;
struct pfvf_stats_info stats_info;
@@ -221,7 +237,8 @@
u8 num_mac_filters;
u8 num_vlan_filters;
u8 num_mc_filters;
- u8 padding[2];
+ u8 num_cids;
+ u8 padding;
} resc;
u32 bulletin_size;
@@ -234,6 +251,16 @@
u8 padding[4];
};
+/* Extended queue information - additional index for reference inside qzone.
+ * If communicated between VF/PF, each TLV relating to queues should be
+ * extended by one such [or have a future base TLV that already contains info].
+ */
+struct vfpf_qid_tlv {
+ struct channel_tlv tl;
+ u8 qid;
+ u8 padding[3];
+};
+
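As qed_vf_pf_add_qid() earlier in this patch shows, the qid TLV is appended directly after each queue-related TLV in the vf2pf mailbox. The standalone sketch below illustrates that append-and-advance bookkeeping over a flat byte buffer; channel_tlv's exact layout and the CHANNEL_TLV_QID value here are assumptions for the example, not the driver's definitions.

#include <stdint.h>
#include <string.h>

struct channel_tlv { uint16_t type; uint16_t length; };

struct qid_tlv_sketch {
	struct channel_tlv tl;
	uint8_t qid;
	uint8_t padding[3];
};

#define CHANNEL_TLV_QID_SKETCH 0x2a	/* placeholder value */

/* Place a TLV header at the current offset, then advance the offset
 * past the whole TLV, mirroring qed_add_tlv() in spirit. */
static void *append_tlv(uint8_t *buf, uint16_t *offset,
			uint16_t type, uint16_t len)
{
	struct channel_tlv *tl = (struct channel_tlv *)(buf + *offset);

	memset(tl, 0, len);
	tl->type = type;
	tl->length = len;
	*offset += len;
	return tl;
}

static void append_qid(uint8_t *buf, uint16_t *offset, uint8_t qid_usage_idx)
{
	struct qid_tlv_sketch *p;

	p = append_tlv(buf, offset, CHANNEL_TLV_QID_SKETCH, sizeof(*p));
	p->qid = qid_usage_idx;
}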
/* Setup Queue */
struct vfpf_start_rxq_tlv {
struct vfpf_first_tlv first_tlv;
@@ -597,6 +624,8 @@
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
CHANNEL_TLV_UPDATE_TUNN_PARAM,
+ CHANNEL_TLV_RESERVED,
+ CHANNEL_TLV_QID,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
@@ -605,6 +634,12 @@
CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
};
+/* Number of CIDs [total of both Rx and Tx] to be requested by
+ * default, and the maximum possible number.
+ */
+#define QED_ETH_VF_DEFAULT_NUM_CIDS (32)
+#define QED_ETH_VF_MAX_NUM_CIDS (250)
+
/* This data is held in the qed_hwfn structure for VFs only. */
struct qed_vf_iov {
union vfpf_tlvs *vf2pf_request;
@@ -627,6 +662,19 @@
* this has to be propagated as it affects the fastpath.
*/
bool b_pre_fp_hsi;
+
+ /* Present-day VFs pass the SBs' physical addresses on vport start,
+ * and as they lack an IGU mapping they need to store the addresses
+ * of previously registered SBs.
+ * Even if we were to change the configuration flow, backward
+ * compatibility [with older PFs] means we'd still need to store these.
+ */
+ struct qed_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];
+
+ /* Determines whether the VF utilizes doorbells via the limited
+ * register bar or via the doorbell bar.
+ */
+ bool b_doorbell_bar;
};
#ifdef CONFIG_QED_SRIOV
@@ -676,6 +724,22 @@
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);
/**
+ * @brief Get number of Tx queues allocated for VF by qed
+ *
+ * @param p_hwfn
+ * @param num_txqs - allocated TX queues
+ */
+void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs);
+
+/**
+ * @brief Get number of available connections [both Rx and Tx] for VF
+ *
+ * @param p_hwfn
+ * @param num_cids - allocated number of connections
+ */
+void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids);
+
+/**
* @brief Get port mac address for VF
*
* @param p_hwfn
@@ -837,6 +901,16 @@
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
/**
+ * @brief Stores [or removes] a configured sb_info.
+ *
+ * @param p_hwfn
+ * @param sb_id - zero-based SB index [for fastpath]
+ * @param p_sb - may be NULL [during removal].
+ */
+void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
+ u16 sb_id, struct qed_sb_info *p_sb);
+
+/**
* @brief qed_vf_pf_vport_start - perform vport start for VF.
*
* @param p_hwfn
@@ -917,6 +991,8 @@
void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun);
int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
struct qed_tunnel_info *p_tunn);
+
+u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id);
#else
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *params)
@@ -938,6 +1014,14 @@
{
}
+static inline void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs)
+{
+}
+
+static inline void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids)
+{
+}
+
static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
}
@@ -1021,6 +1105,11 @@
return 0;
}
+static inline void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, u16 sb_id,
+ struct qed_sb_info *p_sb)
+{
+}
+
static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
u8 vport_id,
u16 mtu,
@@ -1089,6 +1178,13 @@
{
return -EINVAL;
}
+
+static inline u32
+qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn,
+ enum BAR_ID bar_id)
+{
+ return 0;
+}
#endif
#endif
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index bc5f7c3..75408fb 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -2,4 +2,4 @@
qede-y := qede_main.o qede_fp.o qede_filter.o qede_ethtool.o qede_ptp.o
qede-$(CONFIG_DCB) += qede_dcbnl.o
-qede-$(CONFIG_QED_RDMA) += qede_roce.o
+qede-$(CONFIG_QED_RDMA) += qede_rdma.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 9b4f08b..4dfb238 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -40,6 +40,7 @@
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/bpf.h>
+#include <linux/qed/qede_rdma.h>
#include <linux/io.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
@@ -153,8 +154,8 @@
struct qede_rdma_dev {
struct qedr_dev *qedr_dev;
struct list_head entry;
- struct list_head roce_event_list;
- struct workqueue_struct *roce_wq;
+ struct list_head rdma_event_list;
+ struct workqueue_struct *rdma_wq;
};
struct qede_ptp;
@@ -197,7 +198,6 @@
#define QEDE_TSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_rx)
struct qed_int_info int_info;
- unsigned char primary_mac[ETH_ALEN];
/* Smaller private variant of the RTNL lock */
struct mutex qede_lock;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
index a9e7379..6e7747b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
@@ -313,7 +313,6 @@
.ieee_setets = qede_dcbnl_ieee_setets,
.ieee_getapp = qede_dcbnl_ieee_getapp,
.ieee_setapp = qede_dcbnl_ieee_setapp,
- .getdcbx = qede_dcbnl_getdcbx,
.ieee_peer_getpfc = qede_dcbnl_ieee_peer_getpfc,
.ieee_peer_getets = qede_dcbnl_ieee_peer_getets,
.getstate = qede_dcbnl_getstate,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 172b292..6a03d3e 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -506,6 +506,14 @@
params.autoneg = false;
params.forced_speed = base->speed;
switch (base->speed) {
+ case SPEED_1000:
+ if (!(current_link.supported_caps &
+ QED_LM_1000baseT_Full_BIT)) {
+ DP_INFO(edev, "1G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = QED_LM_1000baseT_Full_BIT;
+ break;
case SPEED_10000:
if (!(current_link.supported_caps &
QED_LM_10000baseKR_Full_BIT)) {
@@ -1282,7 +1290,8 @@
struct qede_tx_queue *txq = NULL;
struct eth_tx_1st_bd *first_bd;
dma_addr_t mapping;
- int i, idx, val;
+ int i, idx;
+ u16 val;
for_each_queue(i) {
if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
@@ -1297,14 +1306,15 @@
}
/* Fill the entry in the SW ring and the BDs in the FW ring */
- idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ idx = txq->sw_tx_prod;
txq->sw_tx_ring.skbs[idx].skb = skb;
first_bd = qed_chain_produce(&txq->tx_pbl);
memset(first_bd, 0, sizeof(*first_bd));
val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
first_bd->data.bd_flags.bitfields = val;
val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
- first_bd->data.bitfields |= (val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
+ val = val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+ first_bd->data.bitfields |= cpu_to_le16(val);
/* Map skb linear data for DMA and set in the first BD */
mapping = dma_map_single(&edev->pdev->dev, skb->data,
@@ -1317,10 +1327,10 @@
/* update the first BD with the actual num BDs */
first_bd->data.nbds = 1;
- txq->sw_tx_prod++;
+ txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
/* 'next page' entries are counted in the producer value */
- val = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
- txq->tx_db.data.bd_prod = val;
+ val = qed_chain_get_prod_idx(&txq->tx_pbl);
+ txq->tx_db.data.bd_prod = cpu_to_le16(val);
/* wmb makes sure that the BDs data is updated before updating the
* producer, otherwise FW may read old data from the BDs.
@@ -1351,7 +1361,7 @@
first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
- txq->sw_tx_cons++;
+ txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
txq->sw_tx_ring.skbs[idx].skb = NULL;
return 0;
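The sw_tx_prod/sw_tx_cons changes in this hunk (and the qede_fp.c hunks below) move the software ring indices from power-of-two masking with NUM_TX_BDS_MAX to a modulo wrap by the configured ring size, which remains correct when num_tx_buffers is not a power of two. A minimal sketch contrasting the two wrap styles; both helpers are illustrative:

#include <stdint.h>

/* Mask wrap: only valid when the ring size is a power of two
 * (mask = size - 1). */
static uint16_t wrap_mask(uint16_t idx, uint16_t mask)
{
	return (idx + 1) & mask;
}

/* Modulo wrap, as adopted by this patch: valid for any ring size. */
static uint16_t wrap_mod(uint16_t idx, uint16_t ring_size)
{
	return (idx + 1) % ring_size;
}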
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index 333876c..f939db5 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -495,12 +495,16 @@
{
struct qede_dev *edev = dev;
+ __qede_lock(edev);
+
/* MAC hints take effect only if we haven't set one already */
- if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced)
+ if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced) {
+ __qede_unlock(edev);
return;
+ }
ether_addr_copy(edev->ndev->dev_addr, mac);
- ether_addr_copy(edev->primary_mac, mac);
+ __qede_unlock(edev);
}
void qede_fill_rss_params(struct qede_dev *edev,
@@ -1033,6 +1037,7 @@
return qede_xdp_set(edev, xdp->prog);
case XDP_QUERY_PROG:
xdp->prog_attached = !!edev->xdp_prog;
+ xdp->prog_id = edev->xdp_prog ? edev->xdp_prog->aux->id : 0;
return 0;
default:
return -EINVAL;
@@ -1061,41 +1066,51 @@
{
struct qede_dev *edev = netdev_priv(ndev);
struct sockaddr *addr = p;
- int rc;
+ int rc = 0;
- ASSERT_RTNL(); /* @@@TBD To be removed */
-
- DP_INFO(edev, "Set_mac_addr called\n");
+ /* Make sure the state doesn't transition while changing the MAC.
+ * Also, all flows accessing the dev_addr field are doing that under
+ * this lock.
+ */
+ __qede_lock(edev);
if (!is_valid_ether_addr(addr->sa_data)) {
DP_NOTICE(edev, "The MAC address is not valid\n");
- return -EFAULT;
+ rc = -EFAULT;
+ goto out;
}
if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
- DP_NOTICE(edev, "qed prevents setting MAC\n");
- return -EINVAL;
+ DP_NOTICE(edev, "qed prevents setting MAC %pM\n",
+ addr->sa_data);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (edev->state == QEDE_STATE_OPEN) {
+ /* Remove the previous primary mac */
+ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+ ndev->dev_addr);
+ if (rc)
+ goto out;
}
ether_addr_copy(ndev->dev_addr, addr->sa_data);
+ DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);
- if (!netif_running(ndev)) {
- DP_NOTICE(edev, "The device is currently down\n");
- return 0;
+ if (edev->state != QEDE_STATE_OPEN) {
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "The device is currently down\n");
+ goto out;
}
- /* Remove the previous primary mac */
- rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
- edev->primary_mac);
- if (rc)
- return rc;
+ edev->ops->common->update_mac(edev->cdev, ndev->dev_addr);
- edev->ops->common->update_mac(edev->cdev, addr->sa_data);
-
- /* Add MAC filter according to the new unicast HW MAC address */
- ether_addr_copy(edev->primary_mac, ndev->dev_addr);
- return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
- edev->primary_mac);
+ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+ ndev->dev_addr);
+out:
+ __qede_unlock(edev);
+ return rc;
}
static int
@@ -1200,7 +1215,7 @@
* (configure / leave the primary mac)
*/
rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
- edev->primary_mac);
+ edev->ndev->dev_addr);
if (rc)
goto out;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 7b6f41d..6fc854b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -99,7 +99,7 @@
/* Unmap the data and free skb */
int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
{
- u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
+ u16 idx = txq->sw_tx_cons;
struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
struct eth_tx_1st_bd *first_bd;
struct eth_tx_bd *tx_data_bd;
@@ -156,7 +156,7 @@
struct eth_tx_1st_bd *first_bd,
int nbd, bool data_split)
{
- u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ u16 idx = txq->sw_tx_prod;
struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
struct eth_tx_bd *tx_data_bd;
int i, split_bd_len = 0;
@@ -333,8 +333,9 @@
struct sw_rx_data *metadata, u16 padding, u16 length)
{
struct qede_tx_queue *txq = fp->xdp_tx;
- u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
struct eth_tx_1st_bd *first_bd;
+ u16 idx = txq->sw_tx_prod;
+ u16 val;
if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
txq->stopped_cnt++;
@@ -346,9 +347,11 @@
memset(first_bd, 0, sizeof(*first_bd));
first_bd->data.bd_flags.bitfields =
BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
- first_bd->data.bitfields |=
- (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
- ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+
+ val = (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+ ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+
+ first_bd->data.bitfields |= cpu_to_le16(val);
first_bd->data.nbds = 1;
/* We can safely ignore the offset, as it's 0 for XDP */
@@ -363,7 +366,7 @@
txq->sw_tx_ring.xdp[idx].page = metadata->data;
txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
- txq->sw_tx_prod++;
+ txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
/* Mark the fastpath for future XDP doorbell */
fp->xdp_xmit = 1;
@@ -393,14 +396,14 @@
while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
qed_chain_consume(&txq->tx_pbl);
- idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
+ idx = txq->sw_tx_cons;
dma_unmap_page(&edev->pdev->dev,
txq->sw_tx_ring.xdp[idx].mapping,
PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(txq->sw_tx_ring.xdp[idx].page);
- txq->sw_tx_cons++;
+ txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
txq->xmit_pkts++;
}
}
@@ -430,7 +433,7 @@
bytes_compl += len;
pkts_compl++;
- txq->sw_tx_cons++;
+ txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
txq->xmit_pkts++;
}
@@ -1076,8 +1079,7 @@
* re-use the already allocated & mapped memory.
*/
if (len + pad <= edev->rx_copybreak) {
- memcpy(skb_put(skb, len),
- page_address(page) + offset, len);
+ skb_put_data(skb, page_address(page) + offset, len);
qede_reuse_page(rxq, bd);
goto out;
}
@@ -1424,7 +1426,7 @@
struct eth_tx_2nd_bd *second_bd = NULL;
struct eth_tx_3rd_bd *third_bd = NULL;
struct eth_tx_bd *tx_data_bd = NULL;
- u16 txq_index;
+ u16 txq_index, val = 0;
u8 nbd = 0;
dma_addr_t mapping;
int rc, frag_idx = 0, ipv6_ext = 0;
@@ -1455,7 +1457,7 @@
#endif
/* Fill the entry in the SW ring and the BDs in the FW ring */
- idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ idx = txq->sw_tx_prod;
txq->sw_tx_ring.skbs[idx].skb = skb;
first_bd = (struct eth_tx_1st_bd *)
qed_chain_produce(&txq->tx_pbl);
@@ -1513,8 +1515,8 @@
if (xmit_type & XMIT_ENC) {
first_bd->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
- first_bd->data.bitfields |=
- 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+
+ val |= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
}
/* Legacy FW had flipped behavior in regard to this bit -
@@ -1522,8 +1524,7 @@
* packets when it didn't need to.
*/
if (unlikely(txq->is_legacy))
- first_bd->data.bitfields ^=
- 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+ val ^= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
/* If the packet is IPv6 with extension header, indicate that
* to FW and pass a few params, since the device cracker doesn't
@@ -1587,11 +1588,12 @@
data_split = true;
}
} else {
- first_bd->data.bitfields |=
- (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
- ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+ val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+ ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
}
+ first_bd->data.bitfields = cpu_to_le16(val);
+
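The bitfields changes above accumulate all descriptor flags in a CPU-order u16 and convert once with cpu_to_le16() at the final store, rather than OR-ing converted and unconverted values into the little-endian field. A user-space sketch of the pattern; the conversion stub and the shift value are assumptions for the example:

#include <stdint.h>

/* Stand-in for the kernel helper; a no-op on little-endian hosts. */
static uint16_t cpu_to_le16_sketch(uint16_t v) { return v; }

struct first_bd_sketch { uint16_t bitfields; /* little-endian on the wire */ };

static void set_first_bd_flags(struct first_bd_sketch *bd,
			       uint16_t len_bits, int tunnel)
{
	uint16_t val = 0;

	val |= len_bits;	/* packet-length bits */
	if (tunnel)
		val |= 1 << 4;	/* placeholder flag shift */

	/* Convert exactly once, at the final store. */
	bd->bitfields = cpu_to_le16_sketch(val);
}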
/* Handle fragmented skb */
/* special handle for frags inside 2nd and 3rd bds.. */
while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
@@ -1639,7 +1641,7 @@
/* Advance packet producer only before sending the packet since mapping
* of pages may fail.
*/
- txq->sw_tx_prod++;
+ txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
/* 'next page' entries are counted in the producer value */
txq->tx_db.data.bd_prod =
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 38b77bb..06ca13d 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -60,7 +60,6 @@
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
-#include <linux/qed/qede_roce.h>
#include "qede.h"
#include "qede_ptp.h"
@@ -259,11 +258,11 @@
/* Notify qed of the name change */
if (!edev->ops || !edev->ops->common)
goto done;
- edev->ops->common->set_id(edev->cdev, edev->ndev->name, "qede");
+ edev->ops->common->set_name(edev->cdev, edev->ndev->name);
break;
case NETDEV_CHANGEADDR:
edev = netdev_priv(ndev);
- qede_roce_event_changeaddr(edev);
+ qede_rdma_event_changeaddr(edev);
break;
}
@@ -580,6 +579,24 @@
.ndo_features_check = qede_features_check,
};
+static const struct net_device_ops qede_netdev_vf_xdp_ops = {
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = qede_change_mtu,
+ .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_set_features = qede_set_features,
+ .ndo_get_stats64 = qede_get_stats64,
+ .ndo_udp_tunnel_add = qede_udp_tunnel_add,
+ .ndo_udp_tunnel_del = qede_udp_tunnel_del,
+ .ndo_features_check = qede_features_check,
+ .ndo_xdp = qede_xdp,
+};
+
/* -------------------------------------------------------------------------
* START OF PROBE / REMOVE
* -------------------------------------------------------------------------
@@ -618,6 +635,12 @@
memset(&edev->stats, 0, sizeof(edev->stats));
memcpy(&edev->dev_info, info, sizeof(*info));
+ /* As ethtool doesn't have the ability to show WoL behavior as
+ * 'default', declare it enabled if the device supports it.
+ */
+ if (edev->dev_info.common.wol_support)
+ edev->wol_enabled = true;
+
INIT_LIST_HEAD(&edev->vlan_list);
return edev;
@@ -639,10 +662,14 @@
ndev->watchdog_timeo = TX_TIMEOUT;
- if (IS_VF(edev))
- ndev->netdev_ops = &qede_netdev_vf_ops;
- else
+ if (IS_VF(edev)) {
+ if (edev->dev_info.xdp_supported)
+ ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
+ else
+ ndev->netdev_ops = &qede_netdev_vf_ops;
+ } else {
ndev->netdev_ops = &qede_netdev_ops;
+ }
qede_set_ethtool_ops(ndev);
@@ -840,12 +867,55 @@
/* 64 rx + 64 tx + 64 XDP */
memset(&pf_params, 0, sizeof(struct qed_pf_params));
pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;
+
+ /* Same for VFs - make sure they'll have sufficient connections
+ * to support XDP Tx queues.
+ */
+ pf_params.eth_pf_params.num_vf_cons = 48;
+
#ifdef CONFIG_RFS_ACCEL
pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
#endif
qed_ops->common->update_pf_params(cdev, &pf_params);
}
+#define QEDE_FW_VER_STR_SIZE 80
+
+static void qede_log_probe(struct qede_dev *edev)
+{
+ struct qed_dev_info *p_dev_info = &edev->dev_info.common;
+ char buf[QEDE_FW_VER_STR_SIZE];
+ size_t left_size;
+
+ snprintf(buf, QEDE_FW_VER_STR_SIZE,
+ "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
+ p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
+ p_dev_info->fw_eng,
+ (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
+ QED_MFW_VERSION_3_OFFSET,
+ (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
+ QED_MFW_VERSION_2_OFFSET,
+ (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
+ QED_MFW_VERSION_1_OFFSET,
+ (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
+ QED_MFW_VERSION_0_OFFSET);
+
+ left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
+ if (p_dev_info->mbi_version && left_size)
+ snprintf(buf + strlen(buf), left_size,
+ " [MBI %d.%d.%d]",
+ (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
+ QED_MBI_VERSION_2_OFFSET,
+ (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
+ QED_MBI_VERSION_1_OFFSET,
+ (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
+ QED_MBI_VERSION_0_OFFSET);
+
+ pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
+ PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
+ buf, edev->ndev->name);
+}
+
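qede_log_probe() above unpacks the management-FW and MBI versions with mask/offset macro pairs. A standalone sketch of that extraction style for byte-lane fields; the mask and offset values here are illustrative, not the QED_MFW_VERSION_* definitions:

#include <stdint.h>

#define VER_FIELD_MASK(n)	(0xffu << ((n) * 8))	/* assumed byte lanes */
#define VER_FIELD_OFFSET(n)	((n) * 8)

static uint8_t ver_field(uint32_t ver, unsigned int n)
{
	return (ver & VER_FIELD_MASK(n)) >> VER_FIELD_OFFSET(n);
}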
enum qede_probe_mode {
QEDE_PROBE_NORMAL,
};
@@ -907,7 +977,7 @@
qede_init_ndev(edev);
- rc = qede_roce_dev_add(edev);
+ rc = qede_rdma_dev_add(edev);
if (rc)
goto err3;
@@ -924,7 +994,7 @@
goto err4;
}
- edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
+ edev->ops->common->set_name(cdev, edev->ndev->name);
/* PTP not supported on VFs */
if (!is_vf)
@@ -939,12 +1009,11 @@
edev->rx_copybreak = QEDE_RX_HDR_SIZE;
- DP_INFO(edev, "Ending successfully qede probe\n");
-
+ qede_log_probe(edev);
return 0;
err4:
- qede_roce_dev_remove(edev);
+ qede_rdma_dev_remove(edev);
err3:
free_netdev(edev->ndev);
err2:
@@ -995,7 +1064,7 @@
qede_ptp_disable(edev);
- qede_roce_dev_remove(edev);
+ qede_rdma_dev_remove(edev);
edev->ops->common->set_power_state(cdev, PCI_D0);
@@ -1066,12 +1135,15 @@
return rc;
}
-static void qede_free_mem_sb(struct qede_dev *edev,
- struct qed_sb_info *sb_info)
+static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
+ u16 sb_id)
{
- if (sb_info->sb_virt)
+ if (sb_info->sb_virt) {
+ edev->ops->common->sb_release(edev->cdev, sb_info, sb_id);
dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
(void *)sb_info->sb_virt, sb_info->sb_phys);
+ memset(sb_info, 0, sizeof(*sb_info));
+ }
}
/* This function allocates fast-path status block memory */
@@ -1244,8 +1316,7 @@
QED_CHAIN_CNT_TYPE_U16,
RX_RING_SIZE,
sizeof(struct eth_rx_bd),
- &rxq->rx_bd_ring);
-
+ &rxq->rx_bd_ring, NULL);
if (rc)
goto err;
@@ -1256,7 +1327,7 @@
QED_CHAIN_CNT_TYPE_U16,
RX_RING_SIZE,
sizeof(union eth_rx_cqe),
- &rxq->rx_comp_ring);
+ &rxq->rx_comp_ring, NULL);
if (rc)
goto err;
@@ -1298,12 +1369,12 @@
/* Allocate the parallel driver ring for Tx buffers */
if (txq->is_xdp) {
- size = sizeof(*txq->sw_tx_ring.xdp) * TX_RING_SIZE;
+ size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
if (!txq->sw_tx_ring.xdp)
goto err;
} else {
- size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE;
+ size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
if (!txq->sw_tx_ring.skbs)
goto err;
@@ -1313,8 +1384,9 @@
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
- TX_RING_SIZE,
- sizeof(*p_virt), &txq->tx_pbl);
+ txq->num_tx_buffers,
+ sizeof(*p_virt),
+ &txq->tx_pbl, NULL);
if (rc)
goto err;
@@ -1328,7 +1400,7 @@
/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
- qede_free_mem_sb(edev, fp->sb_info);
+ qede_free_mem_sb(edev, fp->sb_info, fp->id);
if (fp->type & QEDE_FASTPATH_RX)
qede_free_mem_rxq(edev, fp->rxq);
@@ -1725,7 +1797,7 @@
else
params.queue_id = txq->index;
- params.sb = fp->sb_info->igu_sb_id;
+ params.p_sb = fp->sb_info;
params.sb_idx = sb_idx;
rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
@@ -1804,7 +1876,7 @@
memset(&q_params, 0, sizeof(q_params));
q_params.queue_id = rxq->rxq_id;
q_params.vport_id = 0;
- q_params.sb = fp->sb_info->igu_sb_id;
+ q_params.p_sb = fp->sb_info;
q_params.sb_idx = RX_PI;
p_phys_table =
@@ -1890,9 +1962,10 @@
if (!is_locked)
__qede_lock(edev);
- qede_roce_dev_event_close(edev);
edev->state = QEDE_STATE_CLOSED;
+ qede_rdma_dev_event_close(edev);
+
/* Close OS Tx */
netif_tx_disable(edev->ndev);
netif_carrier_off(edev->ndev);
@@ -1988,9 +2061,6 @@
goto err4;
DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
- /* Add primary mac and set Rx filters */
- ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);
-
/* Program un-configured VLANs */
qede_configure_vlan_filters(edev);
@@ -1999,7 +2069,7 @@
link_params.link_up = true;
edev->ops->common->set_link(edev->cdev, &link_params);
- qede_roce_dev_event_open(edev);
+ qede_rdma_dev_event_open(edev);
edev->state = QEDE_STATE_OPEN;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 24f06e2..9b2280b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -244,6 +244,7 @@
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_NTP_ALL:
ptp->rx_filter = HWTSTAMP_FILTER_NONE;
rx_filter = QED_PTP_FILTER_ALL;
break;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
new file mode 100644
index 0000000..50b142f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
@@ -0,0 +1,314 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/qed/qede_rdma.h>
+#include "qede.h"
+
+static struct qedr_driver *qedr_drv;
+static LIST_HEAD(qedr_dev_list);
+static DEFINE_MUTEX(qedr_dev_list_lock);
+
+bool qede_rdma_supported(struct qede_dev *dev)
+{
+ return dev->dev_info.common.rdma_supported;
+}
+
+static void _qede_rdma_dev_add(struct qede_dev *edev)
+{
+ if (!qedr_drv)
+ return;
+
+ edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
+ edev->ndev);
+}
+
+static int qede_rdma_create_wq(struct qede_dev *edev)
+{
+ INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
+ edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
+ if (!edev->rdma_info.rdma_wq) {
+ DP_NOTICE(edev, "qedr: Could not create workqueue\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void qede_rdma_cleanup_event(struct qede_dev *edev)
+{
+ struct list_head *head = &edev->rdma_info.rdma_event_list;
+ struct qede_rdma_event_work *event_node;
+
+ flush_workqueue(edev->rdma_info.rdma_wq);
+ while (!list_empty(head)) {
+ event_node = list_entry(head->next, struct qede_rdma_event_work,
+ list);
+ cancel_work_sync(&event_node->work);
+ list_del(&event_node->list);
+ kfree(event_node);
+ }
+}
+
+static void qede_rdma_destroy_wq(struct qede_dev *edev)
+{
+ qede_rdma_cleanup_event(edev);
+ destroy_workqueue(edev->rdma_info.rdma_wq);
+}
+
+int qede_rdma_dev_add(struct qede_dev *edev)
+{
+ int rc = 0;
+
+ if (qede_rdma_supported(edev)) {
+ rc = qede_rdma_create_wq(edev);
+ if (rc)
+ return rc;
+
+ INIT_LIST_HEAD(&edev->rdma_info.entry);
+ mutex_lock(&qedr_dev_list_lock);
+ list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
+ _qede_rdma_dev_add(edev);
+ mutex_unlock(&qedr_dev_list_lock);
+ }
+
+ return rc;
+}
+
+static void _qede_rdma_dev_remove(struct qede_dev *edev)
+{
+ if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
+ qedr_drv->remove(edev->rdma_info.qedr_dev);
+ edev->rdma_info.qedr_dev = NULL;
+}
+
+void qede_rdma_dev_remove(struct qede_dev *edev)
+{
+ if (!qede_rdma_supported(edev))
+ return;
+
+ qede_rdma_destroy_wq(edev);
+ mutex_lock(&qedr_dev_list_lock);
+ _qede_rdma_dev_remove(edev);
+ list_del(&edev->rdma_info.entry);
+ mutex_unlock(&qedr_dev_list_lock);
+}
+
+static void _qede_rdma_dev_open(struct qede_dev *edev)
+{
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
+}
+
+static void qede_rdma_dev_open(struct qede_dev *edev)
+{
+ if (!qede_rdma_supported(edev))
+ return;
+
+ mutex_lock(&qedr_dev_list_lock);
+ _qede_rdma_dev_open(edev);
+ mutex_unlock(&qedr_dev_list_lock);
+}
+
+static void _qede_rdma_dev_close(struct qede_dev *edev)
+{
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
+}
+
+static void qede_rdma_dev_close(struct qede_dev *edev)
+{
+ if (!qede_rdma_supported(edev))
+ return;
+
+ mutex_lock(&qedr_dev_list_lock);
+ _qede_rdma_dev_close(edev);
+ mutex_unlock(&qedr_dev_list_lock);
+}
+
+static void qede_rdma_dev_shutdown(struct qede_dev *edev)
+{
+ if (!qede_rdma_supported(edev))
+ return;
+
+ mutex_lock(&qedr_dev_list_lock);
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE);
+ mutex_unlock(&qedr_dev_list_lock);
+}
+
+int qede_rdma_register_driver(struct qedr_driver *drv)
+{
+ struct qede_dev *edev;
+ u8 qedr_counter = 0;
+
+ mutex_lock(&qedr_dev_list_lock);
+ if (qedr_drv) {
+ mutex_unlock(&qedr_dev_list_lock);
+ return -EINVAL;
+ }
+ qedr_drv = drv;
+
+ list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
+ struct net_device *ndev;
+
+ qedr_counter++;
+ _qede_rdma_dev_add(edev);
+ ndev = edev->ndev;
+ if (netif_running(ndev) && netif_oper_up(ndev))
+ _qede_rdma_dev_open(edev);
+ }
+ mutex_unlock(&qedr_dev_list_lock);
+
+ pr_notice("qedr: discovered and registered %d RDMA funcs\n",
+ qedr_counter);
+
+ return 0;
+}
+EXPORT_SYMBOL(qede_rdma_register_driver);
+
+void qede_rdma_unregister_driver(struct qedr_driver *drv)
+{
+ struct qede_dev *edev;
+
+ mutex_lock(&qedr_dev_list_lock);
+ list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
+ if (edev->rdma_info.qedr_dev)
+ _qede_rdma_dev_remove(edev);
+ }
+ qedr_drv = NULL;
+ mutex_unlock(&qedr_dev_list_lock);
+}
+EXPORT_SYMBOL(qede_rdma_unregister_driver);
+
+static void qede_rdma_changeaddr(struct qede_dev *edev)
+{
+ if (!qede_rdma_supported(edev))
+ return;
+
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
+}
+
+static struct qede_rdma_event_work *
+qede_rdma_get_free_event_node(struct qede_dev *edev)
+{
+ struct qede_rdma_event_work *event_node = NULL;
+ struct list_head *list_node = NULL;
+ bool found = false;
+
+ list_for_each(list_node, &edev->rdma_info.rdma_event_list) {
+ event_node = list_entry(list_node, struct qede_rdma_event_work,
+ list);
+ if (!work_pending(&event_node->work)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ event_node = kzalloc(sizeof(*event_node), GFP_KERNEL);
+ if (!event_node) {
+ DP_NOTICE(edev,
+ "qedr: Could not allocate memory for rdma work\n");
+ return NULL;
+ }
+ list_add_tail(&event_node->list,
+ &edev->rdma_info.rdma_event_list);
+ }
+
+ return event_node;
+}
+
+static void qede_rdma_handle_event(struct work_struct *work)
+{
+ struct qede_rdma_event_work *event_node;
+ enum qede_rdma_event event;
+ struct qede_dev *edev;
+
+ event_node = container_of(work, struct qede_rdma_event_work, work);
+ event = event_node->event;
+ edev = event_node->ptr;
+
+ switch (event) {
+ case QEDE_UP:
+ qede_rdma_dev_open(edev);
+ break;
+ case QEDE_DOWN:
+ qede_rdma_dev_close(edev);
+ break;
+ case QEDE_CLOSE:
+ qede_rdma_dev_shutdown(edev);
+ break;
+ case QEDE_CHANGE_ADDR:
+ qede_rdma_changeaddr(edev);
+ break;
+ default:
+ DP_NOTICE(edev, "Invalid rdma event %d", event);
+ }
+}
+
+static void qede_rdma_add_event(struct qede_dev *edev,
+ enum qede_rdma_event event)
+{
+ struct qede_rdma_event_work *event_node;
+
+ if (!edev->rdma_info.qedr_dev)
+ return;
+
+ event_node = qede_rdma_get_free_event_node(edev);
+ if (!event_node)
+ return;
+
+ event_node->event = event;
+ event_node->ptr = edev;
+
+ INIT_WORK(&event_node->work, qede_rdma_handle_event);
+ queue_work(edev->rdma_info.rdma_wq, &event_node->work);
+}
+
+void qede_rdma_dev_event_open(struct qede_dev *edev)
+{
+ qede_rdma_add_event(edev, QEDE_UP);
+}
+
+void qede_rdma_dev_event_close(struct qede_dev *edev)
+{
+ qede_rdma_add_event(edev, QEDE_DOWN);
+}
+
+void qede_rdma_event_changeaddr(struct qede_dev *edev)
+{
+ qede_rdma_add_event(edev, QEDE_CHANGE_ADDR);
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_roce.c b/drivers/net/ethernet/qlogic/qede/qede_roce.c
deleted file mode 100644
index f00657c..0000000
--- a/drivers/net/ethernet/qlogic/qede/qede_roce.c
+++ /dev/null
@@ -1,314 +0,0 @@
-/* QLogic qedr NIC Driver
- * Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/qed/qede_roce.h>
-#include "qede.h"
-
-static struct qedr_driver *qedr_drv;
-static LIST_HEAD(qedr_dev_list);
-static DEFINE_MUTEX(qedr_dev_list_lock);
-
-bool qede_roce_supported(struct qede_dev *dev)
-{
- return dev->dev_info.common.rdma_supported;
-}
-
-static void _qede_roce_dev_add(struct qede_dev *edev)
-{
- if (!qedr_drv)
- return;
-
- edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
- edev->ndev);
-}
-
-static int qede_roce_create_wq(struct qede_dev *edev)
-{
- INIT_LIST_HEAD(&edev->rdma_info.roce_event_list);
- edev->rdma_info.roce_wq = create_singlethread_workqueue("roce_wq");
- if (!edev->rdma_info.roce_wq) {
- DP_NOTICE(edev, "qedr: Could not create workqueue\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void qede_roce_cleanup_event(struct qede_dev *edev)
-{
- struct list_head *head = &edev->rdma_info.roce_event_list;
- struct qede_roce_event_work *event_node;
-
- flush_workqueue(edev->rdma_info.roce_wq);
- while (!list_empty(head)) {
- event_node = list_entry(head->next, struct qede_roce_event_work,
- list);
- cancel_work_sync(&event_node->work);
- list_del(&event_node->list);
- kfree(event_node);
- }
-}
-
-static void qede_roce_destroy_wq(struct qede_dev *edev)
-{
- qede_roce_cleanup_event(edev);
- destroy_workqueue(edev->rdma_info.roce_wq);
-}
-
-int qede_roce_dev_add(struct qede_dev *edev)
-{
- int rc = 0;
-
- if (qede_roce_supported(edev)) {
- rc = qede_roce_create_wq(edev);
- if (rc)
- return rc;
-
- INIT_LIST_HEAD(&edev->rdma_info.entry);
- mutex_lock(&qedr_dev_list_lock);
- list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
- _qede_roce_dev_add(edev);
- mutex_unlock(&qedr_dev_list_lock);
- }
-
- return rc;
-}
-
-static void _qede_roce_dev_remove(struct qede_dev *edev)
-{
- if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
- qedr_drv->remove(edev->rdma_info.qedr_dev);
- edev->rdma_info.qedr_dev = NULL;
-}
-
-void qede_roce_dev_remove(struct qede_dev *edev)
-{
- if (!qede_roce_supported(edev))
- return;
-
- qede_roce_destroy_wq(edev);
- mutex_lock(&qedr_dev_list_lock);
- _qede_roce_dev_remove(edev);
- list_del(&edev->rdma_info.entry);
- mutex_unlock(&qedr_dev_list_lock);
-}
-
-static void _qede_roce_dev_open(struct qede_dev *edev)
-{
- if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
- qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
-}
-
-static void qede_roce_dev_open(struct qede_dev *edev)
-{
- if (!qede_roce_supported(edev))
- return;
-
- mutex_lock(&qedr_dev_list_lock);
- _qede_roce_dev_open(edev);
- mutex_unlock(&qedr_dev_list_lock);
-}
-
-static void _qede_roce_dev_close(struct qede_dev *edev)
-{
- if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
- qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
-}
-
-static void qede_roce_dev_close(struct qede_dev *edev)
-{
- if (!qede_roce_supported(edev))
- return;
-
- mutex_lock(&qedr_dev_list_lock);
- _qede_roce_dev_close(edev);
- mutex_unlock(&qedr_dev_list_lock);
-}
-
-static void qede_roce_dev_shutdown(struct qede_dev *edev)
-{
- if (!qede_roce_supported(edev))
- return;
-
- mutex_lock(&qedr_dev_list_lock);
- if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
- qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE);
- mutex_unlock(&qedr_dev_list_lock);
-}
-
-int qede_roce_register_driver(struct qedr_driver *drv)
-{
- struct qede_dev *edev;
- u8 qedr_counter = 0;
-
- mutex_lock(&qedr_dev_list_lock);
- if (qedr_drv) {
- mutex_unlock(&qedr_dev_list_lock);
- return -EINVAL;
- }
- qedr_drv = drv;
-
- list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
- struct net_device *ndev;
-
- qedr_counter++;
- _qede_roce_dev_add(edev);
- ndev = edev->ndev;
- if (netif_running(ndev) && netif_oper_up(ndev))
- _qede_roce_dev_open(edev);
- }
- mutex_unlock(&qedr_dev_list_lock);
-
- pr_notice("qedr: discovered and registered %d RoCE funcs\n",
- qedr_counter);
-
- return 0;
-}
-EXPORT_SYMBOL(qede_roce_register_driver);
-
-void qede_roce_unregister_driver(struct qedr_driver *drv)
-{
- struct qede_dev *edev;
-
- mutex_lock(&qedr_dev_list_lock);
- list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
- if (edev->rdma_info.qedr_dev)
- _qede_roce_dev_remove(edev);
- }
- qedr_drv = NULL;
- mutex_unlock(&qedr_dev_list_lock);
-}
-EXPORT_SYMBOL(qede_roce_unregister_driver);
-
-static void qede_roce_changeaddr(struct qede_dev *edev)
-{
- if (!qede_roce_supported(edev))
- return;
-
- if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
- qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
-}
-
-struct qede_roce_event_work *qede_roce_get_free_event_node(struct qede_dev
- *edev)
-{
- struct qede_roce_event_work *event_node = NULL;
- struct list_head *list_node = NULL;
- bool found = false;
-
- list_for_each(list_node, &edev->rdma_info.roce_event_list) {
- event_node = list_entry(list_node, struct qede_roce_event_work,
- list);
- if (!work_pending(&event_node->work)) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- event_node = kzalloc(sizeof(*event_node), GFP_KERNEL);
- if (!event_node) {
- DP_NOTICE(edev,
- "qedr: Could not allocate memory for roce work\n");
- return NULL;
- }
- list_add_tail(&event_node->list,
- &edev->rdma_info.roce_event_list);
- }
-
- return event_node;
-}
-
-static void qede_roce_handle_event(struct work_struct *work)
-{
- struct qede_roce_event_work *event_node;
- enum qede_roce_event event;
- struct qede_dev *edev;
-
- event_node = container_of(work, struct qede_roce_event_work, work);
- event = event_node->event;
- edev = event_node->ptr;
-
- switch (event) {
- case QEDE_UP:
- qede_roce_dev_open(edev);
- break;
- case QEDE_DOWN:
- qede_roce_dev_close(edev);
- break;
- case QEDE_CLOSE:
- qede_roce_dev_shutdown(edev);
- break;
- case QEDE_CHANGE_ADDR:
- qede_roce_changeaddr(edev);
- break;
- default:
- DP_NOTICE(edev, "Invalid roce event %d", event);
- }
-}
-
-static void qede_roce_add_event(struct qede_dev *edev,
- enum qede_roce_event event)
-{
- struct qede_roce_event_work *event_node;
-
- if (!edev->rdma_info.qedr_dev)
- return;
-
- event_node = qede_roce_get_free_event_node(edev);
- if (!event_node)
- return;
-
- event_node->event = event;
- event_node->ptr = edev;
-
- INIT_WORK(&event_node->work, qede_roce_handle_event);
- queue_work(edev->rdma_info.roce_wq, &event_node->work);
-}
-
-void qede_roce_dev_event_open(struct qede_dev *edev)
-{
- qede_roce_add_event(edev, QEDE_UP);
-}
-
-void qede_roce_dev_event_close(struct qede_dev *edev)
-{
- qede_roce_add_event(edev, QEDE_DOWN);
-}
-
-void qede_roce_event_changeaddr(struct qede_dev *edev)
-{
- qede_roce_add_event(edev, QEDE_CHANGE_ADDR);
-}
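The file removed above used a small event-recycling scheme worth noting: RDMA
events can arrive in contexts where the driver cannot call into qedr directly,
so they are queued on a single-threaded workqueue, and idle work items are
reused from a per-device list instead of being allocated for every event. A
minimal sketch of that pattern, with hypothetical names (ev_work,
ev_get_free_node) that mirror qede_roce_get_free_event_node() above rather
than any exported API:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct ev_work {
	struct list_head list;
	struct work_struct work;
	int event;
};

static struct ev_work *ev_get_free_node(struct list_head *pool)
{
	struct ev_work *node;

	/* Reuse the first node whose work item is idle... */
	list_for_each_entry(node, pool, list)
		if (!work_pending(&node->work))
			return node;

	/* ...otherwise grow the pool; nodes are only freed at teardown. */
	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node)
		list_add_tail(&node->list, pool);
	return node;
}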
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 1188d42..9feec70 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1577,7 +1577,7 @@
rx_ring->rx_dropped++;
goto err_out;
}
- memcpy(skb_put(skb, hlen), addr, hlen);
+ skb_put_data(skb, addr, hlen);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
length);
@@ -1654,7 +1654,7 @@
dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
- memcpy(skb_put(new_skb, length), skb->data, length);
+ skb_put_data(new_skb, skb->data, length);
pci_dma_sync_single_for_device(qdev->pdev,
dma_unmap_addr(sbq_desc, mapaddr),
@@ -1817,8 +1817,7 @@
dma_unmap_len
(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
- memcpy(skb_put(skb, length),
- sbq_desc->p.skb->data, length);
+ skb_put_data(skb, sbq_desc->p.skb->data, length);
pci_dma_sync_single_for_device(qdev->pdev,
dma_unmap_addr
(sbq_desc,
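The qlge hunks above are a mechanical conversion: skb_put_data(), new in this
cycle, folds the memcpy()-into-skb_put() idiom into a single call. Its
semantics are roughly the following sketch (not its exact in-tree definition):

#include <linux/skbuff.h>

static inline void *skb_put_data(struct sk_buff *skb, const void *data,
				 unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memcpy(tmp, data, len);
	return tmp;
}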
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index d7720bf..877675a 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -16,7 +16,13 @@
if NET_VENDOR_QUALCOMM
config QCA7000
- tristate "Qualcomm Atheros QCA7000 support"
+ tristate
+ help
+ This enables support for the Qualcomm Atheros QCA7000.
+
+config QCA7000_SPI
+ tristate "Qualcomm Atheros QCA7000 SPI support"
+ select QCA7000
depends on SPI_MASTER && OF
---help---
This SPI protocol driver supports the Qualcomm Atheros QCA7000.
@@ -24,6 +30,22 @@
To compile this driver as a module, choose M here. The module
will be called qcaspi.
+config QCA7000_UART
+ tristate "Qualcomm Atheros QCA7000 UART support"
+ select QCA7000
+ depends on SERIAL_DEV_BUS && OF
+ ---help---
+ This UART protocol driver supports the Qualcomm Atheros QCA7000.
+
+ Currently the driver assumes these device UART settings:
+ Data bits: 8
+ Parity: None
+ Stop bits: 1
+ Flow control: None
+
+ To compile this driver as a module, choose M here. The module
+ will be called qcauart.
+
config QCOM_EMAC
tristate "Qualcomm Technologies, Inc. EMAC Gigabit Ethernet support"
depends on HAS_DMA && HAS_IOMEM
diff --git a/drivers/net/ethernet/qualcomm/Makefile b/drivers/net/ethernet/qualcomm/Makefile
index aacb0a5..92fa7c4 100644
--- a/drivers/net/ethernet/qualcomm/Makefile
+++ b/drivers/net/ethernet/qualcomm/Makefile
@@ -2,7 +2,10 @@
# Makefile for the Qualcomm network device drivers.
#
-obj-$(CONFIG_QCA7000) += qcaspi.o
-qcaspi-objs := qca_spi.o qca_framing.o qca_7k.o qca_debug.o
+obj-$(CONFIG_QCA7000) += qca_7k_common.o
+obj-$(CONFIG_QCA7000_SPI) += qcaspi.o
+qcaspi-objs := qca_7k.o qca_debug.o qca_spi.o
+obj-$(CONFIG_QCA7000_UART) += qcauart.o
+qcauart-objs := qca_uart.o
obj-y += emac/
diff --git a/drivers/net/ethernet/qualcomm/qca_7k.c b/drivers/net/ethernet/qualcomm/qca_7k.c
index f0066fb..ffe7a16 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k.c
+++ b/drivers/net/ethernet/qualcomm/qca_7k.c
@@ -23,11 +23,9 @@
* kernel-based SPI device.
*/
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
#include <linux/spi/spi.h>
-#include <linux/version.h>
#include "qca_7k.h"
@@ -123,27 +121,3 @@
return ret;
}
-
-int
-qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd)
-{
- __be16 tx_data;
- struct spi_message *msg = &qca->spi_msg1;
- struct spi_transfer *transfer = &qca->spi_xfer1;
- int ret;
-
- tx_data = cpu_to_be16(cmd);
- transfer->len = sizeof(tx_data);
- transfer->tx_buf = &tx_data;
- transfer->rx_buf = NULL;
-
- ret = spi_sync(qca->spi_dev, msg);
-
- if (!ret)
- ret = msg->status;
-
- if (ret)
- qcaspi_spi_error(qca);
-
- return ret;
-}
diff --git a/drivers/net/ethernet/qualcomm/qca_7k.h b/drivers/net/ethernet/qualcomm/qca_7k.h
index 1cad851..27124c2 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k.h
+++ b/drivers/net/ethernet/qualcomm/qca_7k.h
@@ -54,19 +54,18 @@
#define SPI_REG_ACTION_CTRL 0x1B00
/* SPI_CONFIG register definition; */
-#define QCASPI_SLAVE_RESET_BIT (1 << 6)
+#define QCASPI_SLAVE_RESET_BIT BIT(6)
/* INTR_CAUSE/ENABLE register definition. */
-#define SPI_INT_WRBUF_BELOW_WM (1 << 10)
-#define SPI_INT_CPU_ON (1 << 6)
-#define SPI_INT_ADDR_ERR (1 << 3)
-#define SPI_INT_WRBUF_ERR (1 << 2)
-#define SPI_INT_RDBUF_ERR (1 << 1)
-#define SPI_INT_PKT_AVLBL (1 << 0)
+#define SPI_INT_WRBUF_BELOW_WM BIT(10)
+#define SPI_INT_CPU_ON BIT(6)
+#define SPI_INT_ADDR_ERR BIT(3)
+#define SPI_INT_WRBUF_ERR BIT(2)
+#define SPI_INT_RDBUF_ERR BIT(1)
+#define SPI_INT_PKT_AVLBL BIT(0)
void qcaspi_spi_error(struct qcaspi *qca);
int qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result);
int qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value);
-int qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd);
#endif /* _QCA_7K_H */
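The BIT() conversion above is purely cosmetic: BIT(n) expands to
(1UL << (n)) (see include/linux/bitops.h), so the mask values are unchanged
apart from the promotion to unsigned long. For example:

#define QCASPI_SLAVE_RESET_BIT	BIT(6)	/* == (1UL << 6) == 0x40 */
#define SPI_INT_WRBUF_BELOW_WM	BIT(10)	/* == (1UL << 10) == 0x400 */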
diff --git a/drivers/net/ethernet/qualcomm/qca_framing.c b/drivers/net/ethernet/qualcomm/qca_7k_common.c
similarity index 84%
rename from drivers/net/ethernet/qualcomm/qca_framing.c
rename to drivers/net/ethernet/qualcomm/qca_7k_common.c
index faa924c..6b511f0 100644
--- a/drivers/net/ethernet/qualcomm/qca_framing.c
+++ b/drivers/net/ethernet/qualcomm/qca_7k_common.c
@@ -21,9 +21,11 @@
 * by an Atheros frame while transmitted over a serial channel.
 */
+#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/module.h>
-#include "qca_framing.h"
+#include "qca_7k_common.h"
u16
qcafrm_create_header(u8 *buf, u16 length)
@@ -46,6 +48,7 @@
return QCAFRM_HEADER_LEN;
}
+EXPORT_SYMBOL_GPL(qcafrm_create_header);
u16
qcafrm_create_footer(u8 *buf)
@@ -57,6 +60,7 @@
buf[1] = 0x55;
return QCAFRM_FOOTER_LEN;
}
+EXPORT_SYMBOL_GPL(qcafrm_create_footer);
/* Gather received bytes and try to extract a full ethernet frame by
* following a simple state machine.
@@ -83,7 +87,7 @@
if (recv_byte != 0x00) {
/* first two bytes of length must be 0 */
- handle->state = QCAFRM_HW_LEN0;
+ handle->state = handle->init;
}
break;
case QCAFRM_HW_LEN2:
@@ -97,7 +101,7 @@
case QCAFRM_WAIT_AA4:
if (recv_byte != 0xAA) {
ret = QCAFRM_NOHEAD;
- handle->state = QCAFRM_HW_LEN0;
+ handle->state = handle->init;
} else {
handle->state--;
}
@@ -117,9 +121,9 @@
break;
case QCAFRM_WAIT_RSVD_BYTE2:
len = handle->offset;
- if (len > buf_len || len < QCAFRM_ETHMINLEN) {
+ if (len > buf_len || len < QCAFRM_MIN_LEN) {
ret = QCAFRM_INVLEN;
- handle->state = QCAFRM_HW_LEN0;
+ handle->state = handle->init;
} else {
handle->state = (enum qcafrm_state)(len + 1);
/* Remaining number of bytes. */
@@ -135,7 +139,7 @@
case QCAFRM_WAIT_551:
if (recv_byte != 0x55) {
ret = QCAFRM_NOTAIL;
- handle->state = QCAFRM_HW_LEN0;
+ handle->state = handle->init;
} else {
handle->state = QCAFRM_WAIT_552;
}
@@ -143,14 +147,20 @@
case QCAFRM_WAIT_552:
if (recv_byte != 0x55) {
ret = QCAFRM_NOTAIL;
- handle->state = QCAFRM_HW_LEN0;
+ handle->state = handle->init;
} else {
ret = handle->offset;
/* Frame is fully received. */
- handle->state = QCAFRM_HW_LEN0;
+ handle->state = handle->init;
}
break;
}
return ret;
}
+EXPORT_SYMBOL_GPL(qcafrm_fsm_decode);
+
+MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 common");
+MODULE_AUTHOR("Qualcomm Atheros Communications");
+MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/qualcomm/qca_framing.h b/drivers/net/ethernet/qualcomm/qca_7k_common.h
similarity index 85%
rename from drivers/net/ethernet/qualcomm/qca_framing.h
rename to drivers/net/ethernet/qualcomm/qca_7k_common.h
index d5e795d..928554f 100644
--- a/drivers/net/ethernet/qualcomm/qca_framing.h
+++ b/drivers/net/ethernet/qualcomm/qca_7k_common.h
@@ -44,12 +44,12 @@
#define QCAFRM_INVFRAME (QCAFRM_ERR_BASE - 4)
/* Min/Max Ethernet MTU: 46/1500 */
-#define QCAFRM_ETHMINMTU (ETH_ZLEN - ETH_HLEN)
-#define QCAFRM_ETHMAXMTU ETH_DATA_LEN
+#define QCAFRM_MIN_MTU (ETH_ZLEN - ETH_HLEN)
+#define QCAFRM_MAX_MTU ETH_DATA_LEN
/* Min/Max frame lengths */
-#define QCAFRM_ETHMINLEN (QCAFRM_ETHMINMTU + ETH_HLEN)
-#define QCAFRM_ETHMAXLEN (QCAFRM_ETHMAXMTU + VLAN_ETH_HLEN)
+#define QCAFRM_MIN_LEN (QCAFRM_MIN_MTU + ETH_HLEN)
+#define QCAFRM_MAX_LEN (QCAFRM_MAX_MTU + VLAN_ETH_HLEN)
/* QCA7K header len */
#define QCAFRM_HEADER_LEN 8
@@ -61,6 +61,7 @@
#define QCAFRM_ERR_BASE -1000
enum qcafrm_state {
+ /* HW length is only available on SPI */
QCAFRM_HW_LEN0 = 0x8000,
QCAFRM_HW_LEN1 = QCAFRM_HW_LEN0 - 1,
QCAFRM_HW_LEN2 = QCAFRM_HW_LEN1 - 1,
@@ -101,9 +102,11 @@
struct qcafrm_handle {
/* Current decoding state */
enum qcafrm_state state;
+ /* Initial state depends on connection type */
+ enum qcafrm_state init;
/* Offset in buffer (borrowed for length too) */
- s16 offset;
+ u16 offset;
/* Frame length as kept by this module */
u16 len;
@@ -113,9 +116,16 @@
u16 qcafrm_create_footer(u8 *buf);
-static inline void qcafrm_fsm_init(struct qcafrm_handle *handle)
+static inline void qcafrm_fsm_init_spi(struct qcafrm_handle *handle)
{
- handle->state = QCAFRM_HW_LEN0;
+ handle->init = QCAFRM_HW_LEN0;
+ handle->state = handle->init;
+}
+
+static inline void qcafrm_fsm_init_uart(struct qcafrm_handle *handle)
+{
+ handle->init = QCAFRM_WAIT_AA1;
+ handle->state = handle->init;
}
/* Gather received bytes and try to extract a full Ethernet frame
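Storing the init state in the handle lets the same decoder serve both
transports: SPI frames carry a hardware length prefix that UART frames lack,
so the SPI decoder starts at QCAFRM_HW_LEN0 while the UART decoder skips
straight to hunting for the 0xAA sync bytes. A small usage sketch based on
the inline helpers above:

struct qcafrm_handle spi_h, uart_h;

qcafrm_fsm_init_spi(&spi_h);	/* starts at QCAFRM_HW_LEN0 */
qcafrm_fsm_init_uart(&uart_h);	/* starts at QCAFRM_WAIT_AA1 */

On any framing error, qcafrm_fsm_decode() now resets to handle->init, so each
transport restarts at its own first state instead of the SPI-only one.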
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index d145df9..92b6be9 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -275,6 +275,7 @@
static int
qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
+ const struct net_device_ops *ops = dev->netdev_ops;
struct qcaspi *qca = netdev_priv(dev);
if ((ring->rx_pending) ||
@@ -283,13 +284,13 @@
return -EINVAL;
if (netif_running(dev))
- qcaspi_netdev_close(dev);
+ ops->ndo_stop(dev);
qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN);
qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN);
if (netif_running(dev))
- qcaspi_netdev_open(dev);
+ ops->ndo_open(dev);
return 0;
}
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 24ca7df..9c23629 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -43,8 +43,8 @@
#include <linux/types.h>
#include "qca_7k.h"
+#include "qca_7k_common.h"
#include "qca_debug.h"
-#include "qca_framing.h"
#include "qca_spi.h"
#define MAX_DMA_BURST_LEN 5000
@@ -69,7 +69,6 @@
module_param(qcaspi_pluggable, int, 0);
MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no).");
-#define QCASPI_MTU QCAFRM_ETHMAXMTU
#define QCASPI_TX_TIMEOUT (1 * HZ)
#define QCASPI_QCA7K_REBOOT_TIME_MS 1000
@@ -193,6 +192,30 @@
}
static int
+qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd)
+{
+ __be16 tx_data;
+ struct spi_message *msg = &qca->spi_msg1;
+ struct spi_transfer *transfer = &qca->spi_xfer1;
+ int ret;
+
+ tx_data = cpu_to_be16(cmd);
+ transfer->len = sizeof(tx_data);
+ transfer->tx_buf = &tx_data;
+ transfer->rx_buf = NULL;
+
+ ret = spi_sync(qca->spi_dev, msg);
+
+ if (!ret)
+ ret = msg->status;
+
+ if (ret)
+ qcaspi_spi_error(qca);
+
+ return ret;
+}
+
+static int
qcaspi_tx_frame(struct qcaspi *qca, struct sk_buff *skb)
{
u32 count;
@@ -403,7 +426,7 @@
if (txr->skb[txr->tail])
return 0;
- return (txr->size + QCAFRM_ETHMAXLEN < QCASPI_HW_BUF_LEN) ? 1 : 0;
+ return (txr->size + QCAFRM_MAX_LEN < QCASPI_HW_BUF_LEN) ? 1 : 0;
}
/* Flush the tx ring. This function is only safe to
@@ -603,7 +626,7 @@
return IRQ_HANDLED;
}
-int
+static int
qcaspi_netdev_open(struct net_device *dev)
{
struct qcaspi *qca = netdev_priv(dev);
@@ -615,7 +638,7 @@
qca->intr_req = 1;
qca->intr_svc = 0;
qca->sync = QCASPI_SYNC_UNKNOWN;
- qcafrm_fsm_init(&qca->frm_handle);
+ qcafrm_fsm_init_spi(&qca->frm_handle);
qca->spi_thread = kthread_run((void *)qcaspi_spi_thread,
qca, "%s", dev->name);
@@ -640,7 +663,7 @@
return 0;
}
-int
+static int
qcaspi_netdev_close(struct net_device *dev)
{
struct qcaspi *qca = netdev_priv(dev);
@@ -667,8 +690,8 @@
struct sk_buff *tskb;
u8 pad_len = 0;
- if (skb->len < QCAFRM_ETHMINLEN)
- pad_len = QCAFRM_ETHMINLEN - skb->len;
+ if (skb->len < QCAFRM_MIN_LEN)
+ pad_len = QCAFRM_MIN_LEN - skb->len;
if (qca->txr.skb[qca->txr.tail]) {
netdev_warn(qca->net_dev, "queue was unexpectedly full!\n");
@@ -696,8 +719,7 @@
qcafrm_create_header(ptmp, frame_len);
if (pad_len) {
- ptmp = skb_put(skb, pad_len);
- memset(ptmp, 0, pad_len);
+ ptmp = skb_put_zero(skb, pad_len);
}
ptmp = skb_put(skb, QCAFRM_FOOTER_LEN);
@@ -746,7 +768,7 @@
{
struct qcaspi *qca = netdev_priv(dev);
- dev->mtu = QCASPI_MTU;
+ dev->mtu = QCAFRM_MAX_MTU;
dev->type = ARPHRD_ETHER;
qca->clkspeed = qcaspi_clkspeed;
qca->burst_len = qcaspi_burst_len;
@@ -805,8 +827,8 @@
dev->tx_queue_len = 100;
/* MTU range: 46 - 1500 */
- dev->min_mtu = QCAFRM_ETHMINMTU;
- dev->max_mtu = QCAFRM_ETHMAXMTU;
+ dev->min_mtu = QCAFRM_MIN_MTU;
+ dev->max_mtu = QCAFRM_MAX_MTU;
qca = netdev_priv(dev);
memset(qca, 0, sizeof(struct qcaspi));
@@ -894,6 +916,7 @@
return -ENOMEM;
qcaspi_netdev_setup(qcaspi_devs);
+ SET_NETDEV_DEV(qcaspi_devs, &spi->dev);
qca = netdev_priv(qcaspi_devs);
if (!qca) {
@@ -975,7 +998,7 @@
};
module_spi_driver(qca_spi_driver);
-MODULE_DESCRIPTION("Qualcomm Atheros SPI Driver");
+MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 SPI Driver");
MODULE_AUTHOR("Qualcomm Atheros Communications");
MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index 6e31a0e..fc4beb1 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -32,7 +32,7 @@
#include <linux/spi/spi.h>
#include <linux/types.h>
-#include "qca_framing.h"
+#include "qca_7k_common.h"
#define QCASPI_DRV_VERSION "0.2.7-i"
#define QCASPI_DRV_NAME "qcaspi"
@@ -108,7 +108,4 @@
u16 burst_len;
};
-int qcaspi_netdev_open(struct net_device *dev);
-int qcaspi_netdev_close(struct net_device *dev);
-
#endif /* _QCA_SPI_H */
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
new file mode 100644
index 0000000..db6068c
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -0,0 +1,423 @@
+/*
+ * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
+ * Copyright (c) 2017, I2SE GmbH
+ *
+ * Permission to use, copy, modify, and/or distribute this software
+ * for any purpose with or without fee is hereby granted, provided
+ * that the above copyright notice and this permission notice appear
+ * in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* This module implements the Qualcomm Atheros UART protocol for a
+ * kernel-based UART device; it is essentially an Ethernet-to-UART
+ * serial converter.
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/sched.h>
+#include <linux/serdev.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include "qca_7k_common.h"
+
+#define QCAUART_DRV_VERSION "0.1.0"
+#define QCAUART_DRV_NAME "qcauart"
+#define QCAUART_TX_TIMEOUT (1 * HZ)
+
+struct qcauart {
+ struct net_device *net_dev;
+ spinlock_t lock; /* transmit lock */
+ struct work_struct tx_work; /* Flushes transmit buffer */
+
+ struct serdev_device *serdev;
+ struct qcafrm_handle frm_handle;
+ struct sk_buff *rx_skb;
+
+ unsigned char *tx_head; /* pointer to next XMIT byte */
+ int tx_left; /* bytes left in XMIT queue */
+ unsigned char *tx_buffer;
+};
+
+static int
+qca_tty_receive(struct serdev_device *serdev, const unsigned char *data,
+ size_t count)
+{
+ struct qcauart *qca = serdev_device_get_drvdata(serdev);
+ struct net_device *netdev = qca->net_dev;
+ struct net_device_stats *n_stats = &netdev->stats;
+ size_t i;
+
+ if (!qca->rx_skb) {
+ qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
+ netdev->mtu +
+ VLAN_ETH_HLEN);
+ if (!qca->rx_skb) {
+ n_stats->rx_errors++;
+ n_stats->rx_dropped++;
+ return 0;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ s32 retcode;
+
+ retcode = qcafrm_fsm_decode(&qca->frm_handle,
+ qca->rx_skb->data,
+ skb_tailroom(qca->rx_skb),
+ data[i]);
+
+ switch (retcode) {
+ case QCAFRM_GATHER:
+ case QCAFRM_NOHEAD:
+ break;
+ case QCAFRM_NOTAIL:
+ netdev_dbg(netdev, "recv: no RX tail\n");
+ n_stats->rx_errors++;
+ n_stats->rx_dropped++;
+ break;
+ case QCAFRM_INVLEN:
+ netdev_dbg(netdev, "recv: invalid RX length\n");
+ n_stats->rx_errors++;
+ n_stats->rx_dropped++;
+ break;
+ default:
+ n_stats->rx_packets++;
+ n_stats->rx_bytes += retcode;
+ skb_put(qca->rx_skb, retcode);
+ qca->rx_skb->protocol = eth_type_trans(
+ qca->rx_skb, qca->rx_skb->dev);
+ qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_rx_ni(qca->rx_skb);
+ qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
+ netdev->mtu +
+ VLAN_ETH_HLEN);
+ if (!qca->rx_skb) {
+ netdev_dbg(netdev, "recv: out of RX resources\n");
+ n_stats->rx_errors++;
+ return i;
+ }
+ }
+ }
+
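+ /* Per the serdev receive_buf contract, return the number of
+  * bytes of @data that were actually consumed.
+  */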
+ return i;
+}
+
+/* Write out any remaining transmit buffer. Scheduled when tty is writable */
+static void qcauart_transmit(struct work_struct *work)
+{
+ struct qcauart *qca = container_of(work, struct qcauart, tx_work);
+ struct net_device_stats *n_stats = &qca->net_dev->stats;
+ int written;
+
+ spin_lock_bh(&qca->lock);
+
+ /* First make sure we're connected. */
+ if (!netif_running(qca->net_dev)) {
+ spin_unlock_bh(&qca->lock);
+ return;
+ }
+
+ if (qca->tx_left <= 0) {
+ /* The transmit buffer has been fully drained, so we can
+  * start transmission of another packet.
+  */
+ n_stats->tx_packets++;
+ spin_unlock_bh(&qca->lock);
+ netif_wake_queue(qca->net_dev);
+ return;
+ }
+
+ written = serdev_device_write_buf(qca->serdev, qca->tx_head,
+ qca->tx_left);
+ if (written > 0) {
+ qca->tx_left -= written;
+ qca->tx_head += written;
+ }
+ spin_unlock_bh(&qca->lock);
+}
+
+/* Called by the driver when there's room for more data.
+ * Schedule the transmit.
+ */
+static void qca_tty_wakeup(struct serdev_device *serdev)
+{
+ struct qcauart *qca = serdev_device_get_drvdata(serdev);
+
+ schedule_work(&qca->tx_work);
+}
+
+static struct serdev_device_ops qca_serdev_ops = {
+ .receive_buf = qca_tty_receive,
+ .write_wakeup = qca_tty_wakeup,
+};
+
+static int qcauart_netdev_open(struct net_device *dev)
+{
+ struct qcauart *qca = netdev_priv(dev);
+
+ netif_start_queue(qca->net_dev);
+
+ return 0;
+}
+
+static int qcauart_netdev_close(struct net_device *dev)
+{
+ struct qcauart *qca = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ flush_work(&qca->tx_work);
+
+ spin_lock_bh(&qca->lock);
+ qca->tx_left = 0;
+ spin_unlock_bh(&qca->lock);
+
+ return 0;
+}
+
+static netdev_tx_t
+qcauart_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_device_stats *n_stats = &dev->stats;
+ struct qcauart *qca = netdev_priv(dev);
+ u8 pad_len = 0;
+ int written;
+ u8 *pos;
+
+ spin_lock(&qca->lock);
+
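+ /* The netif queue stays stopped until tx_work has drained the
+  * buffer, so tx_left should always be zero on entry here.
+  */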
+ WARN_ON(qca->tx_left);
+
+ if (!netif_running(dev)) {
+ spin_unlock(&qca->lock);
+ netdev_warn(qca->net_dev, "xmit: iface is down\n");
+ goto out;
+ }
+
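+ /* Assemble one QCA7000 frame in tx_buffer: framing header,
+  * payload padded up to the minimum Ethernet frame length,
+  * then the two-byte footer.
+  */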
+ pos = qca->tx_buffer;
+
+ if (skb->len < QCAFRM_MIN_LEN)
+ pad_len = QCAFRM_MIN_LEN - skb->len;
+
+ pos += qcafrm_create_header(pos, skb->len + pad_len);
+
+ memcpy(pos, skb->data, skb->len);
+ pos += skb->len;
+
+ if (pad_len) {
+ memset(pos, 0, pad_len);
+ pos += pad_len;
+ }
+
+ pos += qcafrm_create_footer(pos);
+
+ netif_stop_queue(qca->net_dev);
+
+ written = serdev_device_write_buf(qca->serdev, qca->tx_buffer,
+ pos - qca->tx_buffer);
+ if (written > 0) {
+ qca->tx_left = (pos - qca->tx_buffer) - written;
+ qca->tx_head = qca->tx_buffer + written;
+ n_stats->tx_bytes += written;
+ }
+ spin_unlock(&qca->lock);
+
+ netif_trans_update(dev);
+out:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static void qcauart_netdev_tx_timeout(struct net_device *dev)
+{
+ struct qcauart *qca = netdev_priv(dev);
+
+ netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
+ jiffies, dev_trans_start(dev));
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
+}
+
+static int qcauart_netdev_init(struct net_device *dev)
+{
+ struct qcauart *qca = netdev_priv(dev);
+ size_t len;
+
+ /* Finish setting up the device info. */
+ dev->mtu = QCAFRM_MAX_MTU;
+ dev->type = ARPHRD_ETHER;
+
+ len = QCAFRM_HEADER_LEN + QCAFRM_MAX_LEN + QCAFRM_FOOTER_LEN;
+ qca->tx_buffer = devm_kmalloc(&qca->serdev->dev, len, GFP_KERNEL);
+ if (!qca->tx_buffer)
+ return -ENOMEM;
+
+ qca->rx_skb = netdev_alloc_skb_ip_align(qca->net_dev,
+ qca->net_dev->mtu +
+ VLAN_ETH_HLEN);
+ if (!qca->rx_skb)
+ return -ENOBUFS;
+
+ return 0;
+}
+
+static void qcauart_netdev_uninit(struct net_device *dev)
+{
+ struct qcauart *qca = netdev_priv(dev);
+
+ if (qca->rx_skb)
+ dev_kfree_skb(qca->rx_skb);
+}
+
+static const struct net_device_ops qcauart_netdev_ops = {
+ .ndo_init = qcauart_netdev_init,
+ .ndo_uninit = qcauart_netdev_uninit,
+ .ndo_open = qcauart_netdev_open,
+ .ndo_stop = qcauart_netdev_close,
+ .ndo_start_xmit = qcauart_netdev_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_tx_timeout = qcauart_netdev_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static void qcauart_netdev_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &qcauart_netdev_ops;
+ dev->watchdog_timeo = QCAUART_TX_TIMEOUT;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->tx_queue_len = 100;
+
+ /* MTU range: 46 - 1500 */
+ dev->min_mtu = QCAFRM_MIN_MTU;
+ dev->max_mtu = QCAFRM_MAX_MTU;
+}
+
+static const struct of_device_id qca_uart_of_match[] = {
+ {
+ .compatible = "qca,qca7000",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qca_uart_of_match);
+
+static int qca_uart_probe(struct serdev_device *serdev)
+{
+ struct net_device *qcauart_dev = alloc_etherdev(sizeof(struct qcauart));
+ struct qcauart *qca;
+ const char *mac;
+ u32 speed = 115200;
+ int ret;
+
+ if (!qcauart_dev)
+ return -ENOMEM;
+
+ qcauart_netdev_setup(qcauart_dev);
+ SET_NETDEV_DEV(qcauart_dev, &serdev->dev);
+
+ qca = netdev_priv(qcauart_dev);
+ if (!qca) {
+ pr_err("qca_uart: Failed to retrieve private structure\n");
+ ret = -ENOMEM;
+ goto free;
+ }
+ qca->net_dev = qcauart_dev;
+ qca->serdev = serdev;
+ qcafrm_fsm_init_uart(&qca->frm_handle);
+
+ spin_lock_init(&qca->lock);
+ INIT_WORK(&qca->tx_work, qcauart_transmit);
+
+ of_property_read_u32(serdev->dev.of_node, "current-speed", &speed);
+
+ mac = of_get_mac_address(serdev->dev.of_node);
+
+ if (mac)
+ ether_addr_copy(qca->net_dev->dev_addr, mac);
+
+ if (!is_valid_ether_addr(qca->net_dev->dev_addr)) {
+ eth_hw_addr_random(qca->net_dev);
+ dev_info(&serdev->dev, "Using random MAC address: %pM\n",
+ qca->net_dev->dev_addr);
+ }
+
+ netif_carrier_on(qca->net_dev);
+ serdev_device_set_drvdata(serdev, qca);
+ serdev_device_set_client_ops(serdev, &qca_serdev_ops);
+
+ ret = serdev_device_open(serdev);
+ if (ret) {
+ dev_err(&serdev->dev, "Unable to open device %s\n",
+ qcauart_dev->name);
+ goto free;
+ }
+
+ speed = serdev_device_set_baudrate(serdev, speed);
+ dev_info(&serdev->dev, "Using baudrate: %u\n", speed);
+
+ serdev_device_set_flow_control(serdev, false);
+
+ ret = register_netdev(qcauart_dev);
+ if (ret) {
+ dev_err(&serdev->dev, "Unable to register net device %s\n",
+ qcauart_dev->name);
+ serdev_device_close(serdev);
+ cancel_work_sync(&qca->tx_work);
+ goto free;
+ }
+
+ return 0;
+
+free:
+ free_netdev(qcauart_dev);
+ return ret;
+}
+
+static void qca_uart_remove(struct serdev_device *serdev)
+{
+ struct qcauart *qca = serdev_device_get_drvdata(serdev);
+
+ unregister_netdev(qca->net_dev);
+
+ /* Flush any pending characters in the driver. */
+ serdev_device_close(serdev);
+ cancel_work_sync(&qca->tx_work);
+
+ free_netdev(qca->net_dev);
+}
+
+static struct serdev_device_driver qca_uart_driver = {
+ .probe = qca_uart_probe,
+ .remove = qca_uart_remove,
+ .driver = {
+ .name = QCAUART_DRV_NAME,
+ .of_match_table = of_match_ptr(qca_uart_of_match),
+ },
+};
+
+module_serdev_device_driver(qca_uart_driver);
+
+MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 UART Driver");
+MODULE_AUTHOR("Qualcomm Atheros Communications");
+MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(QCAUART_DRV_VERSION);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 72233ab..e7ab23e 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1410,14 +1410,13 @@
struct ethtool_link_ksettings *cmd)
{
struct cp_private *cp = netdev_priv(dev);
- int rc;
unsigned long flags;
spin_lock_irqsave(&cp->lock, flags);
- rc = mii_ethtool_get_link_ksettings(&cp->mii_if, cmd);
+ mii_ethtool_get_link_ksettings(&cp->mii_if, cmd);
spin_unlock_irqrestore(&cp->lock, flags);
- return rc;
+ return 0;
}
static int cp_set_link_ksettings(struct net_device *dev,
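The get_link_ksettings changes in this and the following drivers (r8169,
ravb, sh_eth) track an mii/phylib API change in this cycle:
mii_ethtool_get_link_ksettings() and phy_ethtool_ksettings_get() now return
void because they could never fail, so each caller returns 0 itself. The
resulting pattern, sketched with a hypothetical driver and foo_priv type:

struct foo_priv {
	struct mii_if_info mii;
};

static int foo_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct foo_priv *priv = netdev_priv(dev);

	mii_ethtool_get_link_ksettings(&priv->mii, cmd);	/* cannot fail */
	return 0;
}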
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 0a8f281..bd07a15 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2148,7 +2148,9 @@
{
struct rtl8169_private *tp = netdev_priv(dev);
- return mii_ethtool_get_link_ksettings(&tp->mii, cmd);
+ mii_ethtool_get_link_ksettings(&tp->mii, cmd);
+
+ return 0;
}
static int rtl8169_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 784782d..5931e85 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1076,16 +1076,16 @@
struct ethtool_link_ksettings *cmd)
{
struct ravb_private *priv = netdev_priv(ndev);
- int error = -ENODEV;
unsigned long flags;
- if (ndev->phydev) {
- spin_lock_irqsave(&priv->lock, flags);
- error = phy_ethtool_ksettings_get(ndev->phydev, cmd);
- spin_unlock_irqrestore(&priv->lock, flags);
- }
+ if (!ndev->phydev)
+ return -ENODEV;
- return error;
+ spin_lock_irqsave(&priv->lock, flags);
+ phy_ethtool_ksettings_get(ndev->phydev, cmd);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
}
static int ravb_set_link_ksettings(struct net_device *ndev,
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 2d686cc..d2dc0a8 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1915,16 +1915,15 @@
{
struct sh_eth_private *mdp = netdev_priv(ndev);
unsigned long flags;
- int ret;
if (!ndev->phydev)
return -ENODEV;
spin_lock_irqsave(&mdp->lock, flags);
- ret = phy_ethtool_ksettings_get(ndev->phydev, cmd);
+ phy_ethtool_ksettings_get(ndev->phydev, cmd);
spin_unlock_irqrestore(&mdp->lock, flags);
- return ret;
+ return 0;
}
static int sh_eth_set_link_ksettings(struct net_device *ndev,
@@ -2558,6 +2557,17 @@
return phy_mii_ioctl(phydev, rq, cmd);
}
+static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ ndev->mtu = new_mtu;
+ netdev_update_features(ndev);
+
+ return 0;
+}
+
/* For TSU_POSTn. Please refer to the manual about this (strange) bitfields */
static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
int entry)
@@ -3029,6 +3039,7 @@
.ndo_set_rx_mode = sh_eth_set_rx_mode,
.ndo_tx_timeout = sh_eth_tx_timeout,
.ndo_do_ioctl = sh_eth_do_ioctl,
+ .ndo_change_mtu = sh_eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
@@ -3043,6 +3054,7 @@
.ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
.ndo_tx_timeout = sh_eth_tx_timeout,
.ndo_do_ioctl = sh_eth_do_ioctl,
+ .ndo_change_mtu = sh_eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
@@ -3171,6 +3183,13 @@
}
sh_eth_set_default_cpu_data(mdp->cd);
+ /* User's manual states max MTU should be 2048 but due to the
+ * alignment calculations in sh_eth_ring_init() the practical
+ * MTU is a bit less. Maybe this can be optimized some more.
+ */
+ ndev->max_mtu = 2000 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
+ ndev->min_mtu = ETH_MIN_MTU;
+
/* set function */
if (mdp->cd->tsu)
ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
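For reference, the sh_eth max_mtu above works out to
2000 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) = 2000 - (14 + 4 + 4) = 1978
bytes: the driver reserves 22 bytes of the 2000-byte buffer budget for the
Ethernet header, one VLAN tag and the FCS.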
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index ee9675d..748fb12 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -105,32 +105,27 @@
int (*port_open)(struct rocker_port *rocker_port);
void (*port_stop)(struct rocker_port *rocker_port);
int (*port_attr_stp_state_set)(struct rocker_port *rocker_port,
- u8 state,
- struct switchdev_trans *trans);
+ u8 state);
int (*port_attr_bridge_flags_set)(struct rocker_port *rocker_port,
unsigned long brport_flags,
struct switchdev_trans *trans);
int (*port_attr_bridge_flags_get)(const struct rocker_port *rocker_port,
unsigned long *p_brport_flags);
+	int (*port_attr_bridge_flags_support_get)(const struct rocker_port *rocker_port,
+						  unsigned long *p_brport_flags);
int (*port_attr_bridge_ageing_time_set)(struct rocker_port *rocker_port,
u32 ageing_time,
struct switchdev_trans *trans);
int (*port_obj_vlan_add)(struct rocker_port *rocker_port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans);
+ const struct switchdev_obj_port_vlan *vlan);
int (*port_obj_vlan_del)(struct rocker_port *rocker_port,
const struct switchdev_obj_port_vlan *vlan);
- int (*port_obj_vlan_dump)(const struct rocker_port *rocker_port,
- struct switchdev_obj_port_vlan *vlan,
- switchdev_obj_dump_cb_t *cb);
int (*port_obj_fdb_add)(struct rocker_port *rocker_port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans);
+ u16 vid, const unsigned char *addr);
int (*port_obj_fdb_del)(struct rocker_port *rocker_port,
- const struct switchdev_obj_port_fdb *fdb);
- int (*port_obj_fdb_dump)(const struct rocker_port *rocker_port,
- struct switchdev_obj_port_fdb *fdb,
- switchdev_obj_dump_cb_t *cb);
+ u16 vid, const unsigned char *addr);
int (*port_master_linked)(struct rocker_port *rocker_port,
struct net_device *master);
int (*port_master_unlinked)(struct rocker_port *rocker_port,
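The rocker.h changes above strip switchdev_trans from most of the world ops:
the prepare/commit transaction handling is now done once in rocker_main.c,
which simply succeeds the prepare phase and only calls into the world ops on
commit. The recurring caller pattern, sketched with hypothetical names
(port_attr_foo_set is not a real op):

static int rocker_world_port_attr_foo_set(struct rocker_port *rocker_port,
					  u8 value,
					  struct switchdev_trans *trans)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_attr_foo_set)
		return -EOPNOTSUPP;

	if (switchdev_trans_ph_prepare(trans))
		return 0;	/* nothing left to preallocate */

	return wops->port_attr_foo_set(rocker_port, value);
}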
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index bab1361..b1e5c07 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1557,7 +1557,11 @@
if (!wops->port_attr_stp_state_set)
return -EOPNOTSUPP;
- return wops->port_attr_stp_state_set(rocker_port, state, trans);
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ return wops->port_attr_stp_state_set(rocker_port, state);
}
static int
@@ -1569,6 +1573,10 @@
if (!wops->port_attr_bridge_flags_set)
return -EOPNOTSUPP;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
return wops->port_attr_bridge_flags_set(rocker_port, brport_flags,
trans);
}
@@ -1585,6 +1593,20 @@
}
static int
+rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port *rocker_port,
+						unsigned long *p_brport_flags_support)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_attr_bridge_flags_support_get)
+ return -EOPNOTSUPP;
+ return wops->port_attr_bridge_flags_support_get(rocker_port,
+ p_brport_flags_support);
+}
+
+static int
rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
u32 ageing_time,
struct switchdev_trans *trans)
@@ -1594,6 +1616,10 @@
if (!wops->port_attr_bridge_ageing_time_set)
return -EOPNOTSUPP;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time,
trans);
}
@@ -1607,7 +1633,11 @@
if (!wops->port_obj_vlan_add)
return -EOPNOTSUPP;
- return wops->port_obj_vlan_add(rocker_port, vlan, trans);
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ return wops->port_obj_vlan_add(rocker_port, vlan);
}
static int
@@ -1622,50 +1652,26 @@
}
static int
-rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port,
- struct switchdev_obj_port_vlan *vlan,
- switchdev_obj_dump_cb_t *cb)
-{
- struct rocker_world_ops *wops = rocker_port->rocker->wops;
-
- if (!wops->port_obj_vlan_dump)
- return -EOPNOTSUPP;
- return wops->port_obj_vlan_dump(rocker_port, vlan, cb);
-}
-
-static int
-rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans)
+rocker_world_port_fdb_add(struct rocker_port *rocker_port,
+ struct switchdev_notifier_fdb_info *info)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_obj_fdb_add)
return -EOPNOTSUPP;
- return wops->port_obj_fdb_add(rocker_port, fdb, trans);
+
+ return wops->port_obj_fdb_add(rocker_port, info->vid, info->addr);
}
static int
-rocker_world_port_obj_fdb_del(struct rocker_port *rocker_port,
- const struct switchdev_obj_port_fdb *fdb)
+rocker_world_port_fdb_del(struct rocker_port *rocker_port,
+ struct switchdev_notifier_fdb_info *info)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_obj_fdb_del)
return -EOPNOTSUPP;
- return wops->port_obj_fdb_del(rocker_port, fdb);
-}
-
-static int
-rocker_world_port_obj_fdb_dump(const struct rocker_port *rocker_port,
- struct switchdev_obj_port_fdb *fdb,
- switchdev_obj_dump_cb_t *cb)
-{
- struct rocker_world_ops *wops = rocker_port->rocker->wops;
-
- if (!wops->port_obj_fdb_dump)
- return -EOPNOTSUPP;
- return wops->port_obj_fdb_dump(rocker_port, fdb, cb);
+ return wops->port_obj_fdb_del(rocker_port, info->vid, info->addr);
}
static int rocker_world_port_master_linked(struct rocker_port *rocker_port,
@@ -2022,12 +2028,6 @@
.ndo_start_xmit = rocker_port_xmit,
.ndo_set_mac_address = rocker_port_set_mac_address,
.ndo_change_mtu = rocker_port_change_mtu,
- .ndo_bridge_getlink = switchdev_port_bridge_getlink,
- .ndo_bridge_setlink = switchdev_port_bridge_setlink,
- .ndo_bridge_dellink = switchdev_port_bridge_dellink,
- .ndo_fdb_add = switchdev_port_fdb_add,
- .ndo_fdb_del = switchdev_port_fdb_del,
- .ndo_fdb_dump = switchdev_port_fdb_dump,
.ndo_get_phys_port_name = rocker_port_get_phys_port_name,
.ndo_change_proto_down = rocker_port_change_proto_down,
.ndo_neigh_destroy = rocker_port_neigh_destroy,
@@ -2053,6 +2053,10 @@
err = rocker_world_port_attr_bridge_flags_get(rocker_port,
&attr->u.brport_flags);
break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
+ err = rocker_world_port_attr_bridge_flags_support_get(rocker_port,
+ &attr->u.brport_flags_support);
+ break;
default:
return -EOPNOTSUPP;
}
@@ -2104,11 +2108,6 @@
SWITCHDEV_OBJ_PORT_VLAN(obj),
trans);
break;
- case SWITCHDEV_OBJ_ID_PORT_FDB:
- err = rocker_world_port_obj_fdb_add(rocker_port,
- SWITCHDEV_OBJ_PORT_FDB(obj),
- trans);
- break;
default:
err = -EOPNOTSUPP;
break;
@@ -2128,36 +2127,6 @@
err = rocker_world_port_obj_vlan_del(rocker_port,
SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
- case SWITCHDEV_OBJ_ID_PORT_FDB:
- err = rocker_world_port_obj_fdb_del(rocker_port,
- SWITCHDEV_OBJ_PORT_FDB(obj));
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-static int rocker_port_obj_dump(struct net_device *dev,
- struct switchdev_obj *obj,
- switchdev_obj_dump_cb_t *cb)
-{
- const struct rocker_port *rocker_port = netdev_priv(dev);
- int err = 0;
-
- switch (obj->id) {
- case SWITCHDEV_OBJ_ID_PORT_FDB:
- err = rocker_world_port_obj_fdb_dump(rocker_port,
- SWITCHDEV_OBJ_PORT_FDB(obj),
- cb);
- break;
- case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = rocker_world_port_obj_vlan_dump(rocker_port,
- SWITCHDEV_OBJ_PORT_VLAN(obj),
- cb);
- break;
default:
err = -EOPNOTSUPP;
break;
@@ -2171,7 +2140,6 @@
.switchdev_port_attr_set = rocker_port_attr_set,
.switchdev_port_obj_add = rocker_port_obj_add,
.switchdev_port_obj_del = rocker_port_obj_del,
- .switchdev_port_obj_dump = rocker_port_obj_dump,
};
struct rocker_fib_event_work {
@@ -2729,6 +2697,109 @@
kfree(rocker->msix_entries);
}
+static bool rocker_port_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &rocker_port_netdev_ops;
+}
+
+struct rocker_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct rocker_port *rocker_port;
+ unsigned long event;
+};
+
+static void
+rocker_fdb_offload_notify(struct rocker_port *rocker_port,
+ struct switchdev_notifier_fdb_info *recv_info)
+{
+ struct switchdev_notifier_fdb_info info;
+
+ info.addr = recv_info->addr;
+ info.vid = recv_info->vid;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ rocker_port->dev, &info.info);
+}
+
+static void rocker_switchdev_event_work(struct work_struct *work)
+{
+ struct rocker_switchdev_event_work *switchdev_work =
+ container_of(work, struct rocker_switchdev_event_work, work);
+ struct rocker_port *rocker_port = switchdev_work->rocker_port;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ int err;
+
+ rtnl_lock();
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb_info = &switchdev_work->fdb_info;
+ err = rocker_world_port_fdb_add(rocker_port, fdb_info);
+ if (err) {
+ netdev_dbg(rocker_port->dev, "fdb add failed err=%d\n", err);
+ break;
+ }
+ rocker_fdb_offload_notify(rocker_port, fdb_info);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb_info = &switchdev_work->fdb_info;
+ err = rocker_world_port_fdb_del(rocker_port, fdb_info);
+ if (err)
+ netdev_dbg(rocker_port->dev, "fdb delete failed err=%d\n", err);
+ break;
+ }
+ rtnl_unlock();
+
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(rocker_port->dev);
+}
+
+/* called under rcu_read_lock() */
+static int rocker_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct rocker_switchdev_event_work *switchdev_work;
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct rocker_port *rocker_port;
+
+ if (!rocker_port_dev_check(dev))
+ return NOTIFY_DONE;
+
+ rocker_port = netdev_priv(dev);
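+ /* Atomic context, so allocate with GFP_ATOMIC and defer the
+  * actual FDB programming to the driver's ordered workqueue.
+  */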
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, rocker_switchdev_event_work);
+ switchdev_work->rocker_port = rocker_port;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (unlikely(!switchdev_work->fdb_info.addr)) {
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+ }
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ /* Take a reference on the rocker device */
+ dev_hold(dev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(rocker_port->rocker->rocker_owq,
+ &switchdev_work->work);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block rocker_switchdev_notifier = {
+ .notifier_call = rocker_switchdev_event,
+};
+
static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct rocker *rocker;
@@ -2834,6 +2905,12 @@
if (err)
goto err_register_fib_notifier;
+ err = register_switchdev_notifier(&rocker_switchdev_notifier);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register switchdev notifier\n");
+ goto err_register_switchdev_notifier;
+ }
+
rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
err = rocker_probe_ports(rocker);
@@ -2848,6 +2925,8 @@
return 0;
err_probe_ports:
+ unregister_switchdev_notifier(&rocker_switchdev_notifier);
+err_register_switchdev_notifier:
unregister_fib_notifier(&rocker->fib_nb);
err_register_fib_notifier:
destroy_workqueue(rocker->rocker_owq);
@@ -2878,6 +2957,7 @@
struct rocker *rocker = pci_get_drvdata(pdev);
rocker_remove_ports(rocker);
+ unregister_switchdev_notifier(&rocker_switchdev_notifier);
unregister_fib_notifier(&rocker->fib_nb);
rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
destroy_workqueue(rocker->rocker_owq);
@@ -2902,11 +2982,6 @@
* Net device notifier event handler
************************************/
-static bool rocker_port_dev_check(const struct net_device *dev)
-{
- return dev->netdev_ops == &rocker_port_netdev_ops;
-}
-
static bool rocker_port_dev_check_under(const struct net_device *dev,
struct rocker *rocker)
{
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 2ae8524..bd0e3f1 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -300,64 +300,6 @@
return flags & OFDPA_OP_FLAG_NOWAIT;
}
-static void *__ofdpa_mem_alloc(struct switchdev_trans *trans, int flags,
- size_t size)
-{
- struct switchdev_trans_item *elem = NULL;
- gfp_t gfp_flags = (flags & OFDPA_OP_FLAG_NOWAIT) ?
- GFP_ATOMIC : GFP_KERNEL;
-
- /* If in transaction prepare phase, allocate the memory
- * and enqueue it on a transaction. If in transaction
- * commit phase, dequeue the memory from the transaction
- * rather than re-allocating the memory. The idea is the
- * driver code paths for prepare and commit are identical
- * so the memory allocated in the prepare phase is the
- * memory used in the commit phase.
- */
-
- if (!trans) {
- elem = kzalloc(size + sizeof(*elem), gfp_flags);
- } else if (switchdev_trans_ph_prepare(trans)) {
- elem = kzalloc(size + sizeof(*elem), gfp_flags);
- if (!elem)
- return NULL;
- switchdev_trans_item_enqueue(trans, elem, kfree, elem);
- } else {
- elem = switchdev_trans_item_dequeue(trans);
- }
-
- return elem ? elem + 1 : NULL;
-}
-
-static void *ofdpa_kzalloc(struct switchdev_trans *trans, int flags,
- size_t size)
-{
- return __ofdpa_mem_alloc(trans, flags, size);
-}
-
-static void *ofdpa_kcalloc(struct switchdev_trans *trans, int flags,
- size_t n, size_t size)
-{
- return __ofdpa_mem_alloc(trans, flags, n * size);
-}
-
-static void ofdpa_kfree(struct switchdev_trans *trans, const void *mem)
-{
- struct switchdev_trans_item *elem;
-
- /* Frees are ignored if in transaction prepare phase. The
- * memory remains on the per-port list until freed in the
- * commit phase.
- */
-
- if (switchdev_trans_ph_prepare(trans))
- return;
-
- elem = (struct switchdev_trans_item *) mem - 1;
- kfree(elem);
-}
-
/*************************************************************
* Flow, group, FDB, internal VLAN and neigh command prepares
*************************************************************/
@@ -815,8 +757,7 @@
}
static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
- struct ofdpa_flow_tbl_entry *match)
+ int flags, struct ofdpa_flow_tbl_entry *match)
{
struct ofdpa *ofdpa = ofdpa_port->ofdpa;
struct ofdpa_flow_tbl_entry *found;
@@ -831,9 +772,8 @@
if (found) {
match->cookie = found->cookie;
- if (!switchdev_trans_ph_prepare(trans))
- hash_del(&found->entry);
- ofdpa_kfree(trans, found);
+ hash_del(&found->entry);
+ kfree(found);
found = match;
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
} else {
@@ -842,22 +782,18 @@
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
}
- if (!switchdev_trans_ph_prepare(trans))
- hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);
-
+ hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);
spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);
- if (!switchdev_trans_ph_prepare(trans))
- return rocker_cmd_exec(ofdpa_port->rocker_port,
- ofdpa_flags_nowait(flags),
- ofdpa_cmd_flow_tbl_add,
- found, NULL, NULL);
+ return rocker_cmd_exec(ofdpa_port->rocker_port,
+ ofdpa_flags_nowait(flags),
+ ofdpa_cmd_flow_tbl_add,
+ found, NULL, NULL);
return 0;
}
static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
- struct ofdpa_flow_tbl_entry *match)
+ int flags, struct ofdpa_flow_tbl_entry *match)
{
struct ofdpa *ofdpa = ofdpa_port->ofdpa;
struct ofdpa_flow_tbl_entry *found;
@@ -872,45 +808,41 @@
found = ofdpa_flow_tbl_find(ofdpa, match);
if (found) {
- if (!switchdev_trans_ph_prepare(trans))
- hash_del(&found->entry);
+ hash_del(&found->entry);
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
}
spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);
- ofdpa_kfree(trans, match);
+ kfree(match);
if (found) {
- if (!switchdev_trans_ph_prepare(trans))
- err = rocker_cmd_exec(ofdpa_port->rocker_port,
- ofdpa_flags_nowait(flags),
- ofdpa_cmd_flow_tbl_del,
- found, NULL, NULL);
- ofdpa_kfree(trans, found);
+ err = rocker_cmd_exec(ofdpa_port->rocker_port,
+ ofdpa_flags_nowait(flags),
+ ofdpa_cmd_flow_tbl_del,
+ found, NULL, NULL);
+ kfree(found);
}
return err;
}
-static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
struct ofdpa_flow_tbl_entry *entry)
{
if (flags & OFDPA_OP_FLAG_REMOVE)
- return ofdpa_flow_tbl_del(ofdpa_port, trans, flags, entry);
+ return ofdpa_flow_tbl_del(ofdpa_port, flags, entry);
else
- return ofdpa_flow_tbl_add(ofdpa_port, trans, flags, entry);
+ return ofdpa_flow_tbl_add(ofdpa_port, flags, entry);
}
-static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port, int flags,
u32 in_pport, u32 in_pport_mask,
enum rocker_of_dpa_table_id goto_tbl)
{
struct ofdpa_flow_tbl_entry *entry;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
@@ -920,11 +852,11 @@
entry->key.ig_port.in_pport_mask = in_pport_mask;
entry->key.ig_port.goto_tbl = goto_tbl;
- return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+ return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+ int flags,
u32 in_pport, __be16 vlan_id,
__be16 vlan_id_mask,
enum rocker_of_dpa_table_id goto_tbl,
@@ -932,7 +864,7 @@
{
struct ofdpa_flow_tbl_entry *entry;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
@@ -946,11 +878,10 @@
entry->key.vlan.untagged = untagged;
entry->key.vlan.new_vlan_id = new_vlan_id;
- return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+ return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans,
u32 in_pport, u32 in_pport_mask,
__be16 eth_type, const u8 *eth_dst,
const u8 *eth_dst_mask, __be16 vlan_id,
@@ -959,7 +890,7 @@
{
struct ofdpa_flow_tbl_entry *entry;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
@@ -983,13 +914,13 @@
entry->key.term_mac.vlan_id_mask = vlan_id_mask;
entry->key.term_mac.copy_to_cpu = copy_to_cpu;
- return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+ return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
- const u8 *eth_dst, const u8 *eth_dst_mask,
- __be16 vlan_id, u32 tunnel_id,
+ int flags, const u8 *eth_dst,
+ const u8 *eth_dst_mask, __be16 vlan_id,
+ u32 tunnel_id,
enum rocker_of_dpa_table_id goto_tbl,
u32 group_id, bool copy_to_cpu)
{
@@ -999,7 +930,7 @@
bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
bool wild = false;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
@@ -1037,11 +968,10 @@
entry->key.bridge.group_id = group_id;
entry->key.bridge.copy_to_cpu = copy_to_cpu;
- return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+ return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans,
__be16 eth_type, __be32 dst,
__be32 dst_mask, u32 priority,
enum rocker_of_dpa_table_id goto_tbl,
@@ -1050,7 +980,7 @@
{
struct ofdpa_flow_tbl_entry *entry;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
@@ -1065,11 +995,10 @@
ucast_routing.group_id);
entry->fi = fi;
- return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+ return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
-static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port, int flags,
u32 in_pport, u32 in_pport_mask,
const u8 *eth_src, const u8 *eth_src_mask,
const u8 *eth_dst, const u8 *eth_dst_mask,
@@ -1081,7 +1010,7 @@
u32 priority;
struct ofdpa_flow_tbl_entry *entry;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
@@ -1116,7 +1045,7 @@
entry->key.acl.ip_tos_mask = ip_tos_mask;
entry->key.acl.group_id = group_id;
- return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+ return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
static struct ofdpa_group_tbl_entry *
@@ -1134,22 +1063,20 @@
return NULL;
}
-static void ofdpa_group_tbl_entry_free(struct switchdev_trans *trans,
- struct ofdpa_group_tbl_entry *entry)
+static void ofdpa_group_tbl_entry_free(struct ofdpa_group_tbl_entry *entry)
{
switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
- ofdpa_kfree(trans, entry->group_ids);
+ kfree(entry->group_ids);
break;
default:
break;
}
- ofdpa_kfree(trans, entry);
+ kfree(entry);
}
-static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port, int flags,
struct ofdpa_group_tbl_entry *match)
{
struct ofdpa *ofdpa = ofdpa_port->ofdpa;
@@ -1161,9 +1088,8 @@
found = ofdpa_group_tbl_find(ofdpa, match);
if (found) {
- if (!switchdev_trans_ph_prepare(trans))
- hash_del(&found->entry);
- ofdpa_group_tbl_entry_free(trans, found);
+ hash_del(&found->entry);
+ ofdpa_group_tbl_entry_free(found);
found = match;
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
} else {
@@ -1171,21 +1097,17 @@
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
}
- if (!switchdev_trans_ph_prepare(trans))
- hash_add(ofdpa->group_tbl, &found->entry, found->group_id);
+ hash_add(ofdpa->group_tbl, &found->entry, found->group_id);
spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);
- if (!switchdev_trans_ph_prepare(trans))
- return rocker_cmd_exec(ofdpa_port->rocker_port,
- ofdpa_flags_nowait(flags),
- ofdpa_cmd_group_tbl_add,
- found, NULL, NULL);
- return 0;
+ return rocker_cmd_exec(ofdpa_port->rocker_port,
+ ofdpa_flags_nowait(flags),
+ ofdpa_cmd_group_tbl_add,
+ found, NULL, NULL);
}
-static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port, int flags,
struct ofdpa_group_tbl_entry *match)
{
struct ofdpa *ofdpa = ofdpa_port->ofdpa;
@@ -1198,97 +1120,90 @@
found = ofdpa_group_tbl_find(ofdpa, match);
if (found) {
- if (!switchdev_trans_ph_prepare(trans))
- hash_del(&found->entry);
+ hash_del(&found->entry);
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
}
spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);
- ofdpa_group_tbl_entry_free(trans, match);
+ ofdpa_group_tbl_entry_free(match);
if (found) {
- if (!switchdev_trans_ph_prepare(trans))
- err = rocker_cmd_exec(ofdpa_port->rocker_port,
- ofdpa_flags_nowait(flags),
- ofdpa_cmd_group_tbl_del,
- found, NULL, NULL);
- ofdpa_group_tbl_entry_free(trans, found);
+ err = rocker_cmd_exec(ofdpa_port->rocker_port,
+ ofdpa_flags_nowait(flags),
+ ofdpa_cmd_group_tbl_del,
+ found, NULL, NULL);
+ ofdpa_group_tbl_entry_free(found);
}
return err;
}
-static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
struct ofdpa_group_tbl_entry *entry)
{
if (flags & OFDPA_OP_FLAG_REMOVE)
- return ofdpa_group_tbl_del(ofdpa_port, trans, flags, entry);
+ return ofdpa_group_tbl_del(ofdpa_port, flags, entry);
else
- return ofdpa_group_tbl_add(ofdpa_port, trans, flags, entry);
+ return ofdpa_group_tbl_add(ofdpa_port, flags, entry);
}
static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
- __be16 vlan_id, u32 out_pport,
- int pop_vlan)
+ int flags, __be16 vlan_id,
+ u32 out_pport, int pop_vlan)
{
struct ofdpa_group_tbl_entry *entry;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
entry->l2_interface.pop_vlan = pop_vlan;
- return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
+ return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
}
static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans,
int flags, u8 group_count,
const u32 *group_ids, u32 group_id)
{
struct ofdpa_group_tbl_entry *entry;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->group_id = group_id;
entry->group_count = group_count;
- entry->group_ids = ofdpa_kcalloc(trans, flags,
- group_count, sizeof(u32));
+ entry->group_ids = kcalloc(group_count, sizeof(u32), GFP_KERNEL);
if (!entry->group_ids) {
- ofdpa_kfree(trans, entry);
+ kfree(entry);
return -ENOMEM;
}
memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
- return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
+ return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
}
static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
- __be16 vlan_id, u8 group_count,
- const u32 *group_ids, u32 group_id)
+ int flags, __be16 vlan_id,
+ u8 group_count, const u32 *group_ids,
+ u32 group_id)
{
- return ofdpa_group_l2_fan_out(ofdpa_port, trans, flags,
+ return ofdpa_group_l2_fan_out(ofdpa_port, flags,
group_count, group_ids,
group_id);
}
-static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port, int flags,
u32 index, const u8 *src_mac, const u8 *dst_mac,
__be16 vlan_id, bool ttl_check, u32 pport)
{
struct ofdpa_group_tbl_entry *entry;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
@@ -1301,7 +1216,7 @@
entry->l3_unicast.ttl_check = ttl_check;
entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
- return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
+ return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
}
static struct ofdpa_neigh_tbl_entry *
@@ -1318,43 +1233,34 @@
}
static void ofdpa_neigh_add(struct ofdpa *ofdpa,
- struct switchdev_trans *trans,
struct ofdpa_neigh_tbl_entry *entry)
{
- if (!switchdev_trans_ph_commit(trans))
- entry->index = ofdpa->neigh_tbl_next_index++;
- if (switchdev_trans_ph_prepare(trans))
- return;
+ entry->index = ofdpa->neigh_tbl_next_index++;
entry->ref_count++;
hash_add(ofdpa->neigh_tbl, &entry->entry,
be32_to_cpu(entry->ip_addr));
}
-static void ofdpa_neigh_del(struct switchdev_trans *trans,
- struct ofdpa_neigh_tbl_entry *entry)
+static void ofdpa_neigh_del(struct ofdpa_neigh_tbl_entry *entry)
{
- if (switchdev_trans_ph_prepare(trans))
- return;
if (--entry->ref_count == 0) {
hash_del(&entry->entry);
- ofdpa_kfree(trans, entry);
+ kfree(entry);
}
}
static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
- struct switchdev_trans *trans,
const u8 *eth_dst, bool ttl_check)
{
if (eth_dst) {
ether_addr_copy(entry->eth_dst, eth_dst);
entry->ttl_check = ttl_check;
- } else if (!switchdev_trans_ph_prepare(trans)) {
+ } else {
entry->ref_count++;
}
}
static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans,
int flags, __be32 ip_addr, const u8 *eth_dst)
{
struct ofdpa *ofdpa = ofdpa_port->ofdpa;
@@ -1371,7 +1277,7 @@
bool removing;
int err = 0;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
@@ -1388,12 +1294,12 @@
entry->dev = ofdpa_port->dev;
ether_addr_copy(entry->eth_dst, eth_dst);
entry->ttl_check = true;
- ofdpa_neigh_add(ofdpa, trans, entry);
+ ofdpa_neigh_add(ofdpa, entry);
} else if (removing) {
memcpy(entry, found, sizeof(*entry));
- ofdpa_neigh_del(trans, found);
+ ofdpa_neigh_del(found);
} else if (updating) {
- ofdpa_neigh_update(found, trans, eth_dst, true);
+ ofdpa_neigh_update(found, eth_dst, true);
memcpy(entry, found, sizeof(*entry));
} else {
err = -ENOENT;
@@ -1410,7 +1316,7 @@
* other routes' nexthops.
*/
- err = ofdpa_group_l3_unicast(ofdpa_port, trans, flags,
+ err = ofdpa_group_l3_unicast(ofdpa_port, flags,
entry->index,
ofdpa_port->dev->dev_addr,
entry->eth_dst,
@@ -1425,7 +1331,7 @@
if (adding || removing) {
group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
- err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans,
+ err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port,
eth_type, ip_addr,
inet_make_mask(32),
priority, goto_tbl,
@@ -1438,13 +1344,12 @@
err_out:
if (!adding)
- ofdpa_kfree(trans, entry);
+ kfree(entry);
return err;
}
static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans,
__be32 ip_addr)
{
struct net_device *dev = ofdpa_port->dev;
@@ -1463,7 +1368,7 @@
*/
if (n->nud_state & NUD_VALID)
- err = ofdpa_port_ipv4_neigh(ofdpa_port, trans, 0,
+ err = ofdpa_port_ipv4_neigh(ofdpa_port, 0,
ip_addr, n->ha);
else
neigh_event_send(n, NULL);
@@ -1473,8 +1378,7 @@
}
static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
- __be32 ip_addr, u32 *index)
+ int flags, __be32 ip_addr, u32 *index)
{
struct ofdpa *ofdpa = ofdpa_port->ofdpa;
struct ofdpa_neigh_tbl_entry *entry;
@@ -1486,7 +1390,7 @@
bool resolved = true;
int err = 0;
- entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
@@ -1501,14 +1405,14 @@
if (adding) {
entry->ip_addr = ip_addr;
entry->dev = ofdpa_port->dev;
- ofdpa_neigh_add(ofdpa, trans, entry);
+ ofdpa_neigh_add(ofdpa, entry);
*index = entry->index;
resolved = false;
} else if (removing) {
- ofdpa_neigh_del(trans, found);
+ ofdpa_neigh_del(found);
*index = found->index;
} else if (updating) {
- ofdpa_neigh_update(found, trans, NULL, false);
+ ofdpa_neigh_update(found, NULL, false);
resolved = !is_zero_ether_addr(found->eth_dst);
*index = found->index;
} else {
@@ -1518,7 +1422,7 @@
spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);
if (!adding)
- ofdpa_kfree(trans, entry);
+ kfree(entry);
if (err)
return err;
@@ -1526,7 +1430,7 @@
/* Resolved means neigh ip_addr is resolved to neigh mac. */
if (!resolved)
- err = ofdpa_port_ipv4_resolve(ofdpa_port, trans, ip_addr);
+ err = ofdpa_port_ipv4_resolve(ofdpa_port, ip_addr);
return err;
}
@@ -1541,7 +1445,6 @@
}
static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans,
int flags, __be16 vlan_id)
{
struct ofdpa_port *p;
@@ -1553,7 +1456,7 @@
int err = 0;
int i;
- group_ids = ofdpa_kcalloc(trans, flags, port_count, sizeof(u32));
+ group_ids = kcalloc(port_count, sizeof(u32), GFP_KERNEL);
if (!group_ids)
return -ENOMEM;
@@ -1578,18 +1481,17 @@
if (group_count == 0)
goto no_ports_in_vlan;
- err = ofdpa_group_l2_flood(ofdpa_port, trans, flags, vlan_id,
+ err = ofdpa_group_l2_flood(ofdpa_port, flags, vlan_id,
group_count, group_ids, group_id);
if (err)
netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
no_ports_in_vlan:
- ofdpa_kfree(trans, group_ids);
+ kfree(group_ids);
return err;
}
-static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port, int flags,
__be16 vlan_id, bool pop_vlan)
{
const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
@@ -1608,7 +1510,7 @@
if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
ofdpa_port->stp_state == BR_STATE_FORWARDING) {
out_pport = ofdpa_port->pport;
- err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
+ err = ofdpa_group_l2_interface(ofdpa_port, flags,
vlan_id, out_pport, pop_vlan);
if (err) {
netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
@@ -1632,7 +1534,7 @@
return 0;
out_pport = 0;
- err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
+ err = ofdpa_group_l2_interface(ofdpa_port, flags,
vlan_id, out_pport, pop_vlan);
if (err) {
netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
@@ -1693,8 +1595,7 @@
},
};
-static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port, int flags,
const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
u32 in_pport = ofdpa_port->pport;
@@ -1710,7 +1611,7 @@
u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
int err;
- err = ofdpa_flow_tbl_acl(ofdpa_port, trans, flags,
+ err = ofdpa_flow_tbl_acl(ofdpa_port, flags,
in_pport, in_pport_mask,
eth_src, eth_src_mask,
ctrl->eth_dst, ctrl->eth_dst_mask,
@@ -1727,9 +1628,7 @@
}
static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans,
- int flags,
- const struct ofdpa_ctrl *ctrl,
+ int flags, const struct ofdpa_ctrl *ctrl,
__be16 vlan_id)
{
enum rocker_of_dpa_table_id goto_tbl =
@@ -1741,7 +1640,7 @@
if (!ofdpa_port_is_bridged(ofdpa_port))
return 0;
- err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags,
+ err = ofdpa_flow_tbl_bridge(ofdpa_port, flags,
ctrl->eth_dst, ctrl->eth_dst_mask,
vlan_id, tunnel_id,
goto_tbl, group_id, ctrl->copy_to_cpu);
@@ -1752,8 +1651,7 @@
return err;
}
-static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port, int flags,
const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
u32 in_pport_mask = 0xffffffff;
@@ -1763,8 +1661,7 @@
if (ntohs(vlan_id) == 0)
vlan_id = ofdpa_port->internal_vlan_id;
- err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
- ofdpa_port->pport, in_pport_mask,
+ err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport, in_pport_mask,
ctrl->eth_type, ctrl->eth_dst,
ctrl->eth_dst_mask, vlan_id,
vlan_id_mask, ctrl->copy_to_cpu,
@@ -1776,26 +1673,24 @@
return err;
}
-static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port, int flags,
const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
if (ctrl->acl)
- return ofdpa_port_ctrl_vlan_acl(ofdpa_port, trans, flags,
+ return ofdpa_port_ctrl_vlan_acl(ofdpa_port, flags,
ctrl, vlan_id);
if (ctrl->bridge)
- return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, trans, flags,
+ return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, flags,
ctrl, vlan_id);
if (ctrl->term)
- return ofdpa_port_ctrl_vlan_term(ofdpa_port, trans, flags,
+ return ofdpa_port_ctrl_vlan_term(ofdpa_port, flags,
ctrl, vlan_id);
return -EOPNOTSUPP;
}
-static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port, int flags,
__be16 vlan_id)
{
int err = 0;
@@ -1803,7 +1698,7 @@
for (i = 0; i < OFDPA_CTRL_MAX; i++) {
if (ofdpa_port->ctrls[i]) {
- err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
+ err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
&ofdpa_ctrls[i], vlan_id);
if (err)
return err;
@@ -1813,8 +1708,7 @@
return err;
}
-static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
+static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port, int flags,
const struct ofdpa_ctrl *ctrl)
{
u16 vid;
@@ -1823,7 +1717,7 @@
for (vid = 1; vid < VLAN_N_VID; vid++) {
if (!test_bit(vid, ofdpa_port->vlan_bitmap))
continue;
- err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
+ err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
ctrl, htons(vid));
if (err)
break;
@@ -1832,8 +1726,8 @@
return err;
}
-static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags, u16 vid)
+static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port, int flags,
+ u16 vid)
{
enum rocker_of_dpa_table_id goto_tbl =
ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
@@ -1857,43 +1751,44 @@
change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
if (adding) {
- err = ofdpa_port_ctrl_vlan_add(ofdpa_port, trans, flags,
+ err = ofdpa_port_ctrl_vlan_add(ofdpa_port, flags,
internal_vlan_id);
if (err) {
netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);
- goto err_out;
+ goto err_vlan_add;
}
}
- err = ofdpa_port_vlan_l2_groups(ofdpa_port, trans, flags,
+ err = ofdpa_port_vlan_l2_groups(ofdpa_port, flags,
internal_vlan_id, untagged);
if (err) {
netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
- goto err_out;
+ goto err_vlan_l2_groups;
}
- err = ofdpa_port_vlan_flood_group(ofdpa_port, trans, flags,
+ err = ofdpa_port_vlan_flood_group(ofdpa_port, flags,
internal_vlan_id);
if (err) {
netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
- goto err_out;
+ goto err_flood_group;
}
- err = ofdpa_flow_tbl_vlan(ofdpa_port, trans, flags,
+ err = ofdpa_flow_tbl_vlan(ofdpa_port, flags,
in_pport, vlan_id, vlan_id_mask,
goto_tbl, untagged, internal_vlan_id);
if (err)
netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);
-err_out:
- if (switchdev_trans_ph_prepare(trans))
- change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
+ return 0;
+err_vlan_add:
+err_vlan_l2_groups:
+err_flood_group:
+ change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
return err;
}
-static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags)
+static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port, int flags)
{
enum rocker_of_dpa_table_id goto_tbl;
u32 in_pport;
@@ -1908,7 +1803,7 @@
in_pport_mask = 0xffff0000;
goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
- err = ofdpa_flow_tbl_ig_port(ofdpa_port, trans, flags,
+ err = ofdpa_flow_tbl_ig_port(ofdpa_port, flags,
in_pport, in_pport_mask,
goto_tbl);
if (err)
@@ -1920,7 +1815,6 @@
struct ofdpa_fdb_learn_work {
struct work_struct work;
struct ofdpa_port *ofdpa_port;
- struct switchdev_trans *trans;
int flags;
u8 addr[ETH_ALEN];
u16 vid;
@@ -1939,19 +1833,18 @@
rtnl_lock();
if (learned && removing)
- call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
lw->ofdpa_port->dev, &info.info);
else if (learned && !removing)
- call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
+ call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
lw->ofdpa_port->dev, &info.info);
rtnl_unlock();
- ofdpa_kfree(lw->trans, work);
+ kfree(work);
}
static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
- const u8 *addr, __be16 vlan_id)
+ int flags, const u8 *addr, __be16 vlan_id)
{
struct ofdpa_fdb_learn_work *lw;
enum rocker_of_dpa_table_id goto_tbl =
@@ -1959,7 +1852,6 @@
u32 out_pport = ofdpa_port->pport;
u32 tunnel_id = 0;
u32 group_id = ROCKER_GROUP_NONE;
- bool syncing = !!(ofdpa_port->brport_flags & BR_LEARNING_SYNC);
bool copy_to_cpu = false;
int err;
@@ -1967,36 +1859,28 @@
group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
- err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags, addr,
+ err = ofdpa_flow_tbl_bridge(ofdpa_port, flags, addr,
NULL, vlan_id, tunnel_id, goto_tbl,
group_id, copy_to_cpu);
if (err)
return err;
}
- if (!syncing)
- return 0;
-
if (!ofdpa_port_is_bridged(ofdpa_port))
return 0;
- lw = ofdpa_kzalloc(trans, flags, sizeof(*lw));
+ lw = kzalloc(sizeof(*lw), GFP_ATOMIC);
if (!lw)
return -ENOMEM;
INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);
lw->ofdpa_port = ofdpa_port;
- lw->trans = trans;
lw->flags = flags;
ether_addr_copy(lw->addr, addr);
lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);
- if (switchdev_trans_ph_prepare(trans))
- ofdpa_kfree(trans, lw);
- else
- schedule_work(&lw->work);
-
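+ /* with the switchdev prepare/commit phases removed, the learn work is always scheduled */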
+ schedule_work(&lw->work);
return 0;
}
@@ -2014,7 +1898,6 @@
}
static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans,
const unsigned char *addr,
__be16 vlan_id, int flags)
{
@@ -2024,7 +1907,7 @@
bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
unsigned long lock_flags;
- fdb = ofdpa_kzalloc(trans, flags, sizeof(*fdb));
+ fdb = kzalloc(sizeof(*fdb), GFP_KERNEL);
if (!fdb)
return -ENOMEM;
@@ -2042,32 +1925,29 @@
if (found) {
found->touched = jiffies;
if (removing) {
- ofdpa_kfree(trans, fdb);
- if (!switchdev_trans_ph_prepare(trans))
- hash_del(&found->entry);
+ kfree(fdb);
+ hash_del(&found->entry);
}
} else if (!removing) {
- if (!switchdev_trans_ph_prepare(trans))
- hash_add(ofdpa->fdb_tbl, &fdb->entry,
- fdb->key_crc32);
+ hash_add(ofdpa->fdb_tbl, &fdb->entry,
+ fdb->key_crc32);
}
spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
/* Check if adding and already exists, or removing and can't find */
if (!found != !removing) {
- ofdpa_kfree(trans, fdb);
+ kfree(fdb);
if (!found && removing)
return 0;
/* Refreshing existing to update aging timers */
flags |= OFDPA_OP_FLAG_REFRESH;
}
- return ofdpa_port_fdb_learn(ofdpa_port, trans, flags, addr, vlan_id);
+ return ofdpa_port_fdb_learn(ofdpa_port, flags, addr, vlan_id);
}
-static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags)
+static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port, int flags)
{
struct ofdpa *ofdpa = ofdpa_port->ofdpa;
struct ofdpa_fdb_tbl_entry *found;
@@ -2089,13 +1969,12 @@
continue;
if (!found->learned)
continue;
- err = ofdpa_port_fdb_learn(ofdpa_port, trans, flags,
+ err = ofdpa_port_fdb_learn(ofdpa_port, flags,
found->key.addr,
found->key.vlan_id);
if (err)
goto err_out;
- if (!switchdev_trans_ph_prepare(trans))
- hash_del(&found->entry);
+ hash_del(&found->entry);
}
err_out:
@@ -2125,8 +2004,8 @@
ofdpa_port = entry->key.ofdpa_port;
expires = entry->touched + ofdpa_port->ageing_time;
if (time_before_eq(expires, jiffies)) {
- ofdpa_port_fdb_learn(ofdpa_port, NULL,
- flags, entry->key.addr,
+ ofdpa_port_fdb_learn(ofdpa_port, flags,
+ entry->key.addr,
entry->key.vlan_id);
hash_del(&entry->entry);
} else if (time_before(expires, next_timer)) {
@@ -2140,8 +2019,7 @@
}
static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags,
- __be16 vlan_id)
+ int flags, __be16 vlan_id)
{
u32 in_pport_mask = 0xffffffff;
__be16 eth_type;
@@ -2154,26 +2032,25 @@
vlan_id = ofdpa_port->internal_vlan_id;
eth_type = htons(ETH_P_IP);
- err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
- ofdpa_port->pport, in_pport_mask,
- eth_type, ofdpa_port->dev->dev_addr,
+ err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
+ in_pport_mask, eth_type,
+ ofdpa_port->dev->dev_addr,
dst_mac_mask, vlan_id, vlan_id_mask,
copy_to_cpu, flags);
if (err)
return err;
eth_type = htons(ETH_P_IPV6);
- err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
- ofdpa_port->pport, in_pport_mask,
- eth_type, ofdpa_port->dev->dev_addr,
+ err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
+ in_pport_mask, eth_type,
+ ofdpa_port->dev->dev_addr,
dst_mac_mask, vlan_id, vlan_id_mask,
copy_to_cpu, flags);
return err;
}
-static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, int flags)
+static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port, int flags)
{
bool pop_vlan;
u32 out_pport;
@@ -2198,7 +2075,7 @@
continue;
vlan_id = htons(vid);
pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
- err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
+ err = ofdpa_group_l2_interface(ofdpa_port, flags,
vlan_id, out_pport, pop_vlan);
if (err) {
netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
@@ -2211,7 +2088,6 @@
}
static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans,
int flags, u8 state)
{
bool want[OFDPA_CTRL_MAX] = { 0, };
@@ -2220,11 +2096,12 @@
int err;
int i;
+ memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
prev_state = ofdpa_port->stp_state;
- if (prev_state == state)
+
+ if (ofdpa_port->stp_state == state)
return 0;
- memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
ofdpa_port->stp_state = state;
switch (state) {
@@ -2254,26 +2131,29 @@
if (want[i] != ofdpa_port->ctrls[i]) {
int ctrl_flags = flags |
(want[i] ? 0 : OFDPA_OP_FLAG_REMOVE);
- err = ofdpa_port_ctrl(ofdpa_port, trans, ctrl_flags,
+ err = ofdpa_port_ctrl(ofdpa_port, ctrl_flags,
&ofdpa_ctrls[i]);
if (err)
- goto err_out;
+ goto err_port_ctrl;
ofdpa_port->ctrls[i] = want[i];
}
}
- err = ofdpa_port_fdb_flush(ofdpa_port, trans, flags);
+ err = ofdpa_port_fdb_flush(ofdpa_port, flags);
if (err)
- goto err_out;
+ goto err_fdb_flush;
- err = ofdpa_port_fwding(ofdpa_port, trans, flags);
+ err = ofdpa_port_fwding(ofdpa_port, flags);
+ if (err)
+ goto err_port_fwding;
-err_out:
- if (switchdev_trans_ph_prepare(trans)) {
- memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
- ofdpa_port->stp_state = prev_state;
- }
+ return 0;
+err_port_ctrl:
+err_fdb_flush:
+err_port_fwding:
+ memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
+ ofdpa_port->stp_state = prev_state;
return err;
}
@@ -2284,7 +2164,7 @@
return 0;
/* port is not bridged, so simulate going to FORWARDING state */
- return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
+ return ofdpa_port_stp_update(ofdpa_port, flags,
BR_STATE_FORWARDING);
}
@@ -2295,25 +2175,24 @@
return 0;
/* port is not bridged, so simulate going to DISABLED state */
- return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
+ return ofdpa_port_stp_update(ofdpa_port, flags,
BR_STATE_DISABLED);
}
static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans,
u16 vid, u16 flags)
{
int err;
/* XXX deal with flags for PVID and untagged */
- err = ofdpa_port_vlan(ofdpa_port, trans, 0, vid);
+ err = ofdpa_port_vlan(ofdpa_port, 0, vid);
if (err)
return err;
- err = ofdpa_port_router_mac(ofdpa_port, trans, 0, htons(vid));
+ err = ofdpa_port_router_mac(ofdpa_port, 0, htons(vid));
if (err)
- ofdpa_port_vlan(ofdpa_port, trans,
+ ofdpa_port_vlan(ofdpa_port,
OFDPA_OP_FLAG_REMOVE, vid);
return err;
@@ -2324,13 +2203,13 @@
{
int err;
- err = ofdpa_port_router_mac(ofdpa_port, NULL,
- OFDPA_OP_FLAG_REMOVE, htons(vid));
+ err = ofdpa_port_router_mac(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
+ htons(vid));
if (err)
return err;
- return ofdpa_port_vlan(ofdpa_port, NULL,
- OFDPA_OP_FLAG_REMOVE, vid);
+ return ofdpa_port_vlan(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
+ vid);
}
static struct ofdpa_internal_vlan_tbl_entry *
@@ -2389,10 +2268,9 @@
return found->vlan_id;
}
-static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,
- struct switchdev_trans *trans, __be32 dst,
- int dst_len, struct fib_info *fi,
- u32 tb_id, int flags)
+static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port, __be32 dst,
+ int dst_len, struct fib_info *fi, u32 tb_id,
+ int flags)
{
const struct fib_nh *nh;
__be16 eth_type = htons(ETH_P_IP);
@@ -2414,7 +2292,7 @@
has_gw = !!nh->nh_gw;
if (has_gw && nh_on_port) {
- err = ofdpa_port_ipv4_nh(ofdpa_port, trans, flags,
+ err = ofdpa_port_ipv4_nh(ofdpa_port, flags,
nh->nh_gw, &index);
if (err)
return err;
@@ -2425,7 +2303,7 @@
group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
}
- err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans, eth_type, dst,
+ err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, eth_type, dst,
dst_mask, priority, goto_tbl,
group_id, fi, flags);
if (err)
@@ -2550,7 +2428,7 @@
ofdpa_port->rocker_port = rocker_port;
ofdpa_port->dev = rocker_port->dev;
ofdpa_port->pport = rocker_port->pport;
- ofdpa_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
+ ofdpa_port->brport_flags = BR_LEARNING;
ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME;
return 0;
}
@@ -2563,7 +2441,7 @@
rocker_port_set_learning(rocker_port,
!!(ofdpa_port->brport_flags & BR_LEARNING));
- err = ofdpa_port_ig_tbl(ofdpa_port, NULL, 0);
+ err = ofdpa_port_ig_tbl(ofdpa_port, 0);
if (err) {
netdev_err(ofdpa_port->dev, "install ig port table failed\n");
return err;
@@ -2573,7 +2451,7 @@
ofdpa_port_internal_vlan_id_get(ofdpa_port,
ofdpa_port->dev->ifindex);
- err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
+ err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
if (err) {
netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n");
goto err_untagged_vlan;
@@ -2581,7 +2459,7 @@
return 0;
err_untagged_vlan:
- ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
+ ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
return err;
}
@@ -2589,7 +2467,7 @@
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
- ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
+ ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
}
static int ofdpa_port_open(struct rocker_port *rocker_port)
@@ -2607,12 +2485,11 @@
}
static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
- u8 state,
- struct switchdev_trans *trans)
+ u8 state)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
- return ofdpa_port_stp_update(ofdpa_port, trans, 0, state);
+ return ofdpa_port_stp_update(ofdpa_port, 0, state);
}
static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
@@ -2647,6 +2524,16 @@
}
static int
+ofdpa_port_attr_bridge_flags_support_get(const struct rocker_port *rocker_port,
+ unsigned long *p_brport_flags_support)
+{
+ *p_brport_flags_support = BR_LEARNING;
+ return 0;
+}
+
+static int
ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
u32 ageing_time,
struct switchdev_trans *trans)
@@ -2665,15 +2552,14 @@
}
static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
u16 vid;
int err;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = ofdpa_port_vlan_add(ofdpa_port, trans, vid, vlan->flags);
+ err = ofdpa_port_vlan_add(ofdpa_port, vid, vlan->flags);
if (err)
return err;
}
@@ -2697,82 +2583,29 @@
return 0;
}
-static int ofdpa_port_obj_vlan_dump(const struct rocker_port *rocker_port,
- struct switchdev_obj_port_vlan *vlan,
- switchdev_obj_dump_cb_t *cb)
-{
- const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
- u16 vid;
- int err = 0;
-
- for (vid = 1; vid < VLAN_N_VID; vid++) {
- if (!test_bit(vid, ofdpa_port->vlan_bitmap))
- continue;
- vlan->flags = 0;
- if (ofdpa_vlan_id_is_internal(htons(vid)))
- vlan->flags |= BRIDGE_VLAN_INFO_PVID;
- vlan->vid_begin = vlan->vid_end = vid;
- err = cb(&vlan->obj);
- if (err)
- break;
- }
-
- return err;
-}
-
static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans)
+ u16 vid, const unsigned char *addr)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
- __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);
+ __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
if (!ofdpa_port_is_bridged(ofdpa_port))
return -EINVAL;
- return ofdpa_port_fdb(ofdpa_port, trans, fdb->addr, vlan_id, 0);
+ return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, 0);
}
static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
- const struct switchdev_obj_port_fdb *fdb)
+ u16 vid, const unsigned char *addr)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
- __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);
+ __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
int flags = OFDPA_OP_FLAG_REMOVE;
if (!ofdpa_port_is_bridged(ofdpa_port))
return -EINVAL;
- return ofdpa_port_fdb(ofdpa_port, NULL, fdb->addr, vlan_id, flags);
-}
-
-static int ofdpa_port_obj_fdb_dump(const struct rocker_port *rocker_port,
- struct switchdev_obj_port_fdb *fdb,
- switchdev_obj_dump_cb_t *cb)
-{
- const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
- struct ofdpa *ofdpa = ofdpa_port->ofdpa;
- struct ofdpa_fdb_tbl_entry *found;
- struct hlist_node *tmp;
- unsigned long lock_flags;
- int bkt;
- int err = 0;
-
- spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
- hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
- if (found->key.ofdpa_port != ofdpa_port)
- continue;
- ether_addr_copy(fdb->addr, found->key.addr);
- fdb->ndm_state = NUD_REACHABLE;
- fdb->vid = ofdpa_port_vlan_to_vid(ofdpa_port,
- found->key.vlan_id);
- err = cb(&fdb->obj);
- if (err)
- break;
- }
- spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
-
- return err;
+ return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
}
static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
@@ -2797,7 +2630,7 @@
ofdpa_port->bridge_dev = bridge;
- return ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
+ return ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
}
static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
@@ -2816,7 +2649,7 @@
ofdpa_port->bridge_dev = NULL;
- err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
+ err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
if (err)
return err;
@@ -2875,7 +2708,7 @@
OFDPA_OP_FLAG_NOWAIT;
__be32 ip_addr = *(__be32 *) n->primary_key;
- return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
+ return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
}
static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
@@ -2885,7 +2718,7 @@
int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
__be32 ip_addr = *(__be32 *) n->primary_key;
- return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
+ return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
}
static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
@@ -2899,7 +2732,7 @@
ofdpa_port->stp_state != BR_STATE_FORWARDING)
return 0;
- return ofdpa_port_fdb(ofdpa_port, NULL, addr, vlan_id, flags);
+ return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
}
static struct ofdpa_port *ofdpa_port_dev_lower_find(struct net_device *dev,
@@ -2923,7 +2756,7 @@
ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
if (!ofdpa_port)
return 0;
- err = ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fen_info->dst),
+ err = ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
fen_info->dst_len, fen_info->fi,
fen_info->tb_id, 0);
if (err)
@@ -2944,7 +2777,7 @@
if (!ofdpa_port)
return 0;
fib_info_offload_dec(fen_info->fi);
- return ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fen_info->dst),
+ return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
fen_info->dst_len, fen_info->fi,
fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
}
@@ -2971,7 +2804,7 @@
if (!ofdpa_port)
continue;
fib_info_offload_dec(flow_entry->fi);
- ofdpa_flow_tbl_del(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE,
+ ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
flow_entry);
}
spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
@@ -2993,13 +2826,12 @@
.port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
.port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
.port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get,
+ .port_attr_bridge_flags_support_get = ofdpa_port_attr_bridge_flags_support_get,
.port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
.port_obj_vlan_del = ofdpa_port_obj_vlan_del,
- .port_obj_vlan_dump = ofdpa_port_obj_vlan_dump,
.port_obj_fdb_add = ofdpa_port_obj_fdb_add,
.port_obj_fdb_del = ofdpa_port_obj_fdb_del,
- .port_obj_fdb_dump = ofdpa_port_obj_fdb_dump,
.port_master_linked = ofdpa_port_master_linked,
.port_master_unlinked = ofdpa_port_master_unlinked,
.port_neigh_update = ofdpa_port_neigh_update,
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 1e59435..89831ad 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -1418,8 +1418,7 @@
priv->hw->desc->tx_enable_tstamp(first_desc);
}
- if (!tqueue->hwts_tx_en)
- skb_tx_timestamp(skb);
+ skb_tx_timestamp(skb);
priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 78efb28..ad9c4de 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -6068,6 +6068,7 @@
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
init->rx_filter = HWTSTAMP_FILTER_ALL;
rc = efx_ptp_change_mode(efx, true, 0);
if (!rc)
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index a0c52e3..fcea937 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -32,8 +32,8 @@
struct net_device *net_dev);
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc);
+int efx_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *tc);
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
extern unsigned int efx_piobuf_size;
extern bool efx_separate_tx_channels;
diff --git a/drivers/net/ethernet/sfc/falcon/efx.h b/drivers/net/ethernet/sfc/falcon/efx.h
index c89456f..e5a7a40 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.h
+++ b/drivers/net/ethernet/sfc/falcon/efx.h
@@ -32,8 +32,8 @@
struct net_device *net_dev);
netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb);
void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index);
-int ef4_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc);
+int ef4_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *tc);
unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx);
extern bool ef4_separate_tx_channels;
diff --git a/drivers/net/ethernet/sfc/falcon/selftest.c b/drivers/net/ethernet/sfc/falcon/selftest.c
index 92bc34c..55c0fbb 100644
--- a/drivers/net/ethernet/sfc/falcon/selftest.c
+++ b/drivers/net/ethernet/sfc/falcon/selftest.c
@@ -431,8 +431,7 @@
/* Copy the payload in, incrementing the source address to
* exercise the rss vectors */
- payload = ((struct ef4_loopback_payload *)
- skb_put(skb, sizeof(state->payload)));
+ payload = skb_put(skb, sizeof(state->payload));
memcpy(payload, &state->payload, sizeof(state->payload));
payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c
index f6daf09..f1520a4 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.c
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -425,8 +425,8 @@
efx->n_tx_channels : 0));
}
-int ef4_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
- struct tc_to_netdev *ntc)
+int ef4_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *ntc)
{
struct ef4_nic *efx = netdev_priv(net_dev);
struct ef4_channel *channel;
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index dab286a..f693694 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -431,8 +431,7 @@
/* Copy the payload in, incrementing the source address to
* exercise the rss vectors */
- payload = ((struct efx_loopback_payload *)
- skb_put(skb, sizeof(state->payload)));
+ payload = skb_put(skb, sizeof(state->payload));
memcpy(payload, &state->payload, sizeof(state->payload));
payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 3bdf87f..02d41eb 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -653,8 +653,8 @@
efx->n_tx_channels : 0));
}
-int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
- struct tc_to_netdev *ntc)
+int efx_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *ntc)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 52ead55..b607936 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1562,13 +1562,12 @@
struct ethtool_link_ksettings *cmd)
{
struct ioc3_private *ip = netdev_priv(dev);
- int rc;
spin_lock_irq(&ip->ioc3_lock);
- rc = mii_ethtool_get_link_ksettings(&ip->mii, cmd);
+ mii_ethtool_get_link_ksettings(&ip->mii, cmd);
spin_unlock_irq(&ip->ioc3_lock);
- return rc;
+ return 0;
}
static int ioc3_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index 751c818..c07fd59 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -795,12 +795,12 @@
}
if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) {
- memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset),
- rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset);
- memcpy(skb_put(skb, pkt_size - (RX_BUF_LEN - rx_ring_offset)),
- rx_ring, pkt_size - (RX_BUF_LEN - rx_ring_offset));
+ skb_put_data(skb, rx_ring + rx_ring_offset,
+ RX_BUF_LEN - rx_ring_offset);
+ skb_put_data(skb, rx_ring,
+ pkt_size - (RX_BUF_LEN - rx_ring_offset));
} else {
- memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size);
+ skb_put_data(skb, rx_ring + rx_ring_offset, pkt_size);
}
skb->protocol = eth_type_trans(skb, dev);
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 02da106..445109b 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1739,7 +1739,9 @@
{
struct sis190_private *tp = netdev_priv(dev);
- return mii_ethtool_get_link_ksettings(&tp->mii_if, cmd);
+ mii_ethtool_get_link_ksettings(&tp->mii_if, cmd);
+
+ return 0;
}
static int sis190_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index db6dcb0..6a0e1d4 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -1391,13 +1391,12 @@
struct ethtool_link_ksettings *cmd)
{
struct epic_private *np = netdev_priv(dev);
- int rc;
spin_lock_irq(&np->lock);
- rc = mii_ethtool_get_link_ksettings(&np->mii, cmd);
+ mii_ethtool_get_link_ksettings(&np->mii, cmd);
spin_unlock_irq(&np->lock);
- return rc;
+ return 0;
}
static int netdev_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 36307d3..0515744 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1450,7 +1450,7 @@
struct ethtool_link_ksettings *cmd)
{
struct smc911x_local *lp = netdev_priv(dev);
- int ret, status;
+ int status;
unsigned long flags;
u32 supported;
@@ -1458,7 +1458,7 @@
if (lp->phy_type != 0) {
spin_lock_irqsave(&lp->lock, flags);
- ret = mii_ethtool_get_link_ksettings(&lp->mii, cmd);
+ mii_ethtool_get_link_ksettings(&lp->mii, cmd);
spin_unlock_irqrestore(&lp->lock, flags);
} else {
supported = SUPPORTED_10baseT_Half |
@@ -1480,10 +1480,9 @@
ethtool_convert_legacy_u32_to_link_mode(
cmd->link_modes.supported, supported);
- ret = 0;
}
- return ret;
+ return 0;
}
static int
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 976aa87..92c927a 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1843,8 +1843,8 @@
}
}
-static int smc_netdev_get_ecmd(struct net_device *dev,
- struct ethtool_link_ksettings *ecmd)
+static void smc_netdev_get_ecmd(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd)
{
u16 tmp;
unsigned int ioaddr = dev->base_addr;
@@ -1865,8 +1865,6 @@
ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
supported);
-
- return 0;
}
static int smc_netdev_set_ecmd(struct net_device *dev,
@@ -1918,18 +1916,17 @@
struct smc_private *smc = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
- int ret;
unsigned long flags;
spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
if (smc->cfg & CFG_MII_SELECT)
- ret = mii_ethtool_get_link_ksettings(&smc->mii_if, ecmd);
+ mii_ethtool_get_link_ksettings(&smc->mii_if, ecmd);
else
- ret = smc_netdev_get_ecmd(dev, ecmd);
+ smc_netdev_get_ecmd(dev, ecmd);
SMC_SELECT_BANK(saved_bank);
spin_unlock_irqrestore(&smc->lock, flags);
- return ret;
+ return 0;
}
static int smc_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 91e9bd7..0d230b1 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1539,11 +1539,10 @@
struct ethtool_link_ksettings *cmd)
{
struct smc_local *lp = netdev_priv(dev);
- int ret;
if (lp->phy_type != 0) {
spin_lock_irq(&lp->lock);
- ret = mii_ethtool_get_link_ksettings(&lp->mii, cmd);
+ mii_ethtool_get_link_ksettings(&lp->mii, cmd);
spin_unlock_irq(&lp->lock);
} else {
u32 supported = SUPPORTED_10baseT_Half |
@@ -1562,11 +1561,9 @@
ethtool_convert_legacy_u32_to_link_mode(
cmd->link_modes.supported, supported);
-
- ret = 0;
}
- return ret;
+ return 0;
}
static int
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index cfbe363..85c0e41 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -145,6 +145,17 @@
This selects Allwinner SoC glue layer support for the
stmmac device driver. This driver is used for A20/A31
GMAC ethernet controller.
+
+config DWMAC_SUN8I
+ tristate "Allwinner sun8i GMAC support"
+ default ARCH_SUNXI
+ depends on OF && (ARCH_SUNXI || COMPILE_TEST)
+ ---help---
+ Support for Allwinner H3, A83T and A64 EMAC ethernet controllers.
+
+ This selects Allwinner SoC glue layer support for the
+ stmmac device driver. This driver is used for the H3/A83T/A64
+ EMAC ethernet controllers.
endif
config STMMAC_PCI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 700c603..fd4937a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -16,6 +16,7 @@
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o
obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
+obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o
obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o
obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
stmmac-platform-objs:= stmmac_platform.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index b7ce3fb..e82b4b7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -549,9 +549,11 @@
extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
struct mac_link {
- int port;
- int duplex;
- int speed;
+ u32 speed_mask;
+ u32 speed10;
+ u32 speed100;
+ u32 speed1000;
+ u32 duplex;
};
struct mii_regs {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
new file mode 100644
index 0000000..fffd6d5
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -0,0 +1,1007 @@
+/*
+ * dwmac-sun8i.c - Allwinner sun8i DWMAC specific glue layer
+ *
+ * Copyright (C) 2017 Corentin Labbe <clabbe.montjoie@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+#include <linux/stmmac.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+/* General notes on dwmac-sun8i:
+ * Locking: no locking is needed in this file because all the necessary
+ * locking is handled by the stmmac core.
+ */
+
+/* struct emac_variant - Describe dwmac-sun8i hardware variant
+ * @default_syscon_value: The default value of the EMAC register in syscon.
+ * This value is used to properly disable the EMAC
+ * and as a known-good starting value in case the
+ * boot process (e.g. U-Boot) left the register in an
+ * unexpected state.
+ * @internal_phy: Does the MAC embed an internal PHY
+ * @support_mii: Does the MAC handle MII
+ * @support_rmii: Does the MAC handle RMII
+ * @support_rgmii: Does the MAC handle RGMII
+ */
+struct emac_variant {
+ u32 default_syscon_value;
+ int internal_phy;
+ bool support_mii;
+ bool support_rmii;
+ bool support_rgmii;
+};
+
+/* struct sunxi_priv_data - hold all sunxi private data
+ * @tx_clk: reference to MAC TX clock
+ * @ephy_clk: reference to the optional EPHY clock for the internal PHY
+ * @regulator: reference to the optional regulator
+ * @rst_ephy: reference to the optional EPHY reset for the internal PHY
+ * @variant: reference to the current board variant
+ * @regmap: regmap for using the syscon
+ * @use_internal_phy: Does the current PHY choice imply using the internal PHY
+ */
+struct sunxi_priv_data {
+ struct clk *tx_clk;
+ struct clk *ephy_clk;
+ struct regulator *regulator;
+ struct reset_control *rst_ephy;
+ const struct emac_variant *variant;
+ struct regmap *regmap;
+ bool use_internal_phy;
+};
+
+static const struct emac_variant emac_variant_h3 = {
+ .default_syscon_value = 0x58000,
+ .internal_phy = PHY_INTERFACE_MODE_MII,
+ .support_mii = true,
+ .support_rmii = true,
+ .support_rgmii = true
+};
+
+static const struct emac_variant emac_variant_v3s = {
+ .default_syscon_value = 0x38000,
+ .internal_phy = PHY_INTERFACE_MODE_MII,
+ .support_mii = true
+};
+
+static const struct emac_variant emac_variant_a83t = {
+ .default_syscon_value = 0,
+ .internal_phy = 0,
+ .support_mii = true,
+ .support_rgmii = true
+};
+
+static const struct emac_variant emac_variant_a64 = {
+ .default_syscon_value = 0,
+ .internal_phy = 0,
+ .support_mii = true,
+ .support_rmii = true,
+ .support_rgmii = true
+};
+
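+/* EMAC register offsets, relative to the MAC base address */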
+#define EMAC_BASIC_CTL0 0x00
+#define EMAC_BASIC_CTL1 0x04
+#define EMAC_INT_STA 0x08
+#define EMAC_INT_EN 0x0C
+#define EMAC_TX_CTL0 0x10
+#define EMAC_TX_CTL1 0x14
+#define EMAC_TX_FLOW_CTL 0x1C
+#define EMAC_TX_DESC_LIST 0x20
+#define EMAC_RX_CTL0 0x24
+#define EMAC_RX_CTL1 0x28
+#define EMAC_RX_DESC_LIST 0x34
+#define EMAC_RX_FRM_FLT 0x38
+#define EMAC_MDIO_CMD 0x48
+#define EMAC_MDIO_DATA 0x4C
+#define EMAC_MACADDR_HI(reg) (0x50 + (reg) * 8)
+#define EMAC_MACADDR_LO(reg) (0x54 + (reg) * 8)
+#define EMAC_TX_DMA_STA 0xB0
+#define EMAC_TX_CUR_DESC 0xB4
+#define EMAC_TX_CUR_BUF 0xB8
+#define EMAC_RX_DMA_STA 0xC0
+#define EMAC_RX_CUR_DESC 0xC4
+#define EMAC_RX_CUR_BUF 0xC8
+
+/* Use in EMAC_BASIC_CTL0 */
+#define EMAC_DUPLEX_FULL BIT(0)
+#define EMAC_LOOPBACK BIT(1)
+#define EMAC_SPEED_1000 0
+#define EMAC_SPEED_100 (0x03 << 2)
+#define EMAC_SPEED_10 (0x02 << 2)
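+/* The link speed is encoded in bits [3:2] of BASIC_CTL0; 1000M uses value 0 */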
+
+/* Use in EMAC_BASIC_CTL1 */
+#define EMAC_BURSTLEN_SHIFT 24
+
+/* Used in EMAC_RX_FRM_FLT */
+#define EMAC_FRM_FLT_RXALL BIT(0)
+#define EMAC_FRM_FLT_CTL BIT(13)
+#define EMAC_FRM_FLT_MULTICAST BIT(16)
+
+/* Used in RX_CTL1 */
+#define EMAC_RX_MD BIT(1)
+#define EMAC_RX_TH_MASK GENMASK(5, 4)
+#define EMAC_RX_TH_32 0
+#define EMAC_RX_TH_64 (0x1 << 4)
+#define EMAC_RX_TH_96 (0x2 << 4)
+#define EMAC_RX_TH_128 (0x3 << 4)
+#define EMAC_RX_DMA_EN BIT(30)
+#define EMAC_RX_DMA_START BIT(31)
+
+/* Used in TX_CTL1 */
+#define EMAC_TX_MD BIT(1)
+#define EMAC_TX_NEXT_FRM BIT(2)
+#define EMAC_TX_TH_MASK GENMASK(10, 8)
+#define EMAC_TX_TH_64 0
+#define EMAC_TX_TH_128 (0x1 << 8)
+#define EMAC_TX_TH_192 (0x2 << 8)
+#define EMAC_TX_TH_256 (0x3 << 8)
+#define EMAC_TX_DMA_EN BIT(30)
+#define EMAC_TX_DMA_START BIT(31)
+
+/* Used in RX_CTL0 */
+#define EMAC_RX_RECEIVER_EN BIT(31)
+#define EMAC_RX_DO_CRC BIT(27)
+#define EMAC_RX_FLOW_CTL_EN BIT(16)
+
+/* Used in TX_CTL0 */
+#define EMAC_TX_TRANSMITTER_EN BIT(31)
+
+/* Used in EMAC_TX_FLOW_CTL */
+#define EMAC_TX_FLOW_CTL_EN BIT(0)
+
+/* Used in EMAC_INT_STA */
+#define EMAC_TX_INT BIT(0)
+#define EMAC_TX_DMA_STOP_INT BIT(1)
+#define EMAC_TX_BUF_UA_INT BIT(2)
+#define EMAC_TX_TIMEOUT_INT BIT(3)
+#define EMAC_TX_UNDERFLOW_INT BIT(4)
+#define EMAC_TX_EARLY_INT BIT(5)
+#define EMAC_RX_INT BIT(8)
+#define EMAC_RX_BUF_UA_INT BIT(9)
+#define EMAC_RX_DMA_STOP_INT BIT(10)
+#define EMAC_RX_TIMEOUT_INT BIT(11)
+#define EMAC_RX_OVERFLOW_INT BIT(12)
+#define EMAC_RX_EARLY_INT BIT(13)
+#define EMAC_RGMII_STA_INT BIT(16)
+
+#define MAC_ADDR_TYPE_DST BIT(31)
+
+/* H3 specific bits for EPHY */
+#define H3_EPHY_ADDR_SHIFT 20
+#define H3_EPHY_CLK_SEL BIT(18) /* 1: 24MHz, 0: 25MHz */
+#define H3_EPHY_LED_POL BIT(17) /* 1: active low, 0: active high */
+#define H3_EPHY_SHUTDOWN BIT(16) /* 1: shutdown, 0: power up */
+#define H3_EPHY_SELECT BIT(15) /* 1: internal PHY, 0: external PHY */
+
+/* H3/A64 specific bits */
+#define SYSCON_RMII_EN BIT(13) /* 1: enable RMII (overrides EPIT) */
+
+/* Generic system control EMAC_CLK bits */
+#define SYSCON_ETXDC_MASK GENMASK(2, 0)
+#define SYSCON_ETXDC_SHIFT 10
+#define SYSCON_ERXDC_MASK GENMASK(4, 0)
+#define SYSCON_ERXDC_SHIFT 5
+/* EMAC PHY Interface Type */
+#define SYSCON_EPIT BIT(2) /* 1: RGMII, 0: MII */
+#define SYSCON_ETCS_MASK GENMASK(1, 0)
+#define SYSCON_ETCS_MII 0x0
+#define SYSCON_ETCS_EXT_GMII 0x1
+#define SYSCON_ETCS_INT_GMII 0x2
+#define SYSCON_EMAC_REG 0x30
+
+/* sun8i_dwmac_dma_reset() - reset the EMAC
+ * Called from stmmac via stmmac_dma_ops->reset
+ */
+static int sun8i_dwmac_dma_reset(void __iomem *ioaddr)
+{
+ writel(0, ioaddr + EMAC_RX_CTL1);
+ writel(0, ioaddr + EMAC_TX_CTL1);
+ writel(0, ioaddr + EMAC_RX_FRM_FLT);
+ writel(0, ioaddr + EMAC_RX_DESC_LIST);
+ writel(0, ioaddr + EMAC_TX_DESC_LIST);
+ writel(0, ioaddr + EMAC_INT_EN);
+ writel(0x1FFFFFF, ioaddr + EMAC_INT_STA);
+ return 0;
+}
+
+/* sun8i_dwmac_dma_init() - initialize the EMAC
+ * Called from stmmac via stmmac_dma_ops->init
+ */
+static void sun8i_dwmac_dma_init(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx, u32 dma_rx, int atds)
+{
+ /* Write TX and RX descriptors address */
+ writel(dma_rx, ioaddr + EMAC_RX_DESC_LIST);
+ writel(dma_tx, ioaddr + EMAC_TX_DESC_LIST);
+
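+ /* Enable RX/TX completion interrupts and ack any stale status bits;
+ * INT_STA appears to be write-1-to-clear, as in the reset path above.
+ */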
+ writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN);
+ writel(0x1FFFFFF, ioaddr + EMAC_INT_STA);
+}
+
+/* sun8i_dwmac_dump_regs() - Dump EMAC address space
+ * Called from stmmac_dma_ops->dump_regs
+ * Used for ethtool
+ */
+static void sun8i_dwmac_dump_regs(void __iomem *ioaddr, u32 *reg_space)
+{
+ int i;
+
+ for (i = 0; i < 0xC8; i += 4) {
+ if (i == 0x32 || i == 0x3C)
+ continue;
+ reg_space[i / 4] = readl(ioaddr + i);
+ }
+}
+
+/* sun8i_dwmac_dump_mac_regs() - Dump EMAC address space
+ * Called from stmmac_ops->dump_regs
+ * Used for ethtool
+ */
+static void sun8i_dwmac_dump_mac_regs(struct mac_device_info *hw,
+ u32 *reg_space)
+{
+ int i;
+ void __iomem *ioaddr = hw->pcsr;
+
+ for (i = 0; i < 0xC8; i += 4) {
+ if (i == 0x32 || i == 0x3C)
+ continue;
+ reg_space[i / 4] = readl(ioaddr + i);
+ }
+}
+
+static void sun8i_dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+{
+ writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN);
+}
+
+static void sun8i_dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+{
+ writel(0, ioaddr + EMAC_INT_EN);
+}
+
+static void sun8i_dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
+{
+ u32 v;
+
+ v = readl(ioaddr + EMAC_TX_CTL1);
+ v |= EMAC_TX_DMA_START;
+ v |= EMAC_TX_DMA_EN;
+ writel(v, ioaddr + EMAC_TX_CTL1);
+}
+
+static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr)
+{
+ u32 v;
+
+ v = readl(ioaddr + EMAC_TX_CTL1);
+ v |= EMAC_TX_DMA_START;
+ v |= EMAC_TX_DMA_EN;
+ writel(v, ioaddr + EMAC_TX_CTL1);
+}
+
+static void sun8i_dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan)
+{
+ u32 v;
+
+ v = readl(ioaddr + EMAC_TX_CTL1);
+ v &= ~EMAC_TX_DMA_EN;
+ writel(v, ioaddr + EMAC_TX_CTL1);
+}
+
+static void sun8i_dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan)
+{
+ u32 v;
+
+ v = readl(ioaddr + EMAC_RX_CTL1);
+ v |= EMAC_RX_DMA_START;
+ v |= EMAC_RX_DMA_EN;
+ writel(v, ioaddr + EMAC_RX_CTL1);
+}
+
+static void sun8i_dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan)
+{
+ u32 v;
+
+ v = readl(ioaddr + EMAC_RX_CTL1);
+ v &= ~EMAC_RX_DMA_EN;
+ writel(v, ioaddr + EMAC_RX_CTL1);
+}
+
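+/* sun8i_dwmac_dma_interrupt() - map EMAC_INT_STA bits to the stmmac status
+ * codes (handle_tx, handle_rx, tx_hard_error) and update the extra stats.
+ * The raw status value is written back at the end to ack what was handled.
+ */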
+static int sun8i_dwmac_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x, u32 chan)
+{
+ u32 v;
+ int ret = 0;
+
+ v = readl(ioaddr + EMAC_INT_STA);
+
+ if (v & EMAC_TX_INT) {
+ ret |= handle_tx;
+ x->tx_normal_irq_n++;
+ }
+
+ if (v & EMAC_TX_DMA_STOP_INT)
+ x->tx_process_stopped_irq++;
+
+ if (v & EMAC_TX_BUF_UA_INT)
+ x->tx_process_stopped_irq++;
+
+ if (v & EMAC_TX_TIMEOUT_INT)
+ ret |= tx_hard_error;
+
+ if (v & EMAC_TX_UNDERFLOW_INT) {
+ ret |= tx_hard_error;
+ x->tx_undeflow_irq++;
+ }
+
+ if (v & EMAC_TX_EARLY_INT)
+ x->tx_early_irq++;
+
+ if (v & EMAC_RX_INT) {
+ ret |= handle_rx;
+ x->rx_normal_irq_n++;
+ }
+
+ if (v & EMAC_RX_BUF_UA_INT)
+ x->rx_buf_unav_irq++;
+
+ if (v & EMAC_RX_DMA_STOP_INT)
+ x->rx_process_stopped_irq++;
+
+ if (v & EMAC_RX_TIMEOUT_INT)
+ ret |= tx_hard_error;
+
+ if (v & EMAC_RX_OVERFLOW_INT) {
+ ret |= tx_hard_error;
+ x->rx_overflow_irq++;
+ }
+
+ if (v & EMAC_RX_EARLY_INT)
+ x->rx_early_irq++;
+
+ if (v & EMAC_RGMII_STA_INT)
+ x->irq_rgmii_n++;
+
+ writel(v, ioaddr + EMAC_INT_STA);
+
+ return ret;
+}
+
+static void sun8i_dwmac_dma_operation_mode(void __iomem *ioaddr, int txmode,
+ int rxmode, int rxfifosz)
+{
+ u32 v;
+
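+ /* txmode/rxmode are either SF_DMA_MODE (store-and-forward) or a byte
+ * threshold, mapped below to the nearest supported TX_TH_x/RX_TH_x level.
+ */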
+ v = readl(ioaddr + EMAC_TX_CTL1);
+ if (txmode == SF_DMA_MODE) {
+ v |= EMAC_TX_MD;
+ /* Undocumented bit (called TX_NEXT_FRM in BSP), the original
+ * comment is
+ * "Operating on second frame increase the performance
+ * especially when transmit store-and-forward is used."
+ */
+ v |= EMAC_TX_NEXT_FRM;
+ } else {
+ v &= ~EMAC_TX_MD;
+ v &= ~EMAC_TX_TH_MASK;
+ if (txmode < 64)
+ v |= EMAC_TX_TH_64;
+ else if (txmode < 128)
+ v |= EMAC_TX_TH_128;
+ else if (txmode < 192)
+ v |= EMAC_TX_TH_192;
+ else if (txmode < 256)
+ v |= EMAC_TX_TH_256;
+ }
+ writel(v, ioaddr + EMAC_TX_CTL1);
+
+ v = readl(ioaddr + EMAC_RX_CTL1);
+ if (rxmode == SF_DMA_MODE) {
+ v |= EMAC_RX_MD;
+ } else {
+ v &= ~EMAC_RX_MD;
+ v &= ~EMAC_RX_TH_MASK;
+ if (rxmode < 32)
+ v |= EMAC_RX_TH_32;
+ else if (rxmode < 64)
+ v |= EMAC_RX_TH_64;
+ else if (rxmode < 96)
+ v |= EMAC_RX_TH_96;
+ else if (rxmode < 128)
+ v |= EMAC_RX_TH_128;
+ }
+ writel(v, ioaddr + EMAC_RX_CTL1);
+}
+
+static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
+ .reset = sun8i_dwmac_dma_reset,
+ .init = sun8i_dwmac_dma_init,
+ .dump_regs = sun8i_dwmac_dump_regs,
+ .dma_mode = sun8i_dwmac_dma_operation_mode,
+ .enable_dma_transmission = sun8i_dwmac_enable_dma_transmission,
+ .enable_dma_irq = sun8i_dwmac_enable_dma_irq,
+ .disable_dma_irq = sun8i_dwmac_disable_dma_irq,
+ .start_tx = sun8i_dwmac_dma_start_tx,
+ .stop_tx = sun8i_dwmac_dma_stop_tx,
+ .start_rx = sun8i_dwmac_dma_start_rx,
+ .stop_rx = sun8i_dwmac_dma_stop_rx,
+ .dma_interrupt = sun8i_dwmac_dma_interrupt,
+};
+
+static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct sunxi_priv_data *gmac = priv;
+ int ret;
+
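+ /* Enable the optional PHY regulator before the TX clock; if the clock
+ * fails to enable, the regulator is switched back off again so an
+ * error leaves nothing powered.
+ */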
+ if (gmac->regulator) {
+ ret = regulator_enable(gmac->regulator);
+ if (ret) {
+ dev_err(&pdev->dev, "Fail to enable regulator\n");
+ return ret;
+ }
+ }
+
+ ret = clk_prepare_enable(gmac->tx_clk);
+ if (ret) {
+ if (gmac->regulator)
+ regulator_disable(gmac->regulator);
+ dev_err(&pdev->dev, "Could not enable AHB clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sun8i_dwmac_core_init(struct mac_device_info *hw, int mtu)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 v;
+
+ v = (8 << EMAC_BURSTLEN_SHIFT); /* burst len */
+ writel(v, ioaddr + EMAC_BASIC_CTL1);
+}
+
+static void sun8i_dwmac_set_mac(void __iomem *ioaddr, bool enable)
+{
+ u32 t, r;
+
+ t = readl(ioaddr + EMAC_TX_CTL0);
+ r = readl(ioaddr + EMAC_RX_CTL0);
+ if (enable) {
+ t |= EMAC_TX_TRANSMITTER_EN;
+ r |= EMAC_RX_RECEIVER_EN;
+ } else {
+ t &= ~EMAC_TX_TRANSMITTER_EN;
+ r &= ~EMAC_RX_RECEIVER_EN;
+ }
+ writel(t, ioaddr + EMAC_TX_CTL0);
+ writel(r, ioaddr + EMAC_RX_CTL0);
+}
+
+/* Set MAC address at slot reg_n
+ * All slots > 0 need to be enabled with MAC_ADDR_TYPE_DST
+ * If addr is NULL, clear the slot
+ */
+static void sun8i_dwmac_set_umac_addr(struct mac_device_info *hw,
+ unsigned char *addr,
+ unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 v;
+
+ if (!addr) {
+ writel(0, ioaddr + EMAC_MACADDR_HI(reg_n));
+ return;
+ }
+
+ stmmac_set_mac_addr(ioaddr, addr, EMAC_MACADDR_HI(reg_n),
+ EMAC_MACADDR_LO(reg_n));
+ if (reg_n > 0) {
+ v = readl(ioaddr + EMAC_MACADDR_HI(reg_n));
+ v |= MAC_ADDR_TYPE_DST;
+ writel(v, ioaddr + EMAC_MACADDR_HI(reg_n));
+ }
+}
+
+static void sun8i_dwmac_get_umac_addr(struct mac_device_info *hw,
+ unsigned char *addr,
+ unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ stmmac_get_mac_addr(ioaddr, addr, EMAC_MACADDR_HI(reg_n),
+ EMAC_MACADDR_LO(reg_n));
+}
+
+/* Caution: this function must return a non-zero value to work */
+static int sun8i_dwmac_rx_ipc_enable(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 v;
+
+ v = readl(ioaddr + EMAC_RX_CTL0);
+ v |= EMAC_RX_DO_CRC;
+ writel(v, ioaddr + EMAC_RX_CTL0);
+
+ return 1;
+}
+
+static void sun8i_dwmac_set_filter(struct mac_device_info *hw,
+ struct net_device *dev)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 v;
+ int i = 1;
+ struct netdev_hw_addr *ha;
+ int macaddrs = netdev_uc_count(dev) + netdev_mc_count(dev) + 1;
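+	/* Slot 0 holds the device's own MAC address, hence the extra entry. */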
+
+ v = EMAC_FRM_FLT_CTL;
+
+ if (dev->flags & IFF_PROMISC) {
+ v = EMAC_FRM_FLT_RXALL;
+ } else if (dev->flags & IFF_ALLMULTI) {
+ v |= EMAC_FRM_FLT_MULTICAST;
+ } else if (macaddrs <= hw->unicast_filter_entries) {
+ if (!netdev_mc_empty(dev)) {
+ netdev_for_each_mc_addr(ha, dev) {
+ sun8i_dwmac_set_umac_addr(hw, ha->addr, i);
+ i++;
+ }
+ }
+ if (!netdev_uc_empty(dev)) {
+ netdev_for_each_uc_addr(ha, dev) {
+ sun8i_dwmac_set_umac_addr(hw, ha->addr, i);
+ i++;
+ }
+ }
+ } else {
+ netdev_info(dev, "Too many address, switching to promiscuous\n");
+ v = EMAC_FRM_FLT_RXALL;
+ }
+
+ /* Disable unused address filter slots */
+ while (i < hw->unicast_filter_entries)
+ sun8i_dwmac_set_umac_addr(hw, NULL, i++);
+
+ writel(v, ioaddr + EMAC_RX_FRM_FLT);
+}
+
+static void sun8i_dwmac_flow_ctrl(struct mac_device_info *hw,
+ unsigned int duplex, unsigned int fc,
+ unsigned int pause_time, u32 tx_cnt)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 v;
+
+ v = readl(ioaddr + EMAC_RX_CTL0);
+ if (fc == FLOW_AUTO)
+ v |= EMAC_RX_FLOW_CTL_EN;
+ else
+ v &= ~EMAC_RX_FLOW_CTL_EN;
+ writel(v, ioaddr + EMAC_RX_CTL0);
+
+ v = readl(ioaddr + EMAC_TX_FLOW_CTL);
+ if (fc == FLOW_AUTO)
+ v |= EMAC_TX_FLOW_CTL_EN;
+ else
+ v &= ~EMAC_TX_FLOW_CTL_EN;
+ writel(v, ioaddr + EMAC_TX_FLOW_CTL);
+}
+
+static int sun8i_dwmac_reset(struct stmmac_priv *priv)
+{
+ u32 v;
+ int err;
+
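+	/* Bit 0 of EMAC_BASIC_CTL1 is the soft-reset bit; it self-clears
+	 * once the reset completes.
+	 */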
+ v = readl(priv->ioaddr + EMAC_BASIC_CTL1);
+ writel(v | 0x01, priv->ioaddr + EMAC_BASIC_CTL1);
+
+	/* The timeout was previously set to 10ms, but some boards (OrangePI0)
+	 * need more when no cable is plugged in. 100ms seems OK.
+	 */
+ err = readl_poll_timeout(priv->ioaddr + EMAC_BASIC_CTL1, v,
+ !(v & 0x01), 100, 100000);
+
+ if (err) {
+ dev_err(priv->device, "EMAC reset timeout\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
+{
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+ struct device_node *node = priv->device->of_node;
+ int ret;
+ u32 reg, val;
+
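+	/* Warn if the current syscon value differs from the variant default
+	 * that is rebuilt below.
+	 */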
+ regmap_read(gmac->regmap, SYSCON_EMAC_REG, &val);
+ reg = gmac->variant->default_syscon_value;
+ if (reg != val)
+ dev_warn(priv->device,
+ "Current syscon value is not the default %x (expect %x)\n",
+ val, reg);
+
+ if (gmac->variant->internal_phy) {
+ if (!gmac->use_internal_phy) {
+ /* switch to external PHY interface */
+ reg &= ~H3_EPHY_SELECT;
+ } else {
+ reg |= H3_EPHY_SELECT;
+ reg &= ~H3_EPHY_SHUTDOWN;
+ dev_dbg(priv->device, "Select internal_phy %x\n", reg);
+
+ if (of_property_read_bool(priv->plat->phy_node,
+ "allwinner,leds-active-low"))
+ reg |= H3_EPHY_LED_POL;
+ else
+ reg &= ~H3_EPHY_LED_POL;
+
+ /* Force EPHY xtal frequency to 24MHz. */
+ reg |= H3_EPHY_CLK_SEL;
+
+ ret = of_mdio_parse_addr(priv->device,
+ priv->plat->phy_node);
+ if (ret < 0) {
+ dev_err(priv->device, "Could not parse MDIO addr\n");
+ return ret;
+ }
+ /* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
+ * address. No need to mask it again.
+ */
+ reg |= ret << H3_EPHY_ADDR_SHIFT;
+ }
+ }
+
+ if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
+ if (val % 100) {
+ dev_err(priv->device, "tx-delay must be a multiple of 100\n");
+ return -EINVAL;
+ }
+ val /= 100;
+ dev_dbg(priv->device, "set tx-delay to %x\n", val);
+ if (val <= SYSCON_ETXDC_MASK) {
+ reg &= ~(SYSCON_ETXDC_MASK << SYSCON_ETXDC_SHIFT);
+ reg |= (val << SYSCON_ETXDC_SHIFT);
+ } else {
+ dev_err(priv->device, "Invalid TX clock delay: %d\n",
+ val);
+ return -EINVAL;
+ }
+ }
+
+ if (!of_property_read_u32(node, "allwinner,rx-delay-ps", &val)) {
+ if (val % 100) {
+ dev_err(priv->device, "rx-delay must be a multiple of 100\n");
+ return -EINVAL;
+ }
+ val /= 100;
+ dev_dbg(priv->device, "set rx-delay to %x\n", val);
+ if (val <= SYSCON_ERXDC_MASK) {
+ reg &= ~(SYSCON_ERXDC_MASK << SYSCON_ERXDC_SHIFT);
+ reg |= (val << SYSCON_ERXDC_SHIFT);
+ } else {
+ dev_err(priv->device, "Invalid RX clock delay: %d\n",
+ val);
+ return -EINVAL;
+ }
+ }
+
+ /* Clear interface mode bits */
+ reg &= ~(SYSCON_ETCS_MASK | SYSCON_EPIT);
+ if (gmac->variant->support_rmii)
+ reg &= ~SYSCON_RMII_EN;
+
+ switch (priv->plat->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ /* default */
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ reg |= SYSCON_RMII_EN | SYSCON_ETCS_EXT_GMII;
+ break;
+ default:
+ dev_err(priv->device, "Unsupported interface mode: %s",
+ phy_modes(priv->plat->interface));
+ return -EINVAL;
+ }
+
+ regmap_write(gmac->regmap, SYSCON_EMAC_REG, reg);
+
+ return 0;
+}
+
+static void sun8i_dwmac_unset_syscon(struct sunxi_priv_data *gmac)
+{
+ u32 reg = gmac->variant->default_syscon_value;
+
+ regmap_write(gmac->regmap, SYSCON_EMAC_REG, reg);
+}
+
+static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv)
+{
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+ int ret;
+
+ if (!gmac->use_internal_phy)
+ return 0;
+
+ ret = clk_prepare_enable(gmac->ephy_clk);
+ if (ret) {
+ dev_err(priv->device, "Cannot enable ephy\n");
+ return ret;
+ }
+
+	/* Make sure the EPHY is properly reset, as U-Boot may leave
+	 * it in a deasserted state, in which case resetting the EMAC
+	 * may fail.
+	 */
+ reset_control_assert(gmac->rst_ephy);
+
+ ret = reset_control_deassert(gmac->rst_ephy);
+ if (ret) {
+ dev_err(priv->device, "Cannot deassert ephy\n");
+ clk_disable_unprepare(gmac->ephy_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sun8i_dwmac_unpower_internal_phy(struct sunxi_priv_data *gmac)
+{
+ if (!gmac->use_internal_phy)
+ return 0;
+
+ clk_disable_unprepare(gmac->ephy_clk);
+ reset_control_assert(gmac->rst_ephy);
+ return 0;
+}
+
+/* sun8i_power_phy() - Activate the PHY.
+ * In case of error, there is no need to call sun8i_unpower_phy();
+ * it will be called anyway by sun8i_dwmac_exit().
+ */
+static int sun8i_power_phy(struct stmmac_priv *priv)
+{
+ int ret;
+
+ ret = sun8i_dwmac_power_internal_phy(priv);
+ if (ret)
+ return ret;
+
+ ret = sun8i_dwmac_set_syscon(priv);
+ if (ret)
+ return ret;
+
+	/* After changing the syscon value, the MAC needs a reset or it will
+	 * keep using the previous value (and so the previous PHY setting).
+	 */
+ ret = sun8i_dwmac_reset(priv);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static void sun8i_unpower_phy(struct sunxi_priv_data *gmac)
+{
+ sun8i_dwmac_unset_syscon(gmac);
+ sun8i_dwmac_unpower_internal_phy(gmac);
+}
+
+static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct sunxi_priv_data *gmac = priv;
+
+ sun8i_unpower_phy(gmac);
+
+ clk_disable_unprepare(gmac->tx_clk);
+
+ if (gmac->regulator)
+ regulator_disable(gmac->regulator);
+}
+
+static const struct stmmac_ops sun8i_dwmac_ops = {
+ .core_init = sun8i_dwmac_core_init,
+ .set_mac = sun8i_dwmac_set_mac,
+ .dump_regs = sun8i_dwmac_dump_mac_regs,
+ .rx_ipc = sun8i_dwmac_rx_ipc_enable,
+ .set_filter = sun8i_dwmac_set_filter,
+ .flow_ctrl = sun8i_dwmac_flow_ctrl,
+ .set_umac_addr = sun8i_dwmac_set_umac_addr,
+ .get_umac_addr = sun8i_dwmac_get_umac_addr,
+};
+
+static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
+{
+ struct mac_device_info *mac;
+ struct stmmac_priv *priv = ppriv;
+ int ret;
+
+ mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
+ if (!mac)
+ return NULL;
+
+ ret = sun8i_power_phy(priv);
+ if (ret)
+ return NULL;
+
+ mac->pcsr = priv->ioaddr;
+ mac->mac = &sun8i_dwmac_ops;
+ mac->dma = &sun8i_dwmac_dma_ops;
+
+	/* The loopback bit seems to be re-set whenever the link changes,
+	 * so simply mask it out each time.
+	 * The 10/100/1000 speed is encoded in bits 3:2.
+	 */
+ mac->link.speed_mask = GENMASK(3, 2) | EMAC_LOOPBACK;
+ mac->link.speed10 = EMAC_SPEED_10;
+ mac->link.speed100 = EMAC_SPEED_100;
+ mac->link.speed1000 = EMAC_SPEED_1000;
+ mac->link.duplex = EMAC_DUPLEX_FULL;
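+	/* MDIO command layout: PHY address in bits 16:12, register in
+	 * bits 8:4, MDC clock divider in bits 22:20.
+	 */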
+ mac->mii.addr = EMAC_MDIO_CMD;
+ mac->mii.data = EMAC_MDIO_DATA;
+ mac->mii.reg_shift = 4;
+ mac->mii.reg_mask = GENMASK(8, 4);
+ mac->mii.addr_shift = 12;
+ mac->mii.addr_mask = GENMASK(16, 12);
+ mac->mii.clk_csr_shift = 20;
+ mac->mii.clk_csr_mask = GENMASK(22, 20);
+ mac->unicast_filter_entries = 8;
+
+	/* The Synopsys ID is not available on this hardware */
+ priv->synopsys_id = 0;
+
+ return mac;
+}
+
+static int sun8i_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct sunxi_priv_data *gmac;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac)
+ return -ENOMEM;
+
+ gmac->variant = of_device_get_match_data(&pdev->dev);
+ if (!gmac->variant) {
+ dev_err(&pdev->dev, "Missing dwmac-sun8i variant\n");
+ return -EINVAL;
+ }
+
+ gmac->tx_clk = devm_clk_get(dev, "stmmaceth");
+ if (IS_ERR(gmac->tx_clk)) {
+ dev_err(dev, "Could not get TX clock\n");
+ return PTR_ERR(gmac->tx_clk);
+ }
+
+ /* Optional regulator for PHY */
+ gmac->regulator = devm_regulator_get_optional(dev, "phy");
+ if (IS_ERR(gmac->regulator)) {
+ if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "No regulator found\n");
+ gmac->regulator = NULL;
+ }
+
+ gmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "syscon");
+ if (IS_ERR(gmac->regmap)) {
+ ret = PTR_ERR(gmac->regmap);
+ dev_err(&pdev->dev, "Unable to map syscon: %d\n", ret);
+ return ret;
+ }
+
+ plat_dat->interface = of_get_phy_mode(dev->of_node);
+ if (plat_dat->interface == gmac->variant->internal_phy) {
+ dev_info(&pdev->dev, "Will use internal PHY\n");
+ gmac->use_internal_phy = true;
+ gmac->ephy_clk = of_clk_get(plat_dat->phy_node, 0);
+ if (IS_ERR(gmac->ephy_clk)) {
+ ret = PTR_ERR(gmac->ephy_clk);
+ dev_err(&pdev->dev, "Cannot get EPHY clock: %d\n", ret);
+ return -EINVAL;
+ }
+
+ gmac->rst_ephy = of_reset_control_get(plat_dat->phy_node, NULL);
+ if (IS_ERR(gmac->rst_ephy)) {
+ ret = PTR_ERR(gmac->rst_ephy);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ dev_err(&pdev->dev, "No EPHY reset control found %d\n",
+ ret);
+ return -EINVAL;
+ }
+ } else {
+ dev_info(&pdev->dev, "Will use external PHY\n");
+ gmac->use_internal_phy = false;
+ }
+
+	/* Platform data specifying hardware features and callbacks.
+	 * The hardware features were copied from the Allwinner drivers.
+	 */
+ plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
+ plat_dat->tx_coe = 1;
+ plat_dat->has_sun8i = true;
+ plat_dat->bsp_priv = gmac;
+ plat_dat->init = sun8i_dwmac_init;
+ plat_dat->exit = sun8i_dwmac_exit;
+ plat_dat->setup = sun8i_dwmac_setup;
+
+ ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ sun8i_dwmac_exit(pdev, plat_dat->bsp_priv);
+
+ return ret;
+}
+
+static const struct of_device_id sun8i_dwmac_match[] = {
+ { .compatible = "allwinner,sun8i-h3-emac",
+ .data = &emac_variant_h3 },
+ { .compatible = "allwinner,sun8i-v3s-emac",
+ .data = &emac_variant_v3s },
+ { .compatible = "allwinner,sun8i-a83t-emac",
+ .data = &emac_variant_a83t },
+ { .compatible = "allwinner,sun50i-a64-emac",
+ .data = &emac_variant_a64 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
+
+static struct platform_driver sun8i_dwmac_driver = {
+ .probe = sun8i_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "dwmac-sun8i",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = sun8i_dwmac_match,
+ },
+};
+module_platform_driver(sun8i_dwmac_driver);
+
+MODULE_AUTHOR("Corentin Labbe <clabbe.montjoie@gmail.com>");
+MODULE_DESCRIPTION("Allwinner sun8i DWMAC specific glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index f3d9305..8a86340 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -45,15 +45,17 @@
if (hw->ps) {
value |= GMAC_CONTROL_TE;
- if (hw->ps == SPEED_1000) {
- value &= ~GMAC_CONTROL_PS;
- } else {
- value |= GMAC_CONTROL_PS;
-
- if (hw->ps == SPEED_10)
- value &= ~GMAC_CONTROL_FES;
- else
- value |= GMAC_CONTROL_FES;
+ value &= ~hw->link.speed_mask;
+ switch (hw->ps) {
+ case SPEED_1000:
+ value |= hw->link.speed1000;
+ break;
+ case SPEED_100:
+ value |= hw->link.speed100;
+ break;
+ case SPEED_10:
+ value |= hw->link.speed10;
+ break;
}
}
@@ -531,9 +533,11 @@
mac->mac = &dwmac1000_ops;
mac->dma = &dwmac1000_dma_ops;
- mac->link.port = GMAC_CONTROL_PS;
mac->link.duplex = GMAC_CONTROL_DM;
- mac->link.speed = GMAC_CONTROL_FES;
+ mac->link.speed10 = GMAC_CONTROL_PS;
+ mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
+ mac->link.speed1000 = 0;
+ mac->link.speed_mask = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
mac->mii.addr = GMAC_MII_ADDR;
mac->mii.data = GMAC_MII_DATA;
mac->mii.addr_shift = 11;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 1b36091..8ef5173 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -175,9 +175,11 @@
mac->mac = &dwmac100_ops;
mac->dma = &dwmac100_dma_ops;
- mac->link.port = MAC_CONTROL_PS;
mac->link.duplex = MAC_CONTROL_F;
- mac->link.speed = 0;
+ mac->link.speed10 = 0;
+ mac->link.speed100 = 0;
+ mac->link.speed1000 = 0;
+ mac->link.speed_mask = MAC_CONTROL_PS;
mac->mii.addr = MAC_MII_ADDR;
mac->mii.data = MAC_MII_DATA;
mac->mii.addr_shift = 11;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 48793f2..f233bf8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -35,15 +35,17 @@
if (hw->ps) {
value |= GMAC_CONFIG_TE;
- if (hw->ps == SPEED_1000) {
- value &= ~GMAC_CONFIG_PS;
- } else {
- value |= GMAC_CONFIG_PS;
-
- if (hw->ps == SPEED_10)
- value &= ~GMAC_CONFIG_FES;
- else
- value |= GMAC_CONFIG_FES;
+ value &= ~hw->link.speed_mask;
+ switch (hw->ps) {
+ case SPEED_1000:
+ value |= hw->link.speed1000;
+ break;
+ case SPEED_100:
+ value |= hw->link.speed100;
+ break;
+ case SPEED_10:
+ value |= hw->link.speed10;
+ break;
}
}
@@ -747,9 +749,11 @@
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
- mac->link.port = GMAC_CONFIG_PS;
mac->link.duplex = GMAC_CONFIG_DM;
- mac->link.speed = GMAC_CONFIG_FES;
+ mac->link.speed10 = GMAC_CONFIG_PS;
+ mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
+ mac->link.speed1000 = 0;
+ mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
mac->mii.addr = GMAC_MDIO_ADDR;
mac->mii.data = GMAC_MDIO_DATA;
mac->mii.addr_shift = 21;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 38f9430..67af0bd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -248,6 +248,7 @@
data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
writel(data, ioaddr + low);
}
+EXPORT_SYMBOL_GPL(stmmac_set_mac_addr);
/* Enable disable MAC RX/TX */
void stmmac_set_mac(void __iomem *ioaddr, bool enable)
@@ -279,4 +280,4 @@
addr[4] = hi_addr & 0xff;
addr[5] = (hi_addr >> 8) & 0xff;
}
-
+EXPORT_SYMBOL_GPL(stmmac_get_mac_addr);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 33efe70..a916e13 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -104,7 +104,7 @@
/* TX Queue */
struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
- int oldlink;
+ bool oldlink;
int speed;
int oldduplex;
unsigned int flow_ctrl;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 16808e4..743170d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -273,7 +273,6 @@
{
struct stmmac_priv *priv = netdev_priv(dev);
struct phy_device *phy = dev->phydev;
- int rc;
if (priv->hw->pcs & STMMAC_PCS_RGMII ||
priv->hw->pcs & STMMAC_PCS_SGMII) {
@@ -364,8 +363,8 @@
"link speed / duplex setting\n", dev->name);
return -EBUSY;
}
- rc = phy_ethtool_ksettings_get(phy, cmd);
- return rc;
+ phy_ethtool_ksettings_get(phy, cmd);
+ return 0;
}
static int
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6e4cbc6..19bba62 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -235,6 +235,17 @@
else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
priv->clk_csr = STMMAC_CSR_250_300M;
}
+
+ if (priv->plat->has_sun8i) {
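+		/* Map the clock rate onto the EMAC MDC clock-divider
+		 * field (values 0-3).
+		 */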
+ if (clk_rate > 160000000)
+ priv->clk_csr = 0x03;
+ else if (clk_rate > 80000000)
+ priv->clk_csr = 0x02;
+ else if (clk_rate > 40000000)
+ priv->clk_csr = 0x01;
+ else
+ priv->clk_csr = 0;
+ }
}
static void print_pkt(unsigned char *buf, int len)
@@ -653,6 +664,7 @@
ptp_over_ethernet = PTP_TCR_TSIPENA;
break;
+ case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
/* time stamp any incoming packet */
config.rx_filter = HWTSTAMP_FILTER_ALL;
@@ -783,7 +795,7 @@
struct stmmac_priv *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
unsigned long flags;
- int new_state = 0;
+ bool new_state = false;
if (!phydev)
return;
@@ -796,8 +808,8 @@
/* Now we make sure that we can be in full duplex mode.
* If not, we operate in half-duplex mode. */
if (phydev->duplex != priv->oldduplex) {
- new_state = 1;
- if (!(phydev->duplex))
+ new_state = true;
+ if (!phydev->duplex)
ctrl &= ~priv->hw->link.duplex;
else
ctrl |= priv->hw->link.duplex;
@@ -808,30 +820,17 @@
stmmac_mac_flow_ctrl(priv, phydev->duplex);
if (phydev->speed != priv->speed) {
- new_state = 1;
+ new_state = true;
+ ctrl &= ~priv->hw->link.speed_mask;
switch (phydev->speed) {
- case 1000:
- if (priv->plat->has_gmac ||
- priv->plat->has_gmac4)
- ctrl &= ~priv->hw->link.port;
+ case SPEED_1000:
+ ctrl |= priv->hw->link.speed1000;
break;
- case 100:
- if (priv->plat->has_gmac ||
- priv->plat->has_gmac4) {
- ctrl |= priv->hw->link.port;
- ctrl |= priv->hw->link.speed;
- } else {
- ctrl &= ~priv->hw->link.port;
- }
+ case SPEED_100:
+ ctrl |= priv->hw->link.speed100;
break;
- case 10:
- if (priv->plat->has_gmac ||
- priv->plat->has_gmac4) {
- ctrl |= priv->hw->link.port;
- ctrl &= ~(priv->hw->link.speed);
- } else {
- ctrl &= ~priv->hw->link.port;
- }
+ case SPEED_10:
+ ctrl |= priv->hw->link.speed10;
break;
default:
netif_warn(priv, link, priv->dev,
@@ -847,12 +846,12 @@
writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
if (!priv->oldlink) {
- new_state = 1;
- priv->oldlink = 1;
+ new_state = true;
+ priv->oldlink = true;
}
} else if (priv->oldlink) {
- new_state = 1;
- priv->oldlink = 0;
+ new_state = true;
+ priv->oldlink = false;
priv->speed = SPEED_UNKNOWN;
priv->oldduplex = DUPLEX_UNKNOWN;
}
@@ -915,7 +914,7 @@
char bus_id[MII_BUS_ID_SIZE];
int interface = priv->plat->interface;
int max_speed = priv->plat->max_speed;
- priv->oldlink = 0;
+ priv->oldlink = false;
priv->speed = SPEED_UNKNOWN;
priv->oldduplex = DUPLEX_UNKNOWN;
@@ -2895,8 +2894,7 @@
priv->xstats.tx_set_ic_bit++;
}
- if (!priv->hwts_tx_en)
- skb_tx_timestamp(skb);
+ skb_tx_timestamp(skb);
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
priv->hwts_tx_en)) {
@@ -2974,7 +2972,7 @@
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
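+		/* Check the GSO type instead of the IP protocol so TSO
+		 * also covers IPv6.
+		 */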
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
return stmmac_tso_xmit(skb, dev);
}
@@ -3105,8 +3103,7 @@
priv->xstats.tx_set_ic_bit++;
}
- if (!priv->hwts_tx_en)
- skb_tx_timestamp(skb);
+ skb_tx_timestamp(skb);
/* Ready to fill the first descriptor and set the OWN bit w/o any
* problems because all the descriptors are actually ready to be
@@ -3969,7 +3966,9 @@
struct mac_device_info *mac;
/* Identify the MAC HW device */
- if (priv->plat->has_gmac) {
+ if (priv->plat->setup) {
+ mac = priv->plat->setup(priv);
+ } else if (priv->plat->has_gmac) {
priv->dev->priv_flags |= IFF_UNICAST_FLT;
mac = dwmac1000_setup(priv->ioaddr,
priv->plat->multicast_filter_bins,
@@ -3989,6 +3988,10 @@
priv->hw = mac;
+	/* dwmac-sun8i only works in chain mode */
+ if (priv->plat->has_sun8i)
+ chain_mode = 1;
+
/* To use the chained or ring mode */
if (priv->synopsys_id >= DWMAC_CORE_4_00) {
priv->hw->mode = &dwmac4_ring_mode_ops;
@@ -4135,7 +4138,7 @@
NETIF_F_RXCSUM;
if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
- ndev->hw_features |= NETIF_F_TSO;
+ ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
priv->tso = true;
dev_info(priv->device, "TSO feature enabled\n");
}
@@ -4314,7 +4317,7 @@
}
spin_unlock_irqrestore(&priv->lock, flags);
- priv->oldlink = 0;
+ priv->oldlink = false;
priv->speed = SPEED_UNKNOWN;
priv->oldduplex = DUPLEX_UNKNOWN;
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 7fc3a1e..a366b37 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -309,6 +309,13 @@
struct device_node *np, struct device *dev)
{
bool mdio = true;
+ static const struct of_device_id need_mdio_ids[] = {
+ { .compatible = "snps,dwc-qos-ethernet-4.10" },
+ { .compatible = "allwinner,sun8i-a83t-emac" },
+ { .compatible = "allwinner,sun8i-h3-emac" },
+ { .compatible = "allwinner,sun8i-v3s-emac" },
+ { .compatible = "allwinner,sun50i-a64-emac" },
+ };
/* If phy-handle property is passed from DT, use it as the PHY */
plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
@@ -325,8 +332,7 @@
mdio = false;
}
- /* exception for dwmac-dwc-qos-eth glue logic */
- if (of_device_is_compatible(np, "snps,dwc-qos-ethernet-4.10")) {
+ if (of_match_node(need_mdio_ids, np)) {
plat->mdio_node = of_get_child_by_name(np, "mdio");
} else {
/**
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 5b56c24..8603e39 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -248,7 +248,7 @@
dev->ethtool_ops = &vsw_ethtool_ops;
dev->watchdog_timeo = VSW_TX_TIMEOUT;
- dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG;
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
dev->features = dev->hw_features;
/* MTU range: 68 - 65535 */
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 2dcca24..46cb7f8 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6667,7 +6667,7 @@
headroom = align + sizeof(struct tx_pkt_hdr);
ehdr = (struct ethhdr *) skb->data;
- tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);
+ tp = skb_push(skb, headroom);
len = skb->len - sizeof(struct tx_pkt_hdr);
tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 0b95105..75b167e 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -312,7 +312,7 @@
dev->watchdog_timeo = VNET_TX_TIMEOUT;
dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE |
- NETIF_F_HW_CSUM | NETIF_F_SG;
+ NETIF_F_IP_CSUM | NETIF_F_SG;
dev->features = dev->hw_features;
/* MTU range: 68 - 65535 */
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index f4d7aec..b7a0f5e 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1731,10 +1731,14 @@
cpts_rx_enable(cpts, 0);
break;
case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ return -ERANGE;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- return -ERANGE;
+ cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
@@ -1744,7 +1748,7 @@
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- cpts_rx_enable(cpts, 1);
+ cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
@@ -1783,7 +1787,7 @@
cfg.tx_type = cpts_is_tx_enabled(cpts) ?
HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
- HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE);
+ cpts->rx_enable : HWTSTAMP_FILTER_NONE);
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
@@ -2140,6 +2144,7 @@
(1 << HWTSTAMP_TX_ON);
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
}
@@ -2165,11 +2170,11 @@
struct cpsw_common *cpsw = priv->cpsw;
int slave_no = cpsw_slave_index(cpsw, priv);
- if (cpsw->slaves[slave_no].phy)
- return phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy,
- ecmd);
- else
+ if (!cpsw->slaves[slave_no].phy)
return -EOPNOTSUPP;
+
+ phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
+ return 0;
}
static int cpsw_set_link_ksettings(struct net_device *ndev,
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 7ecc6b7..e4d6edf 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -645,7 +645,7 @@
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
unsigned long flags;
- int i, reg;
+ int i;
spin_lock_irqsave(&ctlr->lock, flags);
if (ctlr->state != CPDMA_STATE_ACTIVE) {
@@ -653,9 +653,6 @@
return -EINVAL;
}
- reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
- dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);
-
for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
if (ctlr->channels[i])
cpdma_chan_int_ctrl(ctlr->channels[i], enable);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index e6222e5..9d52c3a 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1877,8 +1877,8 @@
return 0;
}
-static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc)
+static int netcp_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+ __be16 proto, struct tc_to_netdev *tc)
{
u8 num_tc;
int i;
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index dd92950..0847a8f 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -1927,7 +1927,6 @@
struct netcp_intf *netcp = netdev_priv(ndev);
struct phy_device *phy = ndev->phydev;
struct gbe_intf *gbe_intf;
- int ret;
if (!phy)
return -EINVAL;
@@ -1939,11 +1938,10 @@
if (!gbe_intf->slave)
return -EINVAL;
- ret = phy_ethtool_ksettings_get(phy, cmd);
- if (!ret)
- cmd->base.port = gbe_intf->slave->phy_port_t;
+ phy_ethtool_ksettings_get(phy, cmd);
+ cmd->base.port = gbe_intf->slave->phy_port_t;
- return ret;
+ return 0;
}
static int keystone_set_link_ksettings(struct net_device *ndev,
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 7c634bc..aec9538 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -512,6 +512,7 @@
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index fa6a065..88d74ae 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -754,7 +754,7 @@
return NULL;
dev_kfree_skb_any(sk_tmp);
}
- veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
+ veth = skb_push(skb, VLAN_HLEN);
/* Move the mac addresses to the top of buffer */
memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 5ac6eaa..c2d15d9 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1504,13 +1504,12 @@
{
struct tsi108_prv_data *data = netdev_priv(dev);
unsigned long flags;
- int rc;
spin_lock_irqsave(&data->txlock, flags);
- rc = mii_ethtool_get_link_ksettings(&data->mii_if, cmd);
+ mii_ethtool_get_link_ksettings(&data->mii_if, cmd);
spin_unlock_irqrestore(&data->txlock, flags);
- return rc;
+ return 0;
}
static int tsi108_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 4cf41f7..acd29d6 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -2307,13 +2307,12 @@
struct ethtool_link_ksettings *cmd)
{
struct rhine_private *rp = netdev_priv(dev);
- int rc;
mutex_lock(&rp->task_lock);
- rc = mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
+ mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
mutex_unlock(&rp->task_lock);
- return rc;
+ return 0;
}
static int netdev_set_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index ae48c80..750954b 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1156,8 +1156,7 @@
hw->ep_shm_info[cur_epid].net_stats
.rx_errors += 1;
} else {
- memcpy(skb_put(skb, frame_len),
- frame, frame_len);
+ skb_put_data(skb, frame, frame_len);
skb->protocol = eth_type_trans(skb, netdev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1348,7 +1347,6 @@
netdev->mtu = fjes_support_mtu[3];
netdev->min_mtu = fjes_support_mtu[0];
netdev->max_mtu = fjes_support_mtu[3];
- netdev->flags |= IFF_BROADCAST;
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 199459b..d586ad9 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -212,6 +212,7 @@
struct genevehdr *gnvh = geneve_hdr(skb);
struct metadata_dst *tun_dst = NULL;
struct pcpu_sw_netstats *stats;
+ unsigned int len;
int err = 0;
void *oiph;
@@ -225,8 +226,10 @@
tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags,
vni_to_tunnel_id(gnvh->vni),
gnvh->opt_len * 4);
- if (!tun_dst)
+ if (!tun_dst) {
+ geneve->dev->stats.rx_dropped++;
goto drop;
+ }
/* Update tunnel dst according to Geneve options. */
ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
gnvh->options, gnvh->opt_len * 4);
@@ -234,8 +237,11 @@
/* Drop packets w/ critical options,
* since we don't support any...
*/
- if (gnvh->critical)
+ if (gnvh->critical) {
+ geneve->dev->stats.rx_frame_errors++;
+ geneve->dev->stats.rx_errors++;
goto drop;
+ }
}
skb_reset_mac_header(skb);
@@ -246,8 +252,10 @@
skb_dst_set(skb, &tun_dst->dst);
/* Ignore packet loops (and multicast echo) */
- if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
+ if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
+ geneve->dev->stats.rx_errors++;
goto drop;
+ }
oiph = skb_network_header(skb);
skb_reset_network_header(skb);
@@ -279,13 +287,15 @@
}
}
- stats = this_cpu_ptr(geneve->dev->tstats);
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
-
- gro_cells_receive(&geneve->gro_cells, skb);
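+	/* Record the length first, since gro_cells_receive() may consume
+	 * the skb.
+	 */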
+ len = skb->len;
+ err = gro_cells_receive(&geneve->gro_cells, skb);
+ if (likely(err == NET_RX_SUCCESS)) {
+ stats = this_cpu_ptr(geneve->dev->tstats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
+ }
return;
drop:
/* Consume bad packet */
@@ -334,7 +344,7 @@
struct geneve_sock *gs;
int opts_len;
- /* Need Geneve and inner Ethernet header to be present */
+	/* Need the UDP and Geneve headers to be present */
if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
goto drop;
@@ -357,8 +367,10 @@
opts_len = geneveh->opt_len * 4;
if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
htons(ETH_P_TEB),
- !net_eq(geneve->net, dev_net(geneve->dev))))
+ !net_eq(geneve->net, dev_net(geneve->dev)))) {
+ geneve->dev->stats.rx_dropped++;
goto drop;
+ }
geneve_rx(geneve, gs, skb);
return 0;
@@ -675,8 +687,7 @@
if (err)
goto free_dst;
- gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) +
- info->options_len);
+ gnvh = __skb_push(skb, sizeof(*gnvh) + info->options_len);
geneve_build_header(gnvh, info);
skb_set_inner_protocol(skb, htons(ETH_P_TEB));
return 0;
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index ca110cd..8e333a8 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -398,7 +398,7 @@
int payload_len = skb->len;
struct gtp0_header *gtp0;
- gtp0 = (struct gtp0_header *) skb_push(skb, sizeof(*gtp0));
+ gtp0 = skb_push(skb, sizeof(*gtp0));
gtp0->flags = 0x1e; /* v0, GTP-non-prime. */
gtp0->type = GTP_TPDU;
@@ -415,7 +415,7 @@
int payload_len = skb->len;
struct gtp1_header *gtp1;
- gtp1 = (struct gtp1_header *) skb_push(skb, sizeof(*gtp1));
+ gtp1 = skb_push(skb, sizeof(*gtp1));
/* Bits 8 7 6 5 4 3 2 1
* +--+--+--+--+--+--+--+--+
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 4a40a3d..aec6c26 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -298,7 +298,7 @@
return;
}
- memcpy(skb_put(skb,count), ax->rbuff, count);
+ skb_put_data(skb, ax->rbuff, count);
skb->protocol = ax25_type_trans(skb, ax->dev);
netif_rx(skb);
ax->dev->stats.rx_packets++;
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 6754cd0..295f267 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -540,7 +540,7 @@
}
scc->rx_buff = skb;
- *(skb_put(skb, 1)) = 0; /* KISS data */
+ skb_put_u8(skb, 0); /* KISS data */
}
if (skb->len >= scc->stat.bufsize)
@@ -555,7 +555,7 @@
return;
}
- *(skb_put(skb, 1)) = Inb(scc->data);
+ skb_put_u8(skb, Inb(scc->data));
}
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 1ce6239..71ddadb 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -962,8 +962,8 @@
pkt_len,
PCI_DMA_FROMDEVICE);
- memcpy(skb_put(skb, pkt_len),
- rx_skb->data, pkt_len);
+ skb_put_data(skb, rx_skb->data,
+ pkt_len);
pci_dma_sync_single_for_device(rrpriv->pci_dev,
desc->addr.addrlo,
@@ -1422,7 +1422,7 @@
skb = new_skb;
}
- ifield = (u32 *)skb_push(skb, 8);
+ ifield = skb_push(skb, 8);
ifield[0] = 0;
ifield[1] = hcb->ifield;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 6066f1b..b30a3c2 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -764,8 +764,7 @@
refcount_t sc_offered;
- /* Holds rndis device info */
- void *extension;
+ struct rndis_device *extension;
int ring_size;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 652453d..7c5ed8f 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -97,16 +97,6 @@
call_rcu(&nvdev->rcu, free_netvsc_device);
}
-static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
-{
- struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
-
- if (net_device && net_device->destroy)
- net_device = NULL;
-
- return net_device;
-}
-
static void netvsc_destroy_buf(struct hv_device *device)
{
struct nvsp_message *revoke_packet;
@@ -243,18 +233,15 @@
kfree(net_device->send_section_map);
}
-static int netvsc_init_buf(struct hv_device *device)
+static int netvsc_init_buf(struct hv_device *device,
+ struct netvsc_device *net_device)
{
int ret = 0;
- struct netvsc_device *net_device;
struct nvsp_message *init_packet;
struct net_device *ndev;
size_t map_words;
int node;
- net_device = get_outbound_net_device(device);
- if (!net_device)
- return -ENODEV;
ndev = hv_get_drvdata(device);
node = cpu_to_node(device->channel->target_cpu);
@@ -285,9 +272,7 @@
/* Notify the NetVsp of the gpadl handle */
init_packet = &net_device->channel_init_pkt;
-
memset(init_packet, 0, sizeof(struct nvsp_message));
-
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
init_packet->msg.v1_msg.send_recv_buf.
gpadl_handle = net_device->recv_buf_gpadl_handle;
@@ -486,20 +471,15 @@
return ret;
}
-static int netvsc_connect_vsp(struct hv_device *device)
+static int netvsc_connect_vsp(struct hv_device *device,
+ struct netvsc_device *net_device)
{
- int ret;
- struct netvsc_device *net_device;
- struct nvsp_message *init_packet;
- int ndis_version;
const u32 ver_list[] = {
NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
- NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
- int i;
-
- net_device = get_outbound_net_device(device);
- if (!net_device)
- return -ENODEV;
+ NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5
+ };
+ struct nvsp_message *init_packet;
+ int ndis_version, i, ret;
init_packet = &net_device->channel_init_pkt;
@@ -549,7 +529,7 @@
net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
- ret = netvsc_init_buf(device);
+ ret = netvsc_init_buf(device, net_device);
cleanup:
return ret;
@@ -843,7 +823,7 @@
struct hv_page_buffer **pb,
struct sk_buff *skb)
{
- struct netvsc_device *net_device;
+ struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
int ret = 0;
struct netvsc_channel *nvchan;
u32 pktlen = packet->total_data_buflen, msd_len = 0;
@@ -854,15 +834,15 @@
bool try_batch;
bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
- net_device = get_outbound_net_device(device);
- if (!net_device)
+ /* If device is rescinded, return error and packet will get dropped. */
+ if (unlikely(net_device->destroy))
return -ENODEV;
/* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get
* here before the negotiation with the host is finished and
* send_section_map may not be allocated yet.
*/
- if (!net_device->send_section_map)
+ if (unlikely(!net_device->send_section_map))
return -EAGAIN;
nvchan = &net_device->chan_table[packet->q_idx];
@@ -1349,7 +1329,7 @@
rcu_assign_pointer(net_device_ctx->nvdev, net_device);
/* Connect with the NetVsp */
- ret = netvsc_connect_vsp(device);
+ ret = netvsc_connect_vsp(device, net_device);
if (ret != 0) {
netdev_err(ndev,
"unable to connect to NetVSP - %d\n", ret);
@@ -1368,4 +1348,5 @@
free_netvsc_device(&net_device->rcu);
return ret;
+
}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 82d6c02..9a6c586 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -37,6 +37,8 @@
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
#include "hyperv_net.h"
@@ -93,7 +95,7 @@
struct net_device_context *net_device_ctx = netdev_priv(net);
struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
int ret;
- u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
+ u32 aread, i, msec = 10, retry = 0, retry_max = 20;
struct vmbus_channel *chn;
netif_tx_disable(net);
@@ -112,15 +114,11 @@
if (!chn)
continue;
- hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
- &awrite);
-
+ aread = hv_get_bytes_to_read(&chn->inbound);
if (aread)
break;
- hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
- &awrite);
-
+ aread = hv_get_bytes_to_read(&chn->outbound);
if (aread)
break;
}
@@ -316,34 +314,14 @@
return slots_used;
}
-static int count_skb_frag_slots(struct sk_buff *skb)
+/* Estimate the number of page buffers needed to transmit.
+ * We need at most 2 for the RNDIS header, plus the skb body and fragments.
+ */
+static unsigned int netvsc_get_slots(const struct sk_buff *skb)
{
- int i, frags = skb_shinfo(skb)->nr_frags;
- int pages = 0;
-
- for (i = 0; i < frags; i++) {
- skb_frag_t *frag = skb_shinfo(skb)->frags + i;
- unsigned long size = skb_frag_size(frag);
- unsigned long offset = frag->page_offset;
-
- /* Skip unused frames from start of page */
- offset &= ~PAGE_MASK;
- pages += PFN_UP(offset + size);
- }
- return pages;
-}
-
-static int netvsc_get_slots(struct sk_buff *skb)
-{
- char *data = skb->data;
- unsigned int offset = offset_in_page(data);
- unsigned int len = skb_headlen(skb);
- int slots;
- int frag_slots;
-
- slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
- frag_slots = count_skb_frag_slots(skb);
- return slots + frag_slots;
+ return PFN_UP(offset_in_page(skb->data) + skb_headlen(skb))
+ + skb_shinfo(skb)->nr_frags
+ + 2;
}
static u32 net_checksum_info(struct sk_buff *skb)
@@ -381,21 +359,18 @@
struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
struct hv_page_buffer *pb = page_buf;
- /* We will atmost need two pages to describe the rndis
- * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
+ /* We can only transmit MAX_PAGE_BUFFER_COUNT number
* of pages in a single packet. If skb is scattered around
* more pages we try linearizing it.
*/
-
- num_data_pgs = netvsc_get_slots(skb) + 2;
-
+ num_data_pgs = netvsc_get_slots(skb);
if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
++net_device_ctx->eth_stats.tx_scattered;
if (skb_linearize(skb))
goto no_memory;
- num_data_pgs = netvsc_get_slots(skb) + 2;
+ num_data_pgs = netvsc_get_slots(skb);
if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
++net_device_ctx->eth_stats.tx_too_big;
goto drop;
@@ -618,7 +593,7 @@
* Copy to skb. This copy is needed here since the memory pointed by
* hv_netvsc_packet cannot be deallocated
*/
- memcpy(skb_put(skb, buflen), data, buflen);
+ skb_put_data(skb, data, buflen);
skb->protocol = eth_type_trans(skb, net);
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 76ba7ec..548d9d0 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -723,7 +723,7 @@
return;
}
- memcpy(skb_put(skb, len), buf + 2, len);
+ skb_put_data(skb, buf + 2, len);
ieee802154_rx_irqsafe(lp->hw, skb, lqi);
kfree(ctx);
}
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 25fd3b0..a626c53 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -912,7 +912,7 @@
)
{
int i, status = 0;
- struct ca8210_priv *priv = spi_get_drvdata(spi);
+ struct ca8210_priv *priv;
struct cas_control *cas_ctl;
if (!spi) {
@@ -923,6 +923,7 @@
return -ENODEV;
}
+ priv = spi_get_drvdata(spi);
reinit_completion(&priv->spi_transfer_complete);
dev_dbg(&spi->dev, "ca8210_spi_transfer called\n");
@@ -1808,10 +1809,9 @@
/* Allocate mtu size buffer for every rx packet */
skb = dev_alloc_skb(IEEE802154_MTU + sizeof(hdr));
- if (!skb) {
- dev_crit(&priv->spi->dev, "dev_alloc_skb failed\n");
+ if (!skb)
return -ENOMEM;
- }
+
skb_reserve(skb, sizeof(hdr));
msdulen = data_ind[22]; /* msdu_length */
@@ -1875,7 +1875,7 @@
copy_payload:
/* Add <msdulen> bytes of space to the back of the buffer */
/* Copy msdu to skb */
- memcpy(skb_put(skb, msdulen), &data_ind[29], msdulen);
+ skb_put_data(skb, &data_ind[29], msdulen);
ieee802154_rx_irqsafe(hw, skb, mpdulinkquality);
return 0;
@@ -3143,10 +3143,6 @@
pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
- dev_crit(
- &spi_device->dev,
- "Could not allocate platform data\n"
- );
ret = -ENOMEM;
goto error;
}
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index bd63289..7d33496 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -774,7 +774,7 @@
return;
}
- memcpy(skb_put(skb, len), rx_local_buf, len);
+ skb_put_data(skb, rx_local_buf, len);
ieee802154_rx_irqsafe(devrec->hw, skb, 0);
#ifdef DEBUG
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 7c7680c..dc888dd 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -824,6 +824,33 @@
return NOTIFY_OK;
}
+static int ipvlan_addr6_validator_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct in6_validator_info *i6vi = (struct in6_validator_info *)ptr;
+ struct net_device *dev = (struct net_device *)i6vi->i6vi_dev->dev;
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ /* FIXME IPv6 autoconf calls us from bh without RTNL */
+ if (in_softirq())
+ return NOTIFY_DONE;
+
+ if (!netif_is_ipvlan(dev))
+ return NOTIFY_DONE;
+
+ if (!ipvlan || !ipvlan->port)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UP:
+ if (ipvlan_addr_busy(ipvlan->port, &i6vi->i6vi_addr, true))
+ return notifier_from_errno(-EADDRINUSE);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) {
@@ -871,10 +898,37 @@
return NOTIFY_OK;
}
+static int ipvlan_addr4_validator_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct in_validator_info *ivi = (struct in_validator_info *)ptr;
+ struct net_device *dev = (struct net_device *)ivi->ivi_dev->dev;
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ if (!netif_is_ipvlan(dev))
+ return NOTIFY_DONE;
+
+ if (!ipvlan || !ipvlan->port)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UP:
+ if (ipvlan_addr_busy(ipvlan->port, &ivi->ivi_addr, false))
+ return notifier_from_errno(-EADDRINUSE);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
static struct notifier_block ipvlan_addr4_notifier_block __read_mostly = {
.notifier_call = ipvlan_addr4_event,
};
+static struct notifier_block ipvlan_addr4_vtor_notifier_block __read_mostly = {
+ .notifier_call = ipvlan_addr4_validator_event,
+};
+
static struct notifier_block ipvlan_notifier_block __read_mostly = {
.notifier_call = ipvlan_device_event,
};
@@ -883,6 +937,10 @@
.notifier_call = ipvlan_addr6_event,
};
+static struct notifier_block ipvlan_addr6_vtor_notifier_block __read_mostly = {
+ .notifier_call = ipvlan_addr6_validator_event,
+};
+
static void ipvlan_ns_exit(struct net *net)
{
struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid);
@@ -907,7 +965,10 @@
ipvlan_init_secret();
register_netdevice_notifier(&ipvlan_notifier_block);
register_inet6addr_notifier(&ipvlan_addr6_notifier_block);
+ register_inet6addr_validator_notifier(
+ &ipvlan_addr6_vtor_notifier_block);
register_inetaddr_notifier(&ipvlan_addr4_notifier_block);
+ register_inetaddr_validator_notifier(&ipvlan_addr4_vtor_notifier_block);
err = register_pernet_subsys(&ipvlan_net_ops);
if (err < 0)
@@ -922,7 +983,11 @@
return 0;
error:
unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
+ unregister_inetaddr_validator_notifier(
+ &ipvlan_addr4_vtor_notifier_block);
unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
+ unregister_inet6addr_validator_notifier(
+ &ipvlan_addr6_vtor_notifier_block);
unregister_netdevice_notifier(&ipvlan_notifier_block);
return err;
}
@@ -933,7 +998,11 @@
unregister_pernet_subsys(&ipvlan_net_ops);
unregister_netdevice_notifier(&ipvlan_notifier_block);
unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
+ unregister_inetaddr_validator_notifier(
+ &ipvlan_addr4_vtor_notifier_block);
unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
+ unregister_inet6addr_validator_notifier(
+ &ipvlan_addr6_vtor_notifier_block);
}
module_init(ipvlan_init_module);
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 23ed89a..19a55cb 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -1459,7 +1459,7 @@
/* Make sure IP header gets aligned */
skb_reserve(skb, 1);
- memcpy(skb_put(skb, len), self->rx_buff.data, len);
+ skb_put_data(skb, self->rx_buff.data, len);
self->netdev->stats.rx_packets++;
self->netdev->stats.rx_bytes += len;
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 15b9200..6638784 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -578,7 +578,7 @@
skb = rd->skb;
rd->skb = NULL;
skb->dev = ndev;
- memcpy(skb_put(skb,len), rd->buf, len);
+ skb_put_data(skb, rd->buf, len);
skb_reset_mac_header(skb);
if (in_interrupt())
netif_rx(skb);
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 7941167..e370d7c 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -588,8 +588,6 @@
stats->tx_packets++;
stats->tx_bytes += len;
u64_stats_update_end(&stats->syncp);
- } else {
- dev->stats.tx_dropped++;
}
}
@@ -699,7 +697,7 @@
unprotected_len = skb->len;
eth = eth_hdr(skb);
sci_present = send_sci(secy);
- hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present));
+ hh = skb_push(skb, macsec_extra_len(sci_present));
memmove(hh, eth, 2 * ETH_ALEN);
pn = tx_sa_update_pn(tx_sa, secy);
@@ -742,7 +740,12 @@
macsec_fill_iv(iv, secy->sci, pn);
sg_init_table(sg, ret);
- skb_to_sgvec(skb, sg, 0, skb->len);
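+	/* skb_to_sgvec() can fail (e.g. too few scatterlist entries),
+	 * so check and propagate the error.
+	 */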
+ ret = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (unlikely(ret < 0)) {
+ macsec_txsa_put(tx_sa);
+ kfree_skb(skb);
+ return ERR_PTR(ret);
+ }
if (tx_sc->encrypt) {
int len = skb->len - macsec_hdr_len(sci_present) -
@@ -883,7 +886,7 @@
struct macsec_dev *macsec = macsec_priv(dev);
struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
struct macsec_rx_sc *rx_sc = rx_sa->sc;
- int len, ret;
+ int len;
u32 pn;
aead_request_free(macsec_skb_cb(skb)->req);
@@ -904,11 +907,8 @@
macsec_reset_skb(skb, macsec->secy.netdev);
len = skb->len;
- ret = gro_cells_receive(&macsec->gro_cells, skb);
- if (ret == NET_RX_SUCCESS)
+ if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
count_rx(dev, len);
- else
- macsec->secy.netdev->stats.rx_dropped++;
rcu_read_unlock_bh();
@@ -952,7 +952,11 @@
macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
sg_init_table(sg, ret);
- skb_to_sgvec(skb, sg, 0, skb->len);
+ ret = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (unlikely(ret < 0)) {
+ kfree_skb(skb);
+ return ERR_PTR(ret);
+ }
if (hdr->tci_an & MACSEC_TCI_E) {
/* confidentiality: ethernet + macsec header
@@ -1037,7 +1041,6 @@
*/
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct sk_buff *nskb;
- int ret;
struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
@@ -1054,13 +1057,10 @@
nskb->dev = macsec->secy.netdev;
- ret = netif_rx(nskb);
- if (ret == NET_RX_SUCCESS) {
+ if (netif_rx(nskb) == NET_RX_SUCCESS) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsUntagged++;
u64_stats_update_end(&secy_stats->syncp);
- } else {
- macsec->secy.netdev->stats.rx_dropped++;
}
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 67bf7eb..8ca274c 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -703,10 +703,8 @@
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
- dev_set_mac_address(vlan->lowerdev, addr);
- return 0;
- }
+ if (vlan->mode == MACVLAN_MODE_PASSTHRU)
+ return dev_set_mac_address(vlan->lowerdev, addr);
return macvlan_sync_address(dev, addr->sa_data);
}
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index 6d953c5..44612122 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -141,11 +141,9 @@
*
* The @cmd parameter is expected to have been cleared before calling
* mii_ethtool_get_link_ksettings().
- *
- * Returns 0 for success, negative on error.
*/
-int mii_ethtool_get_link_ksettings(struct mii_if_info *mii,
- struct ethtool_link_ksettings *cmd)
+void mii_ethtool_get_link_ksettings(struct mii_if_info *mii,
+ struct ethtool_link_ksettings *cmd)
{
struct net_device *dev = mii->dev;
u16 bmcr, bmsr, ctrl1000 = 0, stat1000 = 0;
@@ -227,8 +225,6 @@
lp_advertising);
/* ignore maxtxpkt, maxrxpkt for now */
-
- return 0;
}
/**
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 3ab6c58..2dda720 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -235,6 +235,11 @@
---help---
Currently supports the cis8204
+config CORTINA_PHY
+ tristate "Cortina EDC CDR 10G Ethernet PHY"
+ ---help---
+	  Currently supports the CS4340 PHY.
+
config DAVICOM_PHY
tristate "Davicom PHYs"
---help---
@@ -288,6 +293,11 @@
---help---
Currently has a driver for the 88E1011S
+config MARVELL_10G_PHY
+ tristate "Marvell Alaska 10Gbit PHYs"
+ ---help---
+ Support for the Marvell Alaska MV88X3310 and compatible PHYs.
+
config MESON_GXL_PHY
tristate "Amlogic Meson GXL Internal PHY"
depends on ARCH_MESON || COMPILE_TEST
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index e36db9a..8e9b9f3 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -1,6 +1,6 @@
# Makefile for Linux PHY drivers and MDIO bus drivers
-libphy-y := phy.o phy-core.o phy_device.o
+libphy-y := phy.o phy-c45.o phy-core.o phy_device.o
mdio-bus-y += mdio_bus.o mdio_device.o
ifdef CONFIG_MDIO_DEVICE
@@ -46,6 +46,7 @@
obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
obj-$(CONFIG_CICADA_PHY) += cicada.o
+obj-$(CONFIG_CORTINA_PHY) += cortina.o
obj-$(CONFIG_DAVICOM_PHY) += davicom.o
obj-$(CONFIG_DP83640_PHY) += dp83640.o
obj-$(CONFIG_DP83848_PHY) += dp83848.o
@@ -56,6 +57,7 @@
obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
obj-$(CONFIG_LXT_PHY) += lxt.o
obj-$(CONFIG_MARVELL_PHY) += marvell.o
+obj-$(CONFIG_MARVELL_10G_PHY) += marvell10g.o
obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o
obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index a32dc5d..1e9ad30 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -540,7 +540,7 @@
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5411",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -551,7 +551,7 @@
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5421",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -562,7 +562,7 @@
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54210E",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -573,7 +573,7 @@
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5461",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -584,7 +584,7 @@
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54612E",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -595,7 +595,7 @@
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54616S",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -606,7 +606,7 @@
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5464",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -617,7 +617,7 @@
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5481",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
.read_status = genphy_read_status,
@@ -628,7 +628,7 @@
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54810",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
.read_status = genphy_read_status,
@@ -639,7 +639,7 @@
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5482",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm5482_config_init,
.config_aneg = genphy_config_aneg,
.read_status = bcm5482_read_status,
@@ -650,7 +650,7 @@
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -661,7 +661,7 @@
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610M",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -672,7 +672,7 @@
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM57780",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -683,7 +683,7 @@
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCMAC131",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = brcm_fet_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -694,7 +694,7 @@
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5241",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = brcm_fet_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
new file mode 100644
index 0000000..72f4228
--- /dev/null
+++ b/drivers/net/phy/cortina.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * CORTINA is a registered trademark of Cortina Systems, Inc.
+ *
+ */
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#define PHY_ID_CS4340 0x13e51002
+
+#define VILLA_GLOBAL_CHIP_ID_LSB 0x0
+#define VILLA_GLOBAL_CHIP_ID_MSB 0x1
+
+#define VILLA_GLOBAL_GPIO_1_INTS 0x017
+
+static int cortina_read_reg(struct phy_device *phydev, u16 regnum)
+{
+ return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
+ MII_ADDR_C45 | regnum);
+}
+
+static int cortina_config_aneg(struct phy_device *phydev)
+{
+ phydev->supported = SUPPORTED_10000baseT_Full;
+ phydev->advertising = SUPPORTED_10000baseT_Full;
+
+ return 0;
+}
+
+static int cortina_read_status(struct phy_device *phydev)
+{
+ int gpio_int_status, ret = 0;
+
+ gpio_int_status = cortina_read_reg(phydev, VILLA_GLOBAL_GPIO_1_INTS);
+ if (gpio_int_status < 0) {
+ ret = gpio_int_status;
+ goto err;
+ }
+
+ if (gpio_int_status & 0x8) {
+ /* link is up when edc_converged is set */
+ phydev->speed = SPEED_10000;
+ phydev->duplex = DUPLEX_FULL;
+ phydev->link = 1;
+ } else {
+ phydev->link = 0;
+ }
+
+err:
+ return ret;
+}
+
+static int cortina_soft_reset(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static int cortina_probe(struct phy_device *phydev)
+{
+ u32 phy_id = 0;
+ int id_lsb = 0, id_msb = 0;
+
+ /* Read device id from phy registers. */
+ id_lsb = cortina_read_reg(phydev, VILLA_GLOBAL_CHIP_ID_LSB);
+ if (id_lsb < 0)
+ return -ENXIO;
+
+ phy_id = id_lsb << 16;
+
+ id_msb = cortina_read_reg(phydev, VILLA_GLOBAL_CHIP_ID_MSB);
+ if (id_msb < 0)
+ return -ENXIO;
+
+ phy_id |= id_msb;
+
+ /* Make sure the device tree binding matched the driver with the
+ * right device.
+ */
+ if (phy_id != phydev->drv->phy_id) {
+ phydev_err(phydev, "Error matching phy with %s driver\n",
+ phydev->drv->name);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static struct phy_driver cortina_driver[] = {
+{
+ .phy_id = PHY_ID_CS4340,
+ .phy_id_mask = 0xffffffff,
+ .name = "Cortina CS4340",
+ .config_aneg = cortina_config_aneg,
+ .read_status = cortina_read_status,
+ .soft_reset = cortina_soft_reset,
+ .probe = cortina_probe,
+},
+};
+
+module_phy_driver(cortina_driver);
+
+static struct mdio_device_id __maybe_unused cortina_tbl[] = {
+ { PHY_ID_CS4340, 0xffffffff},
+ {},
+};
+
+MODULE_DEVICE_TABLE(mdio, cortina_tbl);
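The cortina_read_reg() helper above issues Clause 45 reads through the
generic mdiobus_read() interface by setting MII_ADDR_C45 in the register
argument. As a rough sketch (not part of the patch; c45_read() and its
parameters are hypothetical), the MMD device address and 16-bit register
number pack into that argument like so:

/* Illustrative only: composing a Clause 45 bus address.
 * MII_ADDR_C45 marks the access as Clause 45; the MMD (device)
 * address sits in the bits above the 16-bit register number.
 */
static int c45_read(struct mii_bus *bus, int port, int devad, u16 regnum)
{
	return mdiobus_read(bus, port,
			    MII_ADDR_C45 | (devad << 16) | regnum);
}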
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 8d198a1f..09d2151 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -152,7 +152,6 @@
int adv;
int err;
int lpa;
- int lpagb = 0;
/* Update the link, but return if there was an error */
err = lxt973a2_update_link(phydev);
@@ -178,18 +177,15 @@
*/
} while (lpa == adv && retry--);
+ phydev->lp_advertising = mii_lpa_to_ethtool_lpa_t(lpa);
+
lpa &= adv;
phydev->speed = SPEED_10;
phydev->duplex = DUPLEX_HALF;
phydev->pause = phydev->asym_pause = 0;
- if (lpagb & (LPA_1000FULL | LPA_1000HALF)) {
- phydev->speed = SPEED_1000;
-
- if (lpagb & LPA_1000FULL)
- phydev->duplex = DUPLEX_FULL;
- } else if (lpa & (LPA_100FULL | LPA_100HALF)) {
+ if (lpa & (LPA_100FULL | LPA_100HALF)) {
phydev->speed = SPEED_100;
if (lpa & LPA_100FULL)
@@ -222,6 +218,7 @@
phydev->speed = SPEED_10;
phydev->pause = phydev->asym_pause = 0;
+ phydev->lp_advertising = 0;
}
return 0;
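The lxt973a2 path above now fills phydev->lp_advertising from the raw
MII_LPA word via mii_lpa_to_ethtool_lpa_t(). Below is a simplified sketch
of the translation that helper performs (the real one in linux/mii.h also
folds in the autoneg-acknowledge bit); lpa_to_ethtool_sketch() is a
made-up name:

/* Simplified illustration of LPA -> ethtool link-mode translation. */
static u32 lpa_to_ethtool_sketch(u32 lpa)
{
	u32 result = 0;

	if (lpa & LPA_10HALF)
		result |= ADVERTISED_10baseT_Half;
	if (lpa & LPA_10FULL)
		result |= ADVERTISED_10baseT_Full;
	if (lpa & LPA_100HALF)
		result |= ADVERTISED_100baseT_Half;
	if (lpa & LPA_100FULL)
		result |= ADVERTISED_100baseT_Full;

	return result;
}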
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 57297ba..8400403 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -41,6 +41,12 @@
#include <linux/uaccess.h>
#define MII_MARVELL_PHY_PAGE 22
+#define MII_MARVELL_COPPER_PAGE 0x00
+#define MII_MARVELL_FIBER_PAGE 0x01
+#define MII_MARVELL_MSCR_PAGE 0x02
+#define MII_MARVELL_LED_PAGE 0x03
+#define MII_MARVELL_MISC_TEST_PAGE 0x06
+#define MII_MARVELL_WOL_PAGE 0x11
#define MII_M1011_IEVENT 0x13
#define MII_M1011_IEVENT_CLEAR 0x0000
@@ -54,7 +60,6 @@
#define MII_M1011_PHY_SCR_MDI_X 0x0020
#define MII_M1011_PHY_SCR_AUTO_CROSS 0x0060
-#define MII_M1145_PHY_EXT_ADDR_PAGE 0x16
#define MII_M1145_PHY_EXT_SR 0x1b
#define MII_M1145_PHY_EXT_CR 0x14
#define MII_M1145_RGMII_RX_DELAY 0x0080
@@ -83,10 +88,6 @@
#define MII_M1111_HWCFG_FIBER_COPPER_AUTO 0x8000
#define MII_M1111_HWCFG_FIBER_COPPER_RES 0x2000
-#define MII_M1111_COPPER 0
-#define MII_M1111_FIBER 1
-
-#define MII_88E1121_PHY_MSCR_PAGE 2
#define MII_88E1121_PHY_MSCR_REG 21
#define MII_88E1121_PHY_MSCR_RX_DELAY BIT(5)
#define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4)
@@ -112,7 +113,6 @@
#define MII_88E1318S_PHY_CSIER_WOL_EIE BIT(7)
/* LED Timer Control Register */
-#define MII_88E1318S_PHY_LED_PAGE 0x03
#define MII_88E1318S_PHY_LED_TCR 0x12
#define MII_88E1318S_PHY_LED_TCR_FORCE_INT BIT(15)
#define MII_88E1318S_PHY_LED_TCR_INTn_ENABLE BIT(7)
@@ -123,13 +123,11 @@
#define MII_88E1318S_PHY_MAGIC_PACKET_WORD1 0x18
#define MII_88E1318S_PHY_MAGIC_PACKET_WORD0 0x19
-#define MII_88E1318S_PHY_WOL_PAGE 0x11
#define MII_88E1318S_PHY_WOL_CTRL 0x10
#define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12)
#define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14)
#define MII_88E1121_PHY_LED_CTRL 16
-#define MII_88E1121_PHY_LED_PAGE 3
#define MII_88E1121_PHY_LED_DEF 0x0030
#define MII_M1011_PHY_STATUS 0x11
@@ -189,6 +187,29 @@
struct device *hwmon_dev;
};
+static int marvell_get_page(struct phy_device *phydev)
+{
+ return phy_read(phydev, MII_MARVELL_PHY_PAGE);
+}
+
+static int marvell_set_page(struct phy_device *phydev, int page)
+{
+ return phy_write(phydev, MII_MARVELL_PHY_PAGE, page);
+}
+
+static int marvell_get_set_page(struct phy_device *phydev, int page)
+{
+ int oldpage = marvell_get_page(phydev);
+
+ if (oldpage < 0)
+ return oldpage;
+
+ if (page != oldpage)
+ return marvell_set_page(phydev, page);
+
+ return 0;
+}
+
static int marvell_ack_interrupt(struct phy_device *phydev)
{
int err;
@@ -207,9 +228,11 @@
int err;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- err = phy_write(phydev, MII_M1011_IMASK, MII_M1011_IMASK_INIT);
+ err = phy_write(phydev, MII_M1011_IMASK,
+ MII_M1011_IMASK_INIT);
else
- err = phy_write(phydev, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR);
+ err = phy_write(phydev, MII_M1011_IMASK,
+ MII_M1011_IMASK_CLEAR);
return err;
}
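The new page helpers above exist so that every caller follows the same
save/modify/restore discipline instead of open-coding MII_MARVELL_PHY_PAGE
writes. A minimal sketch of that pattern, with a hypothetical register
argument:

/* Illustrative pattern only; reg is whatever paged register is needed. */
static int example_paged_read(struct phy_device *phydev, u16 reg)
{
	int oldpage, ret;

	oldpage = marvell_get_page(phydev);
	if (oldpage < 0)
		return oldpage;

	ret = marvell_set_page(phydev, MII_MARVELL_LED_PAGE);
	if (ret < 0)
		return ret;

	ret = phy_read(phydev, reg);

	marvell_set_page(phydev, oldpage);
	return ret;
}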
@@ -271,8 +294,7 @@
if (phydev->autoneg != AUTONEG_ENABLE) {
int bmcr;
- /*
- * A write to speed/duplex bits (that is performed by
+ /* A write to speed/duplex bits (that is performed by
* genphy_config_aneg() call above) must be followed by
* a software reset. Otherwise, the write has no effect.
*/
@@ -367,8 +389,7 @@
}
#ifdef CONFIG_OF_MDIO
-/*
- * Set and/or override some configuration registers based on the
+/* Set and/or override some configuration registers based on the
* marvell,reg-init property stored in the of_node for the phydev.
*
* marvell,reg-init = <reg-page reg mask value>,...;
@@ -394,7 +415,7 @@
if (!paddr || len < (4 * sizeof(*paddr)))
return 0;
- saved_page = phy_read(phydev, MII_MARVELL_PHY_PAGE);
+ saved_page = marvell_get_page(phydev);
if (saved_page < 0)
return saved_page;
current_page = saved_page;
@@ -402,15 +423,15 @@
ret = 0;
len /= sizeof(*paddr);
for (i = 0; i < len - 3; i += 4) {
- u16 reg_page = be32_to_cpup(paddr + i);
+ u16 page = be32_to_cpup(paddr + i);
u16 reg = be32_to_cpup(paddr + i + 1);
u16 mask = be32_to_cpup(paddr + i + 2);
u16 val_bits = be32_to_cpup(paddr + i + 3);
int val;
- if (reg_page != current_page) {
- current_page = reg_page;
- ret = phy_write(phydev, MII_MARVELL_PHY_PAGE, reg_page);
+ if (page != current_page) {
+ current_page = page;
+ ret = marvell_set_page(phydev, page);
if (ret < 0)
goto err;
}
@@ -429,11 +450,10 @@
ret = phy_write(phydev, reg, val);
if (ret < 0)
goto err;
-
}
err:
if (current_page != saved_page) {
- i = phy_write(phydev, MII_MARVELL_PHY_PAGE, saved_page);
+ i = marvell_set_page(phydev, saved_page);
if (ret == 0)
ret = i;
}
@@ -450,15 +470,11 @@
{
int err, oldpage, mscr;
- oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
-
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
- MII_88E1121_PHY_MSCR_PAGE);
- if (err < 0)
- return err;
+ oldpage = marvell_get_set_page(phydev, MII_MARVELL_MSCR_PAGE);
+ if (oldpage < 0)
+ return oldpage;
if (phy_interface_is_rgmii(phydev)) {
-
mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) &
MII_88E1121_PHY_MSCR_DELAY_MASK;
@@ -475,7 +491,7 @@
return err;
}
- phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
+ marvell_set_page(phydev, oldpage);
err = phy_write(phydev, MII_BMCR, BMCR_RESET);
if (err < 0)
@@ -493,12 +509,9 @@
{
int err, oldpage, mscr;
- oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
-
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
- MII_88E1121_PHY_MSCR_PAGE);
- if (err < 0)
- return err;
+ oldpage = marvell_get_set_page(phydev, MII_MARVELL_MSCR_PAGE);
+ if (oldpage < 0)
+ return oldpage;
mscr = phy_read(phydev, MII_88E1318S_PHY_MSCR1_REG);
mscr |= MII_88E1318S_PHY_MSCR1_PAD_ODD;
@@ -507,7 +520,7 @@
if (err < 0)
return err;
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
+ err = marvell_set_page(phydev, oldpage);
if (err < 0)
return err;
@@ -607,7 +620,7 @@
{
int err;
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
goto error;
@@ -617,7 +630,7 @@
goto error;
/* Then the fiber link */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
+ err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
@@ -625,10 +638,10 @@
if (err < 0)
goto error;
- return phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ return marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
error:
- phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
return err;
}
@@ -651,7 +664,7 @@
mdelay(500);
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0);
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
return err;
@@ -663,7 +676,7 @@
if (err < 0)
return err;
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 2);
+ err = marvell_set_page(phydev, MII_MARVELL_MSCR_PAGE);
if (err < 0)
return err;
temp = phy_read(phydev, MII_M1116R_CONTROL_REG_MAC);
@@ -672,7 +685,7 @@
err = phy_write(phydev, MII_M1116R_CONTROL_REG_MAC, temp);
if (err < 0)
return err;
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0);
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
return err;
@@ -706,103 +719,129 @@
return marvell_config_init(phydev);
}
-static int m88e1111_config_init(struct phy_device *phydev)
+static int m88e1111_config_init_rgmii(struct phy_device *phydev)
{
int err;
int temp;
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
+ if (temp < 0)
+ return temp;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
+ temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY);
+ } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ temp &= ~MII_M1111_TX_DELAY;
+ temp |= MII_M1111_RX_DELAY;
+ } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ temp &= ~MII_M1111_RX_DELAY;
+ temp |= MII_M1111_TX_DELAY;
+ }
+
+ err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp);
+ if (err < 0)
+ return err;
+
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
+ if (temp < 0)
+ return temp;
+
+ temp &= ~(MII_M1111_HWCFG_MODE_MASK);
+
+ if (temp & MII_M1111_HWCFG_FIBER_COPPER_RES)
+ temp |= MII_M1111_HWCFG_MODE_FIBER_RGMII;
+ else
+ temp |= MII_M1111_HWCFG_MODE_COPPER_RGMII;
+
+ return phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+}
+
+static int m88e1111_config_init_sgmii(struct phy_device *phydev)
+{
+ int err;
+ int temp;
+
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
+ if (temp < 0)
+ return temp;
+
+ temp &= ~(MII_M1111_HWCFG_MODE_MASK);
+ temp |= MII_M1111_HWCFG_MODE_SGMII_NO_CLK;
+ temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO;
+
+ err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+ if (err < 0)
+ return err;
+
+ /* make sure copper is selected */
+ return marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
+}
+
+static int m88e1111_config_init_rtbi(struct phy_device *phydev)
+{
+ int err;
+ int temp;
+
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
+ if (temp < 0)
+ return temp;
+
+ temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY);
+ err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp);
+ if (err < 0)
+ return err;
+
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
+ if (temp < 0)
+ return temp;
+
+ temp &= ~(MII_M1111_HWCFG_MODE_MASK |
+ MII_M1111_HWCFG_FIBER_COPPER_RES);
+ temp |= 0x7 | MII_M1111_HWCFG_FIBER_COPPER_AUTO;
+
+ err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+ if (err < 0)
+ return err;
+
+ /* soft reset */
+ err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ if (err < 0)
+ return err;
+
+ do
+ temp = phy_read(phydev, MII_BMCR);
+ while (temp & BMCR_RESET);
+
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
+ if (temp < 0)
+ return temp;
+
+ temp &= ~(MII_M1111_HWCFG_MODE_MASK |
+ MII_M1111_HWCFG_FIBER_COPPER_RES);
+ temp |= MII_M1111_HWCFG_MODE_COPPER_RTBI |
+ MII_M1111_HWCFG_FIBER_COPPER_AUTO;
+
+ return phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+}
+
+static int m88e1111_config_init(struct phy_device *phydev)
+{
+ int err;
+
if (phy_interface_is_rgmii(phydev)) {
-
- temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
- if (temp < 0)
- return temp;
-
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
- temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY);
- } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
- temp &= ~MII_M1111_TX_DELAY;
- temp |= MII_M1111_RX_DELAY;
- } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
- temp &= ~MII_M1111_RX_DELAY;
- temp |= MII_M1111_TX_DELAY;
- }
-
- err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp);
- if (err < 0)
- return err;
-
- temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
- if (temp < 0)
- return temp;
-
- temp &= ~(MII_M1111_HWCFG_MODE_MASK);
-
- if (temp & MII_M1111_HWCFG_FIBER_COPPER_RES)
- temp |= MII_M1111_HWCFG_MODE_FIBER_RGMII;
- else
- temp |= MII_M1111_HWCFG_MODE_COPPER_RGMII;
-
- err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
- if (err < 0)
+ err = m88e1111_config_init_rgmii(phydev);
+ if (err)
return err;
}
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
- temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
- if (temp < 0)
- return temp;
-
- temp &= ~(MII_M1111_HWCFG_MODE_MASK);
- temp |= MII_M1111_HWCFG_MODE_SGMII_NO_CLK;
- temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO;
-
- err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
- if (err < 0)
- return err;
-
- /* make sure copper is selected */
- err = phy_read(phydev, MII_M1145_PHY_EXT_ADDR_PAGE);
- if (err < 0)
- return err;
-
- err = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE,
- err & (~0xff));
+ err = m88e1111_config_init_sgmii(phydev);
if (err < 0)
return err;
}
if (phydev->interface == PHY_INTERFACE_MODE_RTBI) {
- temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
- if (temp < 0)
- return temp;
- temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY);
- err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp);
- if (err < 0)
- return err;
-
- temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
- if (temp < 0)
- return temp;
- temp &= ~(MII_M1111_HWCFG_MODE_MASK | MII_M1111_HWCFG_FIBER_COPPER_RES);
- temp |= 0x7 | MII_M1111_HWCFG_FIBER_COPPER_AUTO;
- err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
- if (err < 0)
- return err;
-
- /* soft reset */
- err = phy_write(phydev, MII_BMCR, BMCR_RESET);
- if (err < 0)
- return err;
- do
- temp = phy_read(phydev, MII_BMCR);
- while (temp & BMCR_RESET);
-
- temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
- if (temp < 0)
- return temp;
- temp &= ~(MII_M1111_HWCFG_MODE_MASK | MII_M1111_HWCFG_FIBER_COPPER_RES);
- temp |= MII_M1111_HWCFG_MODE_COPPER_RTBI | MII_M1111_HWCFG_FIBER_COPPER_AUTO;
- err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+ err = m88e1111_config_init_rtbi(phydev);
if (err < 0)
return err;
}
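The RTBI helper factored out above still busy-waits on BMCR_RESET with no
upper bound, as the original code did. Purely as a sketch, a bounded
variant in the spirit of phy_poll_reset() would give up after a timeout;
example_wait_reset() is hypothetical:

/* Illustrative bounded variant of the BMCR_RESET poll above. */
static int example_wait_reset(struct phy_device *phydev)
{
	int val, retries = 12;

	do {
		msleep(50);
		val = phy_read(phydev, MII_BMCR);
		if (val < 0)
			return val;
	} while ((val & BMCR_RESET) && --retries);

	return (val & BMCR_RESET) ? -ETIMEDOUT : 0;
}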
@@ -818,11 +857,9 @@
{
int err, oldpage;
- oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
-
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
- if (err < 0)
- return err;
+ oldpage = marvell_get_set_page(phydev, MII_MARVELL_LED_PAGE);
+ if (oldpage < 0)
+ return oldpage;
/* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
err = phy_write(phydev, MII_88E1121_PHY_LED_CTRL,
@@ -830,7 +867,7 @@
if (err < 0)
return err;
- phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
+ marvell_set_page(phydev, oldpage);
/* Set marvell,reg-init configuration from device tree */
return marvell_config_init(phydev);
@@ -844,7 +881,7 @@
/* SGMII-to-Copper mode initialization */
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
/* Select page 18 */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 18);
+ err = marvell_set_page(phydev, 18);
if (err < 0)
return err;
@@ -863,7 +900,7 @@
return err;
/* Reset page selection */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0);
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
return err;
}
@@ -893,7 +930,7 @@
int err;
/* Change address */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
+ err = marvell_set_page(phydev, MII_MARVELL_MSCR_PAGE);
if (err < 0)
return err;
@@ -903,7 +940,7 @@
return err;
/* Change address */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0003);
+ err = marvell_set_page(phydev, MII_MARVELL_LED_PAGE);
if (err < 0)
return err;
@@ -920,7 +957,7 @@
return err;
/* Reset address */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
return err;
@@ -932,7 +969,7 @@
int err;
/* Change address */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
+ err = marvell_set_page(phydev, MII_MARVELL_MSCR_PAGE);
if (err < 0)
return err;
@@ -946,17 +983,70 @@
return err;
/* Reset address */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
return err;
return phy_write(phydev, MII_BMCR, BMCR_RESET);
}
+static int m88e1145_config_init_rgmii(struct phy_device *phydev)
+{
+ int err;
+ int temp = phy_read(phydev, MII_M1145_PHY_EXT_CR);
+
+ if (temp < 0)
+ return temp;
+
+ temp |= (MII_M1145_RGMII_RX_DELAY | MII_M1145_RGMII_TX_DELAY);
+
+ err = phy_write(phydev, MII_M1145_PHY_EXT_CR, temp);
+ if (err < 0)
+ return err;
+
+ if (phydev->dev_flags & MARVELL_PHY_M1145_FLAGS_RESISTANCE) {
+ err = phy_write(phydev, 0x1d, 0x0012);
+ if (err < 0)
+ return err;
+
+ temp = phy_read(phydev, 0x1e);
+ if (temp < 0)
+ return temp;
+
+ temp &= 0xf03f;
+ temp |= 2 << 9; /* 36 ohm */
+ temp |= 2 << 6; /* 39 ohm */
+
+ err = phy_write(phydev, 0x1e, temp);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1d, 0x3);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1e, 0x8000);
+ }
+ return err;
+}
+
+static int m88e1145_config_init_sgmii(struct phy_device *phydev)
+{
+ int temp = phy_read(phydev, MII_M1145_PHY_EXT_SR);
+
+ if (temp < 0)
+ return temp;
+
+ temp &= ~MII_M1145_HWCFG_MODE_MASK;
+ temp |= MII_M1145_HWCFG_MODE_SGMII_NO_CLK;
+ temp |= MII_M1145_HWCFG_FIBER_COPPER_AUTO;
+
+ return phy_write(phydev, MII_M1145_PHY_EXT_SR, temp);
+}
+
static int m88e1145_config_init(struct phy_device *phydev)
{
int err;
- int temp;
/* Take care of errata E0 & E1 */
err = phy_write(phydev, 0x1d, 0x001b);
@@ -976,53 +1066,13 @@
return err;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
- int temp = phy_read(phydev, MII_M1145_PHY_EXT_CR);
- if (temp < 0)
- return temp;
-
- temp |= (MII_M1145_RGMII_RX_DELAY | MII_M1145_RGMII_TX_DELAY);
-
- err = phy_write(phydev, MII_M1145_PHY_EXT_CR, temp);
+ err = m88e1145_config_init_rgmii(phydev);
if (err < 0)
return err;
-
- if (phydev->dev_flags & MARVELL_PHY_M1145_FLAGS_RESISTANCE) {
- err = phy_write(phydev, 0x1d, 0x0012);
- if (err < 0)
- return err;
-
- temp = phy_read(phydev, 0x1e);
- if (temp < 0)
- return temp;
-
- temp &= 0xf03f;
- temp |= 2 << 9; /* 36 ohm */
- temp |= 2 << 6; /* 39 ohm */
-
- err = phy_write(phydev, 0x1e, temp);
- if (err < 0)
- return err;
-
- err = phy_write(phydev, 0x1d, 0x3);
- if (err < 0)
- return err;
-
- err = phy_write(phydev, 0x1e, 0x8000);
- if (err < 0)
- return err;
- }
}
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
- temp = phy_read(phydev, MII_M1145_PHY_EXT_SR);
- if (temp < 0)
- return temp;
-
- temp &= ~MII_M1145_HWCFG_MODE_MASK;
- temp |= MII_M1145_HWCFG_MODE_SGMII_NO_CLK;
- temp |= MII_M1145_HWCFG_FIBER_COPPER_AUTO;
-
- err = phy_write(phydev, MII_M1145_PHY_EXT_SR, temp);
+ err = m88e1145_config_init_sgmii(phydev);
if (err < 0)
return err;
}
@@ -1065,7 +1115,8 @@
int status;
/* Use the generic register for copper link, or specific
- * register for fiber case */
+ * register for fiber case
+ */
if (fiber) {
status = phy_read(phydev, MII_M1011_PHY_STATUS);
if (status < 0)
@@ -1082,6 +1133,103 @@
return 0;
}
+static int marvell_read_status_page_an(struct phy_device *phydev,
+ int fiber)
+{
+ int status;
+ int lpa;
+ int lpagb;
+
+ status = phy_read(phydev, MII_M1011_PHY_STATUS);
+ if (status < 0)
+ return status;
+
+ lpa = phy_read(phydev, MII_LPA);
+ if (lpa < 0)
+ return lpa;
+
+ lpagb = phy_read(phydev, MII_STAT1000);
+ if (lpagb < 0)
+ return lpagb;
+
+ if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ status = status & MII_M1011_PHY_STATUS_SPD_MASK;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ switch (status) {
+ case MII_M1011_PHY_STATUS_1000:
+ phydev->speed = SPEED_1000;
+ break;
+
+ case MII_M1011_PHY_STATUS_100:
+ phydev->speed = SPEED_100;
+ break;
+
+ default:
+ phydev->speed = SPEED_10;
+ break;
+ }
+
+ if (!fiber) {
+ phydev->lp_advertising =
+ mii_stat1000_to_ethtool_lpa_t(lpagb) |
+ mii_lpa_to_ethtool_lpa_t(lpa);
+
+ if (phydev->duplex == DUPLEX_FULL) {
+ phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
+ phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
+ }
+ } else {
+ /* The fiber link is only 1000M capable */
+ phydev->lp_advertising = fiber_lpa_to_ethtool_lpa_t(lpa);
+
+ if (phydev->duplex == DUPLEX_FULL) {
+ if (!(lpa & LPA_PAUSE_FIBER)) {
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+ } else if ((lpa & LPA_PAUSE_ASYM_FIBER)) {
+ phydev->pause = 1;
+ phydev->asym_pause = 1;
+ } else {
+ phydev->pause = 1;
+ phydev->asym_pause = 0;
+ }
+ }
+ }
+ return 0;
+}
+
+static int marvell_read_status_page_fixed(struct phy_device *phydev)
+{
+ int bmcr = phy_read(phydev, MII_BMCR);
+
+ if (bmcr < 0)
+ return bmcr;
+
+ if (bmcr & BMCR_FULLDPLX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ if (bmcr & BMCR_SPEED1000)
+ phydev->speed = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ phydev->speed = SPEED_100;
+ else
+ phydev->speed = SPEED_10;
+
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+ phydev->lp_advertising = 0;
+
+ return 0;
+}
+
/* marvell_read_status_page
*
* Description:
@@ -1092,16 +1240,13 @@
*/
static int marvell_read_status_page(struct phy_device *phydev, int page)
{
- int adv;
- int err;
- int lpa;
- int lpagb;
- int status = 0;
int fiber;
+ int err;
/* Detect and update the link, but return if there
- * was an error */
- if (page == MII_M1111_FIBER)
+ * was an error
+ */
+ if (page == MII_MARVELL_FIBER_PAGE)
fiber = 1;
else
fiber = 0;
@@ -1110,93 +1255,12 @@
if (err)
return err;
- if (AUTONEG_ENABLE == phydev->autoneg) {
- status = phy_read(phydev, MII_M1011_PHY_STATUS);
- if (status < 0)
- return status;
+ if (phydev->autoneg == AUTONEG_ENABLE)
+ err = marvell_read_status_page_an(phydev, fiber);
+ else
+ err = marvell_read_status_page_fixed(phydev);
- lpa = phy_read(phydev, MII_LPA);
- if (lpa < 0)
- return lpa;
-
- lpagb = phy_read(phydev, MII_STAT1000);
- if (lpagb < 0)
- return lpagb;
-
- adv = phy_read(phydev, MII_ADVERTISE);
- if (adv < 0)
- return adv;
-
- if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
-
- status = status & MII_M1011_PHY_STATUS_SPD_MASK;
- phydev->pause = phydev->asym_pause = 0;
-
- switch (status) {
- case MII_M1011_PHY_STATUS_1000:
- phydev->speed = SPEED_1000;
- break;
-
- case MII_M1011_PHY_STATUS_100:
- phydev->speed = SPEED_100;
- break;
-
- default:
- phydev->speed = SPEED_10;
- break;
- }
-
- if (!fiber) {
- phydev->lp_advertising = mii_stat1000_to_ethtool_lpa_t(lpagb) |
- mii_lpa_to_ethtool_lpa_t(lpa);
-
- if (phydev->duplex == DUPLEX_FULL) {
- phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
- phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
- }
- } else {
- /* The fiber link is only 1000M capable */
- phydev->lp_advertising = fiber_lpa_to_ethtool_lpa_t(lpa);
-
- if (phydev->duplex == DUPLEX_FULL) {
- if (!(lpa & LPA_PAUSE_FIBER)) {
- phydev->pause = 0;
- phydev->asym_pause = 0;
- } else if ((lpa & LPA_PAUSE_ASYM_FIBER)) {
- phydev->pause = 1;
- phydev->asym_pause = 1;
- } else {
- phydev->pause = 1;
- phydev->asym_pause = 0;
- }
- }
- }
- } else {
- int bmcr = phy_read(phydev, MII_BMCR);
-
- if (bmcr < 0)
- return bmcr;
-
- if (bmcr & BMCR_FULLDPLX)
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
-
- if (bmcr & BMCR_SPEED1000)
- phydev->speed = SPEED_1000;
- else if (bmcr & BMCR_SPEED100)
- phydev->speed = SPEED_100;
- else
- phydev->speed = SPEED_10;
-
- phydev->pause = phydev->asym_pause = 0;
- phydev->lp_advertising = 0;
- }
-
- return 0;
+ return err;
}
/* marvell_read_status
@@ -1215,33 +1279,34 @@
/* Check the fiber mode first */
if (phydev->supported & SUPPORTED_FIBRE &&
phydev->interface != PHY_INTERFACE_MODE_SGMII) {
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
+ err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
- err = marvell_read_status_page(phydev, MII_M1111_FIBER);
+ err = marvell_read_status_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
- /* If the fiber link is up, it is the selected and used link.
- * In this case, we need to stay in the fiber page.
- * Please to be careful about that, avoid to restore Copper page
- * in other functions which could break the behaviour
- * for some fiber phy like 88E1512.
- * */
+ /* If the fiber link is up, it is the selected and
+ * used link. In this case, we need to stay in the
+ * fiber page. Be careful here: restoring the copper
+ * page in other functions could break the behaviour
+ * for some fiber PHYs such as the 88E1512.
+ */
if (phydev->link)
return 0;
/* If fiber link is down, check and save copper mode state */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
goto error;
}
- return marvell_read_status_page(phydev, MII_M1111_COPPER);
+ return marvell_read_status_page(phydev, MII_MARVELL_COPPER_PAGE);
error:
- phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
return err;
}
@@ -1256,7 +1321,7 @@
/* Suspend the fiber mode first */
if (!(phydev->supported & SUPPORTED_FIBRE)) {
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
+ err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
@@ -1266,7 +1331,7 @@
goto error;
/* Then, the copper link */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
goto error;
}
@@ -1275,7 +1340,7 @@
return genphy_suspend(phydev);
error:
- phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
return err;
}
@@ -1290,7 +1355,7 @@
/* Resume the fiber mode first */
if (!(phydev->supported & SUPPORTED_FIBRE)) {
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
+ err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
@@ -1300,7 +1365,7 @@
goto error;
/* Then, the copper link */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
goto error;
}
@@ -1309,13 +1374,14 @@
return genphy_resume(phydev);
error:
- phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
return err;
}
static int marvell_aneg_done(struct phy_device *phydev)
{
int retval = phy_read(phydev, MII_M1011_PHY_STATUS);
+
return (retval < 0) ? retval : (retval & MII_M1011_PHY_STATUS_RESOLVED);
}
@@ -1331,32 +1397,33 @@
return 0;
}
-static void m88e1318_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
+static void m88e1318_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
{
wol->supported = WAKE_MAGIC;
wol->wolopts = 0;
- if (phy_write(phydev, MII_MARVELL_PHY_PAGE,
- MII_88E1318S_PHY_WOL_PAGE) < 0)
+ if (marvell_set_page(phydev, MII_MARVELL_WOL_PAGE) < 0)
return;
if (phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL) &
MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
wol->wolopts |= WAKE_MAGIC;
- if (phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x00) < 0)
+ if (marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE) < 0)
return;
}
-static int m88e1318_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
+static int m88e1318_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
{
int err, oldpage, temp;
- oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
+ oldpage = marvell_get_page(phydev);
if (wol->wolopts & WAKE_MAGIC) {
/* Explicitly switch to page 0x00, just to be sure */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x00);
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
return err;
@@ -1367,8 +1434,7 @@
if (err < 0)
return err;
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
- MII_88E1318S_PHY_LED_PAGE);
+ err = marvell_set_page(phydev, MII_MARVELL_LED_PAGE);
if (err < 0)
return err;
@@ -1381,8 +1447,7 @@
if (err < 0)
return err;
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
- MII_88E1318S_PHY_WOL_PAGE);
+ err = marvell_set_page(phydev, MII_MARVELL_WOL_PAGE);
if (err < 0)
return err;
@@ -1411,8 +1476,7 @@
if (err < 0)
return err;
} else {
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
- MII_88E1318S_PHY_WOL_PAGE);
+ err = marvell_set_page(phydev, MII_MARVELL_WOL_PAGE);
if (err < 0)
return err;
@@ -1425,7 +1489,7 @@
return err;
}
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
+ err = marvell_set_page(phydev, oldpage);
if (err < 0)
return err;
@@ -1457,13 +1521,11 @@
{
struct marvell_hw_stat stat = marvell_hw_stats[i];
struct marvell_priv *priv = phydev->priv;
- int err, oldpage, val;
+ int oldpage, val;
u64 ret;
- oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
- stat.page);
- if (err < 0)
+ oldpage = marvell_get_set_page(phydev, stat.page);
+ if (oldpage < 0)
return UINT64_MAX;
val = phy_read(phydev, stat.reg);
@@ -1475,7 +1537,7 @@
ret = priv->stats[i];
}
- phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
+ marvell_set_page(phydev, oldpage);
return ret;
}
@@ -1492,6 +1554,7 @@
#ifdef CONFIG_HWMON
static int m88e1121_get_temp(struct phy_device *phydev, long *temp)
{
+ int oldpage;
int ret;
int val;
@@ -1499,9 +1562,11 @@
mutex_lock(&phydev->lock);
- ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
- if (ret < 0)
- goto error;
+ oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
+ if (oldpage < 0) {
+ mutex_unlock(&phydev->lock);
+ return oldpage;
+ }
/* Enable temperature sensor */
ret = phy_read(phydev, MII_88E1121_MISC_TEST);
@@ -1531,7 +1596,7 @@
*temp = ((val & MII_88E1121_MISC_TEST_TEMP_MASK) - 5) * 5000;
error:
- phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+ marvell_set_page(phydev, oldpage);
mutex_unlock(&phydev->lock);
return ret;
@@ -1608,15 +1673,18 @@
static int m88e1510_get_temp(struct phy_device *phydev, long *temp)
{
+ int oldpage;
int ret;
*temp = 0;
mutex_lock(&phydev->lock);
- ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
- if (ret < 0)
- goto error;
+ oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
+ if (oldpage < 0) {
+ mutex_unlock(&phydev->lock);
+ return oldpage;
+ }
ret = phy_read(phydev, MII_88E1510_TEMP_SENSOR);
if (ret < 0)
@@ -1625,23 +1693,26 @@
*temp = ((ret & MII_88E1510_TEMP_SENSOR_MASK) - 25) * 1000;
error:
- phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+ marvell_set_page(phydev, oldpage);
mutex_unlock(&phydev->lock);
return ret;
}
-int m88e1510_get_temp_critical(struct phy_device *phydev, long *temp)
+static int m88e1510_get_temp_critical(struct phy_device *phydev, long *temp)
{
+ int oldpage;
int ret;
*temp = 0;
mutex_lock(&phydev->lock);
- ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
- if (ret < 0)
- goto error;
+ oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
+ if (oldpage < 0) {
+ mutex_unlock(&phydev->lock);
+ return oldpage;
+ }
ret = phy_read(phydev, MII_88E1121_MISC_TEST);
if (ret < 0)
@@ -1653,21 +1724,24 @@
*temp *= 1000;
error:
- phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+ marvell_set_page(phydev, oldpage);
mutex_unlock(&phydev->lock);
return ret;
}
-int m88e1510_set_temp_critical(struct phy_device *phydev, long temp)
+static int m88e1510_set_temp_critical(struct phy_device *phydev, long temp)
{
+ int oldpage;
int ret;
mutex_lock(&phydev->lock);
- ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
- if (ret < 0)
- goto error;
+ oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
+ if (oldpage < 0) {
+ mutex_unlock(&phydev->lock);
+ return oldpage;
+ }
ret = phy_read(phydev, MII_88E1121_MISC_TEST);
if (ret < 0)
@@ -1680,23 +1754,26 @@
(temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT));
error:
- phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+ marvell_set_page(phydev, oldpage);
mutex_unlock(&phydev->lock);
return ret;
}
-int m88e1510_get_temp_alarm(struct phy_device *phydev, long *alarm)
+static int m88e1510_get_temp_alarm(struct phy_device *phydev, long *alarm)
{
+ int oldpage;
int ret;
*alarm = false;
mutex_lock(&phydev->lock);
- ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
- if (ret < 0)
- goto error;
+ oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
+ if (oldpage < 0) {
+ mutex_unlock(&phydev->lock);
+ return oldpage;
+ }
ret = phy_read(phydev, MII_88E1121_MISC_TEST);
if (ret < 0)
@@ -1704,7 +1781,7 @@
*alarm = !!(ret & MII_88E1510_MISC_TEST_TEMP_IRQ);
error:
- phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+ marvell_set_page(phydev, oldpage);
mutex_unlock(&phydev->lock);
return ret;
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
new file mode 100644
index 0000000..aebc08b
--- /dev/null
+++ b/drivers/net/phy/marvell10g.c
@@ -0,0 +1,368 @@
+/*
+ * Marvell 10G 88x3310 PHY driver
+ *
+ * Based upon the ID registers, this PHY appears to be a mixture of IPs
+ * from two different companies.
+ *
+ * There appear to be several different data paths through the PHY, which
+ * are automatically managed by the PHY. The following has been determined
+ * via observation and experimentation:
+ *
+ * SGMII PHYXS -- BASE-T PCS -- 10G PMA -- AN -- Copper (for <= 1G)
+ * 10GBASE-KR PHYXS -- BASE-T PCS -- 10G PMA -- AN -- Copper (for 10G)
+ * 10GBASE-KR PHYXS -- BASE-R PCS -- Fiber
+ *
+ * If both the fiber and copper ports are connected, the first to gain
+ * link takes priority and the other port is completely locked out.
+ */
+#include <linux/phy.h>
+
+enum {
+ MV_PCS_BASE_T = 0x0000,
+ MV_PCS_BASE_R = 0x1000,
+ MV_PCS_1000BASEX = 0x2000,
+
+ /* These registers appear at 0x800X and 0xa00X - the 0xa00X control
+ * registers appear to reset themselves to the 0x800X values when AN is
+ * restarted, but status registers appear readable from either.
+ */
+ MV_AN_CTRL1000 = 0x8000, /* 1000base-T control register */
+ MV_AN_STAT1000 = 0x8001, /* 1000base-T status register */
+
+ /* This register appears to reflect the copper status */
+ MV_AN_RESULT = 0xa016,
+ MV_AN_RESULT_SPD_10 = BIT(12),
+ MV_AN_RESULT_SPD_100 = BIT(13),
+ MV_AN_RESULT_SPD_1000 = BIT(14),
+ MV_AN_RESULT_SPD_10000 = BIT(15),
+};
+
+static int mv3310_modify(struct phy_device *phydev, int devad, u16 reg,
+ u16 mask, u16 bits)
+{
+ int old, val, ret;
+
+ old = phy_read_mmd(phydev, devad, reg);
+ if (old < 0)
+ return old;
+
+ val = (old & ~mask) | (bits & mask);
+ if (val == old)
+ return 0;
+
+ ret = phy_write_mmd(phydev, devad, reg, val);
+
+ return ret < 0 ? ret : 1;
+}
+
+static int mv3310_probe(struct phy_device *phydev)
+{
+ u32 mmd_mask = MDIO_DEVS_PMAPMD | MDIO_DEVS_AN;
+
+ if (!phydev->is_c45 ||
+ (phydev->c45_ids.devices_in_package & mmd_mask) != mmd_mask)
+ return -ENODEV;
+
+ return 0;
+}
+
+/*
+ * Resetting the MV88X3310 causes it to become non-responsive. Avoid
+ * setting the reset bit(s).
+ */
+static int mv3310_soft_reset(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static int mv3310_config_init(struct phy_device *phydev)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
+ u32 mask;
+ int val;
+
+ /* Check that the PHY interface type is compatible */
+ if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
+ phydev->interface != PHY_INTERFACE_MODE_XGMII &&
+ phydev->interface != PHY_INTERFACE_MODE_XAUI &&
+ phydev->interface != PHY_INTERFACE_MODE_RXAUI &&
+ phydev->interface != PHY_INTERFACE_MODE_10GKR)
+ return -ENODEV;
+
+ __set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+ __set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
+
+ if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) {
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ if (val < 0)
+ return val;
+
+ if (val & MDIO_AN_STAT1_ABLE)
+ __set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
+ }
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_STAT2);
+ if (val < 0)
+ return val;
+
+ /* Ethtool does not support the WAN mode bits */
+ if (val & (MDIO_PMA_STAT2_10GBSR | MDIO_PMA_STAT2_10GBLR |
+ MDIO_PMA_STAT2_10GBER | MDIO_PMA_STAT2_10GBLX4 |
+ MDIO_PMA_STAT2_10GBSW | MDIO_PMA_STAT2_10GBLW |
+ MDIO_PMA_STAT2_10GBEW))
+ __set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
+ if (val & MDIO_PMA_STAT2_10GBSR)
+ __set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, supported);
+ if (val & MDIO_PMA_STAT2_10GBLR)
+ __set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, supported);
+ if (val & MDIO_PMA_STAT2_10GBER)
+ __set_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, supported);
+
+ if (val & MDIO_PMA_STAT2_EXTABLE) {
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_EXTABLE);
+ if (val < 0)
+ return val;
+
+ if (val & (MDIO_PMA_EXTABLE_10GBT | MDIO_PMA_EXTABLE_1000BT |
+ MDIO_PMA_EXTABLE_100BTX | MDIO_PMA_EXTABLE_10BT))
+ __set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
+ if (val & MDIO_PMA_EXTABLE_10GBLRM)
+ __set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
+ if (val & (MDIO_PMA_EXTABLE_10GBKX4 | MDIO_PMA_EXTABLE_10GBKR |
+ MDIO_PMA_EXTABLE_1000BKX))
+ __set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, supported);
+ if (val & MDIO_PMA_EXTABLE_10GBLRM)
+ __set_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
+ supported);
+ if (val & MDIO_PMA_EXTABLE_10GBT)
+ __set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ supported);
+ if (val & MDIO_PMA_EXTABLE_10GBKX4)
+ __set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ supported);
+ if (val & MDIO_PMA_EXTABLE_10GBKR)
+ __set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ supported);
+ if (val & MDIO_PMA_EXTABLE_1000BT)
+ __set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ supported);
+ if (val & MDIO_PMA_EXTABLE_1000BKX)
+ __set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ supported);
+ if (val & MDIO_PMA_EXTABLE_100BTX)
+ __set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ supported);
+ if (val & MDIO_PMA_EXTABLE_10BT)
+ __set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ supported);
+ }
+
+ if (!ethtool_convert_link_mode_to_legacy_u32(&mask, supported))
+ dev_warn(&phydev->mdio.dev,
+ "PHY supports (%*pb) more modes than phylib supports, some modes not supported.\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, supported);
+
+ phydev->supported &= mask;
+ phydev->advertising &= phydev->supported;
+
+ return 0;
+}
+
+static int mv3310_config_aneg(struct phy_device *phydev)
+{
+ bool changed = false;
+ u32 advertising;
+ int ret;
+
+ if (phydev->autoneg == AUTONEG_DISABLE) {
+ ret = genphy_c45_pma_setup_forced(phydev);
+ if (ret < 0)
+ return ret;
+
+ return genphy_c45_an_disable_aneg(phydev);
+ }
+
+ phydev->advertising &= phydev->supported;
+ advertising = phydev->advertising;
+
+ ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE,
+ ADVERTISE_ALL | ADVERTISE_100BASE4 |
+ ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM,
+ ethtool_adv_to_mii_adv_t(advertising));
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ ret = mv3310_modify(phydev, MDIO_MMD_AN, MV_AN_CTRL1000,
+ ADVERTISE_1000FULL | ADVERTISE_1000HALF,
+ ethtool_adv_to_mii_ctrl1000_t(advertising));
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ /* 10G control register */
+ ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV10G,
+ advertising & ADVERTISED_10000baseT_Full ?
+ MDIO_AN_10GBT_CTRL_ADV10G : 0);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ if (changed)
+ ret = genphy_c45_restart_aneg(phydev);
+
+ return ret;
+}
+
+static int mv3310_aneg_done(struct phy_device *phydev)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_BASE_R + MDIO_STAT1);
+ if (val < 0)
+ return val;
+
+ if (val & MDIO_STAT1_LSTATUS)
+ return 1;
+
+ return genphy_c45_aneg_done(phydev);
+}
+
+/* 10GBASE-ER,LR,LRM,SR do not support autonegotiation. */
+static int mv3310_read_10gbr_status(struct phy_device *phydev)
+{
+ phydev->link = 1;
+ phydev->speed = SPEED_10000;
+ phydev->duplex = DUPLEX_FULL;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
+ phydev->interface = PHY_INTERFACE_MODE_10GKR;
+
+ return 0;
+}
+
+static int mv3310_read_status(struct phy_device *phydev)
+{
+ u32 mmd_mask = phydev->c45_ids.devices_in_package;
+ int val;
+
+ /* The vendor devads do not report link status. Avoid the PHYXS
+ * instance as there are three, and its status depends on the MAC
+ * being appropriately configured for the negotiated speed.
+ */
+ mmd_mask &= ~(BIT(MDIO_MMD_VEND1) | BIT(MDIO_MMD_VEND2) |
+ BIT(MDIO_MMD_PHYXS));
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->lp_advertising = 0;
+ phydev->link = 0;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_BASE_R + MDIO_STAT1);
+ if (val < 0)
+ return val;
+
+ if (val & MDIO_STAT1_LSTATUS)
+ return mv3310_read_10gbr_status(phydev);
+
+ val = genphy_c45_read_link(phydev, mmd_mask);
+ if (val < 0)
+ return val;
+
+ phydev->link = val > 0 ? 1 : 0;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ if (val < 0)
+ return val;
+
+ if (val & MDIO_AN_STAT1_COMPLETE) {
+ val = genphy_c45_read_lpa(phydev);
+ if (val < 0)
+ return val;
+
+ /* Read the link partner's 1G advertisement */
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MV_AN_STAT1000);
+ if (val < 0)
+ return val;
+
+ phydev->lp_advertising |= mii_stat1000_to_ethtool_lpa_t(val);
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MV_AN_RESULT);
+ if (val < 0)
+ return val;
+
+ if (val & MV_AN_RESULT_SPD_10000)
+ phydev->speed = SPEED_10000;
+ else if (val & MV_AN_RESULT_SPD_1000)
+ phydev->speed = SPEED_1000;
+ else if (val & MV_AN_RESULT_SPD_100)
+ phydev->speed = SPEED_100;
+ else if (val & MV_AN_RESULT_SPD_10)
+ phydev->speed = SPEED_10;
+
+ phydev->duplex = DUPLEX_FULL;
+ }
+ }
+
+ if (phydev->autoneg != AUTONEG_ENABLE) {
+ val = genphy_c45_read_pma(phydev);
+ if (val < 0)
+ return val;
+ }
+
+ if ((phydev->interface == PHY_INTERFACE_MODE_SGMII ||
+ phydev->interface == PHY_INTERFACE_MODE_10GKR) && phydev->link) {
+ /* The PHY automatically switches its serdes interface (and
+ * active PHYXS instance) between Cisco SGMII and 10GBase-KR
+ * modes according to the speed. Florian suggests setting
+ * phydev->interface to communicate this to the MAC. Only do
+ * this if we are already in either SGMII or 10GBase-KR mode.
+ */
+ if (phydev->speed == SPEED_10000)
+ phydev->interface = PHY_INTERFACE_MODE_10GKR;
+ else if (phydev->speed >= SPEED_10 &&
+ phydev->speed < SPEED_10000)
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ }
+
+ return 0;
+}
+
+static struct phy_driver mv3310_drivers[] = {
+ {
+ .phy_id = 0x002b09aa,
+ .phy_id_mask = 0xffffffff,
+ .name = "mv88x3310",
+ .features = SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP |
+ SUPPORTED_FIBRE |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_Backplane,
+ .probe = mv3310_probe,
+ .soft_reset = mv3310_soft_reset,
+ .config_init = mv3310_config_init,
+ .config_aneg = mv3310_config_aneg,
+ .aneg_done = mv3310_aneg_done,
+ .read_status = mv3310_read_status,
+ },
+};
+
+module_phy_driver(mv3310_drivers);
+
+static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
+ { 0x002b09aa, 0xffffffff },
+ { },
+};
+MODULE_DEVICE_TABLE(mdio, mv3310_tbl);
+MODULE_DESCRIPTION("Marvell Alaska X 10Gigabit Ethernet PHY driver (MV88X3310)");
+MODULE_LICENSE("GPL");
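mv3310_modify() above returns a negative errno on failure, 0 when the
register already held the requested bits, and 1 when a write was
performed; mv3310_config_aneg() uses that to restart autonegotiation only
when an advertisement word actually changed. A stripped-down sketch of the
same pattern (example_update_adv() is hypothetical):

/* Illustrative only: restart AN only if the advertisement changed. */
static int example_update_adv(struct phy_device *phydev, u16 adv)
{
	int ret;

	ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE,
			    ADVERTISE_ALL, adv);
	if (ret < 0)
		return ret;

	/* ret == 1 means the register content was changed */
	if (ret > 0)
		return genphy_c45_restart_aneg(phydev);

	return 0;
}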
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 599ce24..00755b6 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -133,29 +133,35 @@
ret_val = -ENODEV;
for_each_available_child_of_node(dev->of_node, child_bus_node) {
- u32 v;
+ int v;
- r = of_property_read_u32(child_bus_node, "reg", &v);
- if (r)
+ v = of_mdio_parse_addr(dev, child_bus_node);
+ if (v < 0) {
+ dev_err(dev,
+ "Error: Failed to find reg for child %s\n",
+ of_node_full_name(child_bus_node));
continue;
+ }
cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
if (cb == NULL) {
dev_err(dev,
- "Error: Failed to allocate memory for child\n");
+ "Error: Failed to allocate memory for child %s\n",
+ of_node_full_name(child_bus_node));
ret_val = -ENOMEM;
- of_node_put(child_bus_node);
- break;
+ continue;
}
cb->bus_number = v;
cb->parent = pb;
cb->mii_bus = mdiobus_alloc();
if (!cb->mii_bus) {
+ dev_err(dev,
+ "Error: Failed to allocate MDIO bus for child %s\n",
+ of_node_full_name(child_bus_node));
ret_val = -ENOMEM;
devm_kfree(dev, cb);
- of_node_put(child_bus_node);
- break;
+ continue;
}
cb->mii_bus->priv = cb;
@@ -167,6 +173,9 @@
cb->mii_bus->write = mdio_mux_write;
r = of_mdiobus_register(cb->mii_bus, child_bus_node);
if (r) {
+ dev_err(dev,
+ "Error: Failed to register MDIO bus for child %s\n",
+ of_node_full_name(child_bus_node));
mdiobus_free(cb->mii_bus);
devm_kfree(dev, cb);
} else {
@@ -180,6 +189,7 @@
return 0;
}
+ dev_err(dev, "Error: No acceptable child buses found\n");
devm_kfree(dev, pb);
err_pb_kz:
/* balance the reference of_mdio_find_bus() took */
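Both this file and mdio_bus.c below now call of_mdio_parse_addr() rather
than open-coding the "reg" lookup. Judging from the validation removed
from mdio_bus.c further down, the helper is expected to behave roughly
like this sketch (example_parse_addr() is a stand-in name):

/* Sketch of the validation of_mdio_parse_addr() centralizes. */
static int example_parse_addr(struct device *dev,
			      const struct device_node *np)
{
	u32 addr;
	int ret;

	ret = of_property_read_u32(np, "reg", &addr);
	if (ret < 0) {
		dev_err(dev, "%s has invalid MDIO address\n", np->full_name);
		return ret;
	}

	/* An MDIO device must have a reg property in the range [0-31] */
	if (addr >= PHY_MAX_ADDR) {
		dev_err(dev, "%s MDIO address %u is too large\n",
			np->full_name, addr);
		return -EINVAL;
	}

	return addr;
}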
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
index 3e2ac07..bfd3090 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/phy/mdio-xgene.c
@@ -34,76 +34,73 @@
static bool xgene_mdio_status;
-static u32 xgene_enet_rd_mac(void __iomem *base_addr, u32 rd_addr)
+u32 xgene_mdio_rd_mac(struct xgene_mdio_pdata *pdata, u32 rd_addr)
{
void __iomem *addr, *rd, *cmd, *cmd_done;
u32 done, rd_data = BUSY_MASK;
u8 wait = 10;
- addr = base_addr + MAC_ADDR_REG_OFFSET;
- rd = base_addr + MAC_READ_REG_OFFSET;
- cmd = base_addr + MAC_COMMAND_REG_OFFSET;
- cmd_done = base_addr + MAC_COMMAND_DONE_REG_OFFSET;
+ addr = pdata->mac_csr_addr + MAC_ADDR_REG_OFFSET;
+ rd = pdata->mac_csr_addr + MAC_READ_REG_OFFSET;
+ cmd = pdata->mac_csr_addr + MAC_COMMAND_REG_OFFSET;
+ cmd_done = pdata->mac_csr_addr + MAC_COMMAND_DONE_REG_OFFSET;
+ spin_lock(&pdata->mac_lock);
iowrite32(rd_addr, addr);
iowrite32(XGENE_ENET_RD_CMD, cmd);
- while (wait--) {
- done = ioread32(cmd_done);
- if (done)
- break;
+ while (!(done = ioread32(cmd_done)) && wait--)
udelay(1);
- }
- if (!done)
- return rd_data;
+ if (done)
+ rd_data = ioread32(rd);
- rd_data = ioread32(rd);
iowrite32(0, cmd);
+ spin_unlock(&pdata->mac_lock);
return rd_data;
}
+EXPORT_SYMBOL(xgene_mdio_rd_mac);
-static void xgene_enet_wr_mac(void __iomem *base_addr, u32 wr_addr, u32 wr_data)
+void xgene_mdio_wr_mac(struct xgene_mdio_pdata *pdata, u32 wr_addr, u32 data)
{
void __iomem *addr, *wr, *cmd, *cmd_done;
u8 wait = 10;
u32 done;
- addr = base_addr + MAC_ADDR_REG_OFFSET;
- wr = base_addr + MAC_WRITE_REG_OFFSET;
- cmd = base_addr + MAC_COMMAND_REG_OFFSET;
- cmd_done = base_addr + MAC_COMMAND_DONE_REG_OFFSET;
+ addr = pdata->mac_csr_addr + MAC_ADDR_REG_OFFSET;
+ wr = pdata->mac_csr_addr + MAC_WRITE_REG_OFFSET;
+ cmd = pdata->mac_csr_addr + MAC_COMMAND_REG_OFFSET;
+ cmd_done = pdata->mac_csr_addr + MAC_COMMAND_DONE_REG_OFFSET;
+ spin_lock(&pdata->mac_lock);
iowrite32(wr_addr, addr);
- iowrite32(wr_data, wr);
+ iowrite32(data, wr);
iowrite32(XGENE_ENET_WR_CMD, cmd);
- while (wait--) {
- done = ioread32(cmd_done);
- if (done)
- break;
+ while (!(done = ioread32(cmd_done)) && wait--)
udelay(1);
- }
if (!done)
pr_err("MCX mac write failed, addr: 0x%04x\n", wr_addr);
iowrite32(0, cmd);
+ spin_unlock(&pdata->mac_lock);
}
+EXPORT_SYMBOL(xgene_mdio_wr_mac);
int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg)
{
- void __iomem *addr = (void __iomem *)bus->priv;
+ struct xgene_mdio_pdata *pdata = (struct xgene_mdio_pdata *)bus->priv;
u32 data, done;
u8 wait = 10;
data = SET_VAL(PHY_ADDR, phy_id) | SET_VAL(REG_ADDR, reg);
- xgene_enet_wr_mac(addr, MII_MGMT_ADDRESS_ADDR, data);
- xgene_enet_wr_mac(addr, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
+ xgene_mdio_wr_mac(pdata, MII_MGMT_ADDRESS_ADDR, data);
+ xgene_mdio_wr_mac(pdata, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
do {
usleep_range(5, 10);
- done = xgene_enet_rd_mac(addr, MII_MGMT_INDICATORS_ADDR);
+ done = xgene_mdio_rd_mac(pdata, MII_MGMT_INDICATORS_ADDR);
} while ((done & BUSY_MASK) && wait--);
if (done & BUSY_MASK) {
@@ -111,8 +108,8 @@
return -EBUSY;
}
- data = xgene_enet_rd_mac(addr, MII_MGMT_STATUS_ADDR);
- xgene_enet_wr_mac(addr, MII_MGMT_COMMAND_ADDR, 0);
+ data = xgene_mdio_rd_mac(pdata, MII_MGMT_STATUS_ADDR);
+ xgene_mdio_wr_mac(pdata, MII_MGMT_COMMAND_ADDR, 0);
return data;
}
@@ -120,17 +117,17 @@
int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data)
{
- void __iomem *addr = (void __iomem *)bus->priv;
+ struct xgene_mdio_pdata *pdata = (struct xgene_mdio_pdata *)bus->priv;
u32 val, done;
u8 wait = 10;
val = SET_VAL(PHY_ADDR, phy_id) | SET_VAL(REG_ADDR, reg);
- xgene_enet_wr_mac(addr, MII_MGMT_ADDRESS_ADDR, val);
+ xgene_mdio_wr_mac(pdata, MII_MGMT_ADDRESS_ADDR, val);
- xgene_enet_wr_mac(addr, MII_MGMT_CONTROL_ADDR, data);
+ xgene_mdio_wr_mac(pdata, MII_MGMT_CONTROL_ADDR, data);
do {
usleep_range(5, 10);
- done = xgene_enet_rd_mac(addr, MII_MGMT_INDICATORS_ADDR);
+ done = xgene_mdio_rd_mac(pdata, MII_MGMT_INDICATORS_ADDR);
} while ((done & BUSY_MASK) && wait--);
if (done & BUSY_MASK) {
@@ -174,8 +171,8 @@
static void xgene_gmac_reset(struct xgene_mdio_pdata *pdata)
{
- xgene_enet_wr_mac(pdata->mac_csr_addr, MAC_CONFIG_1_ADDR, SOFT_RESET);
- xgene_enet_wr_mac(pdata->mac_csr_addr, MAC_CONFIG_1_ADDR, 0);
+ xgene_mdio_wr_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET);
+ xgene_mdio_wr_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}
static int xgene_mdio_reset(struct xgene_mdio_pdata *pdata)
@@ -375,6 +372,9 @@
pdata->mdio_csr_addr = csr_base + BLOCK_XG_MDIO_CSR_OFFSET;
pdata->diag_csr_addr = csr_base + BLOCK_DIAG_CSR_OFFSET;
+ if (mdio_id == XGENE_MDIO_RGMII)
+ spin_lock_init(&pdata->mac_lock);
+
if (dev->of_node) {
pdata->clk = devm_clk_get(dev, NULL);
if (IS_ERR(pdata->clk)) {
@@ -396,7 +396,7 @@
if (mdio_id == XGENE_MDIO_RGMII) {
mdio_bus->read = xgene_mdio_rgmii_read;
mdio_bus->write = xgene_mdio_rgmii_write;
- mdio_bus->priv = (void __force *)pdata->mac_csr_addr;
+ mdio_bus->priv = (void __force *)pdata;
snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s",
"xgene-mii-rgmii");
} else {
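The rewritten wait loops above poll the command-done register for at most
roughly 10us while holding the new mac_lock spinlock. For comparison only,
the same bounded non-sleeping poll could be expressed with the kernel's
iopoll helpers; whether linux/iopoll.h is desirable here is an open
question, so treat this as a sketch:

#include <linux/iopoll.h>

/* Illustrative alternative to the open-coded wait loop above:
 * poll cmd_done every 1us, for at most 10us, without sleeping.
 */
static int example_wait_cmd_done(void __iomem *cmd_done)
{
	u32 done;

	return readx_poll_timeout_atomic(ioread32, cmd_done, done,
					 done, 1, 10);
}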
diff --git a/drivers/net/phy/mdio-xgene.h b/drivers/net/phy/mdio-xgene.h
index 594a11d..3c85f3e 100644
--- a/drivers/net/phy/mdio-xgene.h
+++ b/drivers/net/phy/mdio-xgene.h
@@ -102,6 +102,7 @@
void __iomem *mdio_csr_addr;
struct mii_bus *mdio_bus;
int mdio_id;
+ spinlock_t mac_lock; /* serializes indirect MAC CSR access */
};
/* Set the specified value into a bit-field defined by its starting position
@@ -132,6 +133,8 @@
#define GET_BIT(field, src) \
xgene_enet_get_field_value(field ## _POS, 1, src)
+u32 xgene_mdio_rd_mac(struct xgene_mdio_pdata *pdata, u32 rd_addr);
+void xgene_mdio_wr_mac(struct xgene_mdio_pdata *pdata, u32 wr_addr, u32 data);
int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg);
int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data);
struct phy_device *xgene_enet_phy_register(struct mii_bus *bus, int phy_addr);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index f99c21f..2df7b62c 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -263,21 +263,10 @@
for_each_available_child_of_node(bus->dev.of_node, child) {
int addr;
- int ret;
- ret = of_property_read_u32(child, "reg", &addr);
- if (ret < 0) {
- dev_err(dev, "%s has invalid MDIO address\n",
- child->full_name);
+ addr = of_mdio_parse_addr(dev, child);
+ if (addr < 0)
continue;
- }
-
- /* A MDIO device must have a reg property in the range [0-31] */
- if (addr >= PHY_MAX_ADDR) {
- dev_err(dev, "%s MDIO address %i is too large\n",
- child->full_name, addr);
- continue;
- }
if (addr == mdiodev->addr) {
dev->of_node = child;
@@ -364,33 +353,18 @@
mutex_init(&bus->mdio_lock);
- /* de-assert bus level PHY GPIO resets */
- if (bus->num_reset_gpios > 0) {
- bus->reset_gpiod = devm_kcalloc(&bus->dev,
- bus->num_reset_gpios,
- sizeof(struct gpio_desc *),
- GFP_KERNEL);
- if (!bus->reset_gpiod)
- return -ENOMEM;
- }
+ /* de-assert bus level PHY GPIO reset */
+ gpiod = devm_gpiod_get_optional(&bus->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
+ dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n",
+ bus->id);
+ return PTR_ERR(gpiod);
+ } else if (gpiod) {
+ bus->reset_gpiod = gpiod;
- for (i = 0; i < bus->num_reset_gpios; i++) {
- gpiod = devm_gpiod_get_index(&bus->dev, "reset", i,
- GPIOD_OUT_LOW);
- if (IS_ERR(gpiod)) {
- err = PTR_ERR(gpiod);
- if (err != -ENOENT) {
- dev_err(&bus->dev,
- "mii_bus %s couldn't get reset GPIO\n",
- bus->id);
- return err;
- }
- } else {
- bus->reset_gpiod[i] = gpiod;
- gpiod_set_value_cansleep(gpiod, 1);
- udelay(bus->reset_delay_us);
- gpiod_set_value_cansleep(gpiod, 0);
- }
+ gpiod_set_value_cansleep(gpiod, 1);
+ udelay(bus->reset_delay_us);
+ gpiod_set_value_cansleep(gpiod, 0);
}
if (bus->reset)
@@ -425,10 +399,8 @@
}
/* Put PHYs in RESET to save power */
- for (i = 0; i < bus->num_reset_gpios; i++) {
- if (bus->reset_gpiod[i])
- gpiod_set_value_cansleep(bus->reset_gpiod[i], 1);
- }
+ if (bus->reset_gpiod)
+ gpiod_set_value_cansleep(bus->reset_gpiod, 1);
device_del(&bus->dev);
return err;
@@ -453,10 +425,8 @@
}
/* Put PHYs in RESET to save power */
- for (i = 0; i < bus->num_reset_gpios; i++) {
- if (bus->reset_gpiod[i])
- gpiod_set_value_cansleep(bus->reset_gpiod[i], 1);
- }
+ if (bus->reset_gpiod)
+ gpiod_set_value_cansleep(bus->reset_gpiod, 1);
device_del(&bus->dev);
}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index b9252b8d8..9365b07 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -20,6 +20,7 @@
* ksz8081, ksz8091,
* ksz8061,
* Switch : ksz8873, ksz886x
+ * ksz9477
*/
#include <linux/kernel.h>
@@ -793,7 +794,7 @@
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KS8737",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.driver_data = &ks8737_type,
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
@@ -807,7 +808,7 @@
.phy_id_mask = 0x00ffffff,
.name = "Micrel KSZ8021 or KSZ8031",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -825,7 +826,7 @@
.phy_id_mask = 0x00ffffff,
.name = "Micrel KSZ8031",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -843,7 +844,7 @@
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8041",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = ksz8041_config_init,
@@ -861,7 +862,7 @@
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8041RNLI",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -879,7 +880,7 @@
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8051",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8051_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -897,7 +898,7 @@
.name = "Micrel KSZ8001 or KS8721",
.phy_id_mask = 0x00fffffc,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -915,7 +916,7 @@
.name = "Micrel KSZ8081 or KSZ8091",
.phy_id_mask = MICREL_PHY_ID_MASK,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8081_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -933,7 +934,7 @@
.name = "Micrel KSZ8061",
.phy_id_mask = MICREL_PHY_ID_MASK,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -946,7 +947,7 @@
.phy_id_mask = 0x000ffffe,
.name = "Micrel KSZ9021 Gigabit PHY",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9021_config_init,
@@ -966,7 +967,7 @@
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ9031 Gigabit PHY",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9031_config_init,
@@ -983,7 +984,6 @@
.phy_id = PHY_ID_KSZ8873MLL,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8873MLL Switch",
- .flags = PHY_HAS_MAGICANEG,
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
@@ -994,7 +994,7 @@
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ886X Switch",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -1005,12 +1005,22 @@
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8795",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
+}, {
+ .phy_id = PHY_ID_KSZ9477,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
+ .name = "Microchip KSZ9477",
+ .features = PHY_GBIT_FEATURES,
+ .config_init = kszphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
} };
module_phy_driver(ksphy_driver);
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 2b2f543..37ee856 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -146,7 +146,7 @@
.name = "Microchip LAN88xx",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+ .flags = PHY_HAS_INTERRUPT,
.probe = lan88xx_probe,
.remove = lan88xx_remove,
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
new file mode 100644
index 0000000..dada819
--- /dev/null
+++ b/drivers/net/phy/phy-c45.c
@@ -0,0 +1,298 @@
+/*
+ * Clause 45 PHY support
+ */
+#include <linux/ethtool.h>
+#include <linux/export.h>
+#include <linux/mdio.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+/**
+ * genphy_c45_pma_setup_forced - configures a forced speed
+ * @phydev: target phy_device struct
+ */
+int genphy_c45_pma_setup_forced(struct phy_device *phydev)
+{
+ int ctrl1, ctrl2, ret;
+
+ /* Half duplex is not supported */
+ if (phydev->duplex != DUPLEX_FULL)
+ return -EINVAL;
+
+ ctrl1 = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
+ if (ctrl1 < 0)
+ return ctrl1;
+
+ ctrl2 = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2);
+ if (ctrl2 < 0)
+ return ctrl2;
+
+ ctrl1 &= ~MDIO_CTRL1_SPEEDSEL;
+ /*
+ * PMA/PMD type selection is 1.7.5:0 not 1.7.3:0. See 45.2.1.6.1
+ * in 802.3-2012 and 802.3-2015.
+ */
+ ctrl2 &= ~(MDIO_PMA_CTRL2_TYPE | 0x30);
+
+ switch (phydev->speed) {
+ case SPEED_10:
+ ctrl2 |= MDIO_PMA_CTRL2_10BT;
+ break;
+ case SPEED_100:
+ ctrl1 |= MDIO_PMA_CTRL1_SPEED100;
+ ctrl2 |= MDIO_PMA_CTRL2_100BTX;
+ break;
+ case SPEED_1000:
+ ctrl1 |= MDIO_PMA_CTRL1_SPEED1000;
+ /* Assume 1000base-T */
+ ctrl2 |= MDIO_PMA_CTRL2_1000BT;
+ break;
+ case SPEED_10000:
+ ctrl1 |= MDIO_CTRL1_SPEED10G;
+ /* Assume 10Gbase-T */
+ ctrl2 |= MDIO_PMA_CTRL2_10GBT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1, ctrl1);
+ if (ret < 0)
+ return ret;
+
+ return phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2, ctrl2);
+}
+EXPORT_SYMBOL_GPL(genphy_c45_pma_setup_forced);
+
+/**
+ * genphy_c45_an_disable_aneg - disable auto-negotiation
+ * @phydev: target phy_device struct
+ *
+ * Disable auto-negotiation in the Clause 45 PHY. The link parameters
+ * are controlled through the PMA/PMD MMD registers.
+ *
+ * Returns zero on success, negative errno code on failure.
+ */
+int genphy_c45_an_disable_aneg(struct phy_device *phydev)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+ if (val < 0)
+ return val;
+
+ val &= ~(MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART);
+
+ return phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, val);
+}
+EXPORT_SYMBOL_GPL(genphy_c45_an_disable_aneg);
+
+/**
+ * genphy_c45_restart_aneg - Enable and restart auto-negotiation
+ * @phydev: target phy_device struct
+ *
+ * This assumes that the auto-negotiation MMD is present.
+ *
+ * Enable and restart auto-negotiation.
+ */
+int genphy_c45_restart_aneg(struct phy_device *phydev)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+ if (val < 0)
+ return val;
+
+ val |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART;
+
+ return phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, val);
+}
+EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg);
+
+/**
+ * genphy_c45_aneg_done - return auto-negotiation complete status
+ * @phydev: target phy_device struct
+ *
+ * This assumes that the auto-negotiation MMD is present.
+ *
+ * Reads the status register from the auto-negotiation MMD, returning:
+ * - positive if auto-negotiation is complete
+ * - negative errno code on error
+ * - zero otherwise
+ */
+int genphy_c45_aneg_done(struct phy_device *phydev)
+{
+ int val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+
+ return val < 0 ? val : val & MDIO_AN_STAT1_COMPLETE ? 1 : 0;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_aneg_done);
+
+/**
+ * genphy_c45_read_link - read the overall link status from the MMDs
+ * @phydev: target phy_device struct
+ * @mmd_mask: MMDs to read status from
+ *
+ * Read the link status from the specified MMDs, and if they all indicate
+ * that the link is up, return positive. If an error is encountered,
+ * a negative errno will be returned, otherwise zero.
+ */
+int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask)
+{
+ int val, devad;
+ bool link = true;
+
+ while (mmd_mask) {
+ devad = __ffs(mmd_mask);
+ mmd_mask &= ~BIT(devad);
+
+ /* The link state is latched low so that momentary link
+ * drops can be detected. Do not double-read the status
+ * register if the link is down.
+ */
+ val = phy_read_mmd(phydev, devad, MDIO_STAT1);
+ if (val < 0)
+ return val;
+
+ if (!(val & MDIO_STAT1_LSTATUS))
+ link = false;
+ }
+
+ return link;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_read_link);
+
+/**
+ * genphy_c45_read_lpa - read the link partner advertisement and pause
+ * @phydev: target phy_device struct
+ *
+ * Read the Clause 45 defined base (7.19) and 10G (7.33) status registers,
+ * filling in the link partner advertisment, pause and asym_pause members
+ * in @phydev. This assumes that the auto-negotiation MMD is present, and
+ * the backplane bit (7.48.0) is clear. Clause 45 PHY drivers are expected
+ * to fill in the remainder of the link partner advert from vendor registers.
+ */
+int genphy_c45_read_lpa(struct phy_device *phydev)
+{
+ int val;
+
+ /* Read the link partner's base page advertisement */
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (val < 0)
+ return val;
+
+ phydev->lp_advertising = mii_lpa_to_ethtool_lpa_t(val);
+ phydev->pause = val & LPA_PAUSE_CAP ? 1 : 0;
+ phydev->asym_pause = val & LPA_PAUSE_ASYM ? 1 : 0;
+
+ /* Read the link partner's 10G advertisement */
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
+ if (val < 0)
+ return val;
+
+ if (val & MDIO_AN_10GBT_STAT_LP10G)
+ phydev->lp_advertising |= ADVERTISED_10000baseT_Full;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_read_lpa);
+
+/**
+ * genphy_c45_read_pma - read link speed etc. from the PMA
+ * @phydev: target phy_device struct
+ */
+int genphy_c45_read_pma(struct phy_device *phydev)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
+ if (val < 0)
+ return val;
+
+ switch (val & MDIO_CTRL1_SPEEDSEL) {
+ case 0:
+ phydev->speed = SPEED_10;
+ break;
+ case MDIO_PMA_CTRL1_SPEED100:
+ phydev->speed = SPEED_100;
+ break;
+ case MDIO_PMA_CTRL1_SPEED1000:
+ phydev->speed = SPEED_1000;
+ break;
+ case MDIO_CTRL1_SPEED10G:
+ phydev->speed = SPEED_10000;
+ break;
+ default:
+ phydev->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ phydev->duplex = DUPLEX_FULL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_read_pma);
+
+/* The gen10g_* functions are the old Clause 45 stubs */
+
+static int gen10g_config_aneg(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static int gen10g_read_status(struct phy_device *phydev)
+{
+ u32 mmd_mask = phydev->c45_ids.devices_in_package;
+ int ret;
+
+ /* For now just lie and say it's 10G all the time */
+ phydev->speed = SPEED_10000;
+ phydev->duplex = DUPLEX_FULL;
+
+ /* Avoid reading the vendor MMDs */
+ mmd_mask &= ~(BIT(MDIO_MMD_VEND1) | BIT(MDIO_MMD_VEND2));
+
+ ret = genphy_c45_read_link(phydev, mmd_mask);
+
+ phydev->link = ret > 0 ? 1 : 0;
+
+ return 0;
+}
+
+static int gen10g_soft_reset(struct phy_device *phydev)
+{
+ /* Do nothing for now */
+ return 0;
+}
+
+static int gen10g_config_init(struct phy_device *phydev)
+{
+ /* Temporarily just say we support everything */
+ phydev->supported = SUPPORTED_10000baseT_Full;
+ phydev->advertising = SUPPORTED_10000baseT_Full;
+
+ return 0;
+}
+
+static int gen10g_suspend(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static int gen10g_resume(struct phy_device *phydev)
+{
+ return 0;
+}
+
+struct phy_driver genphy_10g_driver = {
+ .phy_id = 0xffffffff,
+ .phy_id_mask = 0xffffffff,
+ .name = "Generic 10G PHY",
+ .soft_reset = gen10g_soft_reset,
+ .config_init = gen10g_config_init,
+ .features = 0,
+ .config_aneg = gen10g_config_aneg,
+ .read_status = gen10g_read_status,
+ .suspend = gen10g_suspend,
+ .resume = gen10g_resume,
+};
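
The exported genphy_c45_* helpers are building blocks rather than a
complete driver. A minimal sketch of how a Clause 45 PHY driver might
compose them (the example_c45_* names are hypothetical; the helper
calls come from this file and the MDIO_DEVS_* masks from
include/uapi/linux/mdio.h):

static int example_c45_config_aneg(struct phy_device *phydev)
{
	if (phydev->autoneg == AUTONEG_DISABLE)
		return genphy_c45_pma_setup_forced(phydev);

	return genphy_c45_restart_aneg(phydev);
}

static int example_c45_read_status(struct phy_device *phydev)
{
	u32 mmd_mask = MDIO_DEVS_PMAPMD | MDIO_DEVS_AN;
	int ret;

	ret = genphy_c45_read_link(phydev, mmd_mask);
	if (ret < 0)
		return ret;

	phydev->link = ret > 0 ? 1 : 0;
	if (!phydev->link)
		return 0;

	ret = genphy_c45_read_lpa(phydev);
	if (ret < 0)
		return ret;

	/* Speed and duplex come from the PMA/PMD control register */
	return genphy_c45_read_pma(phydev);
}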
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index eebb0e1..a9dc366 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -151,6 +151,25 @@
return 0;
}
+/**
+ * phy_restart_aneg - restart auto-negotiation
+ * @phydev: target phy_device struct
+ *
+ * Restart the autonegotiation on @phydev. Returns >= 0 on success or
+ * negative errno on error.
+ */
+int phy_restart_aneg(struct phy_device *phydev)
+{
+ int ret;
+
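+ /* Per 802.3, bit 0 of the devices-in-package mask indicates Clause 22
+ * register support; without it, restart aneg through the Clause 45
+ * AN MMD helper instead of the Clause 22 BMCR.
+ */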
+ if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
+ ret = genphy_c45_restart_aneg(phydev);
+ else
+ ret = genphy_restart_aneg(phydev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_restart_aneg);
/**
* phy_aneg_done - return auto-negotiation status
@@ -165,6 +184,12 @@
if (phydev->drv && phydev->drv->aneg_done)
return phydev->drv->aneg_done(phydev);
+ /* Avoid genphy_aneg_done() if the Clause 45 PHY does not
+ * implement Clause 22 registers
+ */
+ if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
+ return -EINVAL;
+
return genphy_aneg_done(phydev);
}
EXPORT_SYMBOL(phy_aneg_done);
@@ -486,32 +511,8 @@
}
EXPORT_SYMBOL(phy_ethtool_ksettings_set);
-int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
-{
- cmd->supported = phydev->supported;
-
- cmd->advertising = phydev->advertising;
- cmd->lp_advertising = phydev->lp_advertising;
-
- ethtool_cmd_speed_set(cmd, phydev->speed);
- cmd->duplex = phydev->duplex;
- if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
- cmd->port = PORT_BNC;
- else
- cmd->port = PORT_MII;
- cmd->phy_address = phydev->mdio.addr;
- cmd->transceiver = phy_is_internal(phydev) ?
- XCVR_INTERNAL : XCVR_EXTERNAL;
- cmd->autoneg = phydev->autoneg;
- cmd->eth_tp_mdix_ctrl = phydev->mdix_ctrl;
- cmd->eth_tp_mdix = phydev->mdix;
-
- return 0;
-}
-EXPORT_SYMBOL(phy_ethtool_gset);
-
-int phy_ethtool_ksettings_get(struct phy_device *phydev,
- struct ethtool_link_ksettings *cmd)
+void phy_ethtool_ksettings_get(struct phy_device *phydev,
+ struct ethtool_link_ksettings *cmd)
{
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
phydev->supported);
@@ -533,8 +534,6 @@
cmd->base.autoneg = phydev->autoneg;
cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
cmd->base.eth_tp_mdix = phydev->mdix;
-
- return 0;
}
EXPORT_SYMBOL(phy_ethtool_ksettings_get);
@@ -1417,7 +1416,7 @@
/* Restart autonegotiation so the new modes get sent to the
* link partner.
*/
- ret = genphy_restart_aneg(phydev);
+ ret = phy_restart_aneg(phydev);
if (ret < 0)
return ret;
}
@@ -1450,7 +1449,9 @@
if (!phydev)
return -ENODEV;
- return phy_ethtool_ksettings_get(phydev, cmd);
+ phy_ethtool_ksettings_get(phydev, cmd);
+
+ return 0;
}
EXPORT_SYMBOL(phy_ethtool_get_link_ksettings);
@@ -1476,6 +1477,6 @@
if (!phydev->drv)
return -EIO;
- return genphy_restart_aneg(phydev);
+ return phy_restart_aneg(phydev);
}
EXPORT_SYMBOL(phy_ethtool_nway_reset);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 1219eea..acf00f0 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -69,13 +69,8 @@
phy_device_remove(phydev);
}
-enum genphy_driver {
- GENPHY_DRV_1G,
- GENPHY_DRV_10G,
- GENPHY_DRV_MAX
-};
-
-static struct phy_driver genphy_driver[GENPHY_DRV_MAX];
+static struct phy_driver genphy_driver;
+extern struct phy_driver genphy_10g_driver;
static LIST_HEAD(phy_fixup_list);
static DEFINE_MUTEX(phy_fixup_lock);
@@ -928,11 +923,9 @@
*/
if (!d->driver) {
if (phydev->is_c45)
- d->driver =
- &genphy_driver[GENPHY_DRV_10G].mdiodrv.driver;
+ d->driver = &genphy_10g_driver.mdiodrv.driver;
else
- d->driver =
- &genphy_driver[GENPHY_DRV_1G].mdiodrv.driver;
+ d->driver = &genphy_driver.mdiodrv.driver;
using_genphy = true;
}
@@ -961,6 +954,27 @@
phydev->attached_dev = dev;
dev->phydev = phydev;
+ /* Some Ethernet drivers try to connect to a PHY device before
+ * calling register_netdevice() -> netdev_register_kobject(),
+ * which does the dev->dev.kobj initialization. Here we only
+ * check for success, which indicates that the network device
+ * kobject is ready. We also need to keep track of whether the
+ * links were successfully set up, so that phy_detach() can
+ * remove them accordingly.
+ */
+ phydev->sysfs_links = false;
+
+ err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj,
+ "attached_dev");
+ if (!err) {
+ err = sysfs_create_link(&dev->dev.kobj, &phydev->mdio.dev.kobj,
+ "phydev");
+ if (err)
+ goto error;
+
+ phydev->sysfs_links = true;
+ }
+
phydev->dev_flags = flags;
phydev->interface = interface;
@@ -1048,8 +1062,11 @@
struct net_device *dev = phydev->attached_dev;
struct module *ndev_owner = dev->dev.parent->driver->owner;
struct mii_bus *bus;
- int i;
+ if (phydev->sysfs_links) {
+ sysfs_remove_link(&dev->dev.kobj, "phydev");
+ sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev");
+ }
phydev->attached_dev->phydev = NULL;
phydev->attached_dev = NULL;
phy_suspend(phydev);
@@ -1063,13 +1080,9 @@
* from the generic driver so that there's a chance a
* real driver could be loaded
*/
- for (i = 0; i < ARRAY_SIZE(genphy_driver); i++) {
- if (phydev->mdio.dev.driver ==
- &genphy_driver[i].mdiodrv.driver) {
- device_release_driver(&phydev->mdio.dev);
- break;
- }
- }
+ if (phydev->mdio.dev.driver == &genphy_10g_driver.mdiodrv.driver ||
+ phydev->mdio.dev.driver == &genphy_driver.mdiodrv.driver)
+ device_release_driver(&phydev->mdio.dev);
/*
* The phydev might go away on the put_device() below, so avoid
@@ -1343,11 +1356,6 @@
}
EXPORT_SYMBOL(genphy_aneg_done);
-static int gen10g_config_aneg(struct phy_device *phydev)
-{
- return 0;
-}
-
/**
* genphy_update_link - update link status in @phydev
* @phydev: target phy_device struct
@@ -1481,33 +1489,6 @@
}
EXPORT_SYMBOL(genphy_read_status);
-static int gen10g_read_status(struct phy_device *phydev)
-{
- int devad, reg;
- u32 mmd_mask = phydev->c45_ids.devices_in_package;
-
- phydev->link = 1;
-
- /* For now just lie and say it's 10G all the time */
- phydev->speed = SPEED_10000;
- phydev->duplex = DUPLEX_FULL;
-
- for (devad = 0; mmd_mask; devad++, mmd_mask = mmd_mask >> 1) {
- if (!(mmd_mask & 1))
- continue;
-
- /* Read twice because link state is latched and a
- * read moves the current state into the register
- */
- phy_read_mmd(phydev, devad, MDIO_STAT1);
- reg = phy_read_mmd(phydev, devad, MDIO_STAT1);
- if (reg < 0 || !(reg & MDIO_STAT1_LSTATUS))
- phydev->link = 0;
- }
-
- return 0;
-}
-
/**
* genphy_soft_reset - software reset the PHY via BMCR_RESET bit
* @phydev: target phy_device struct
@@ -1571,23 +1552,8 @@
return 0;
}
-
-static int gen10g_soft_reset(struct phy_device *phydev)
-{
- /* Do nothing for now */
- return 0;
-}
EXPORT_SYMBOL(genphy_config_init);
-static int gen10g_config_init(struct phy_device *phydev)
-{
- /* Temporarily just say we support everything */
- phydev->supported = SUPPORTED_10000baseT_Full;
- phydev->advertising = SUPPORTED_10000baseT_Full;
-
- return 0;
-}
-
int genphy_suspend(struct phy_device *phydev)
{
int value;
@@ -1603,11 +1569,6 @@
}
EXPORT_SYMBOL(genphy_suspend);
-static int gen10g_suspend(struct phy_device *phydev)
-{
- return 0;
-}
-
int genphy_resume(struct phy_device *phydev)
{
int value;
@@ -1623,11 +1584,6 @@
}
EXPORT_SYMBOL(genphy_resume);
-static int gen10g_resume(struct phy_device *phydev)
-{
- return 0;
-}
-
static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
{
/* The default values for phydev->supported are provided by the PHY
@@ -1859,8 +1815,7 @@
}
EXPORT_SYMBOL(phy_drivers_unregister);
-static struct phy_driver genphy_driver[] = {
-{
+static struct phy_driver genphy_driver = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic PHY",
@@ -1874,18 +1829,7 @@
.read_status = genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
-}, {
- .phy_id = 0xffffffff,
- .phy_id_mask = 0xffffffff,
- .name = "Generic 10G PHY",
- .soft_reset = gen10g_soft_reset,
- .config_init = gen10g_config_init,
- .features = 0,
- .config_aneg = gen10g_config_aneg,
- .read_status = gen10g_read_status,
- .suspend = gen10g_suspend,
- .resume = gen10g_resume,
-} };
+};
static int __init phy_init(void)
{
@@ -1895,18 +1839,24 @@
if (rc)
return rc;
- rc = phy_drivers_register(genphy_driver,
- ARRAY_SIZE(genphy_driver), THIS_MODULE);
+ rc = phy_driver_register(&genphy_10g_driver, THIS_MODULE);
if (rc)
+ goto err_10g;
+
+ rc = phy_driver_register(&genphy_driver, THIS_MODULE);
+ if (rc) {
+ phy_driver_unregister(&genphy_10g_driver);
+err_10g:
mdio_bus_exit();
+ }
return rc;
}
static void __exit phy_exit(void)
{
- phy_drivers_unregister(genphy_driver,
- ARRAY_SIZE(genphy_driver));
+ phy_driver_unregister(&genphy_10g_driver);
+ phy_driver_unregister(&genphy_driver);
mdio_bus_exit();
}
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index cef6967..1b8204b 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -25,6 +25,16 @@
#include <linux/netdevice.h>
#include <linux/smscphy.h>
+struct smsc_hw_stat {
+ const char *string;
+ u8 reg;
+ u8 bits;
+};
+
+static struct smsc_hw_stat smsc_hw_stats[] = {
+ { "phy_symbol_errors", 26, 16},
+};
+
struct smsc_phy_priv {
bool energy_enable;
};
@@ -143,6 +153,48 @@
return err;
}
+static int smsc_get_sset_count(struct phy_device *phydev)
+{
+ return ARRAY_SIZE(smsc_hw_stats);
+}
+
+static void smsc_get_strings(struct phy_device *phydev, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(smsc_hw_stats); i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ smsc_hw_stats[i].string, ETH_GSTRING_LEN);
+ }
+}
+
+#ifndef UINT64_MAX
+#define UINT64_MAX (u64)(~((u64)0))
+#endif
+static u64 smsc_get_stat(struct phy_device *phydev, int i)
+{
+ struct smsc_hw_stat stat = smsc_hw_stats[i];
+ int val;
+ u64 ret;
+
+ val = phy_read(phydev, stat.reg);
+ if (val < 0)
+ ret = UINT64_MAX;
+ else
+ ret = val;
+
+ return ret;
+}
+
+static void smsc_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(smsc_hw_stats); i++)
+ data[i] = smsc_get_stat(phydev, i);
+}
+
static int smsc_phy_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
@@ -170,7 +222,7 @@
.name = "SMSC LAN83C185",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+ .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -192,7 +244,7 @@
.name = "SMSC LAN8187",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+ .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -206,6 +258,11 @@
.ack_interrupt = smsc_phy_ack_interrupt,
.config_intr = smsc_phy_config_intr,
+ /* Statistics */
+ .get_sset_count = smsc_get_sset_count,
+ .get_strings = smsc_get_strings,
+ .get_stats = smsc_get_stats,
+
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
@@ -214,7 +271,7 @@
.name = "SMSC LAN8700",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+ .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -228,6 +285,11 @@
.ack_interrupt = smsc_phy_ack_interrupt,
.config_intr = smsc_phy_config_intr,
+ /* Statistics */
+ .get_sset_count = smsc_get_sset_count,
+ .get_strings = smsc_get_strings,
+ .get_stats = smsc_get_stats,
+
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
@@ -236,7 +298,7 @@
.name = "SMSC LAN911x Internal PHY",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+ .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -257,7 +319,7 @@
.name = "SMSC LAN8710/LAN8720",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+ .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -271,6 +333,11 @@
.ack_interrupt = smsc_phy_ack_interrupt,
.config_intr = smsc_phy_config_intr,
+ /* Statistics */
+ .get_sset_count = smsc_get_sset_count,
+ .get_strings = smsc_get_strings,
+ .get_stats = smsc_get_stats,
+
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
@@ -279,7 +346,7 @@
.name = "SMSC LAN8740",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+ .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -293,6 +360,11 @@
.ack_interrupt = smsc_phy_ack_interrupt,
.config_intr = smsc_phy_config_intr,
+ /* Statistics */
+ .get_sset_count = smsc_get_sset_count,
+ .get_strings = smsc_get_strings,
+ .get_stats = smsc_get_stats,
+
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
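
The get_sset_count/get_strings/get_stats triple is the standard way a
PHY driver exposes hardware counters; userspace reads them with
ethtool --phy-statistics <iface>. Roughly how the core strings the
callbacks together (a sketch assuming the usual string-set pattern;
net/core/ethtool.c is authoritative, and example_dump_phy_stats() is
hypothetical):

static void example_dump_phy_stats(struct phy_device *phydev,
				   struct ethtool_stats *stats)
{
	int i, n = phydev->drv->get_sset_count(phydev);
	u8 *names = kcalloc(n, ETH_GSTRING_LEN, GFP_KERNEL);
	u64 *vals = kcalloc(n, sizeof(u64), GFP_KERNEL);

	if (!names || !vals)
		goto out;

	phydev->drv->get_strings(phydev, names);	/* "phy_symbol_errors", ... */
	phydev->drv->get_stats(phydev, stats, vals);

	for (i = 0; i < n; i++)
		pr_info("%s: %llu\n",
			(char *)(names + i * ETH_GSTRING_LEN), vals[i]);
out:
	kfree(names);
	kfree(vals);
}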
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index feb9569..814fd8f 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -802,7 +802,7 @@
proto = p[0];
if (proto & 1) {
/* protocol is compressed */
- skb_push(skb, 1)[0] = 0;
+ *(u8 *)skb_push(skb, 1) = 0;
} else {
if (skb->len < 2)
goto err;
@@ -894,8 +894,7 @@
/* packet overflowed MRU */
ap->state |= SC_TOSS;
} else {
- sp = skb_put(skb, n);
- memcpy(sp, buf, n);
+ sp = skb_put_data(skb, buf, n);
if (ap->state & SC_ESCAPE) {
sp[0] ^= PPP_TRANS;
ap->state &= ~SC_ESCAPE;
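
The skb_put_data() conversions here and in the USB drivers below rely
on the then-new skbuff helpers, which fold the reserve-then-fill
pattern into a single call. Their semantics in a short sketch
(signatures are from include/linux/skbuff.h; skb_helper_demo() is
hypothetical):

static void skb_helper_demo(struct sk_buff *skb, const void *buf,
			    unsigned int len, unsigned int pad)
{
	/* Old pattern: reserve tail room, then fill it by hand. */
	void *p = skb_put(skb, len);

	memcpy(p, buf, len);

	/* New helpers do both steps at once. */
	skb_put_data(skb, buf, len);	/* append a copy of buf */
	skb_put_zero(skb, pad);		/* append pad zeroed bytes */
	skb_put_u8(skb, 0);		/* append a single byte */
}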
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index f9c0e62..d42091f 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1490,7 +1490,7 @@
/* check if we should pass this packet */
/* the filter instructions are constructed assuming
a four-byte PPP header on each packet */
- *skb_push(skb, 2) = 1;
+ *(u8 *)skb_push(skb, 2) = 1;
if (ppp->pass_filter &&
BPF_PROG_RUN(ppp->pass_filter, skb) == 0) {
if (ppp->debug & 1)
@@ -1618,7 +1618,7 @@
list = list->next;
pch = list_entry(list, struct channel, clist);
- spin_lock_bh(&pch->downl);
+ spin_lock(&pch->downl);
if (pch->chan) {
if (pch->chan->ops->start_xmit(pch->chan, skb))
ppp->xmit_pending = NULL;
@@ -1627,7 +1627,7 @@
kfree_skb(skb);
ppp->xmit_pending = NULL;
}
- spin_unlock_bh(&pch->downl);
+ spin_unlock(&pch->downl);
return;
}
@@ -1757,7 +1757,7 @@
}
/* check the channel's mtu and whether it is still attached. */
- spin_lock_bh(&pch->downl);
+ spin_lock(&pch->downl);
if (pch->chan == NULL) {
/* can't use this channel, it's being deregistered */
if (pch->speed == 0)
@@ -1765,7 +1765,7 @@
else
totspeed -= pch->speed;
- spin_unlock_bh(&pch->downl);
+ spin_unlock(&pch->downl);
pch->avail = 0;
totlen = len;
totfree--;
@@ -1816,7 +1816,7 @@
*/
if (flen <= 0) {
pch->avail = 2;
- spin_unlock_bh(&pch->downl);
+ spin_unlock(&pch->downl);
continue;
}
@@ -1861,14 +1861,14 @@
len -= flen;
++ppp->nxseq;
bits = 0;
- spin_unlock_bh(&pch->downl);
+ spin_unlock(&pch->downl);
}
ppp->nxchan = i;
return 1;
noskb:
- spin_unlock_bh(&pch->downl);
+ spin_unlock(&pch->downl);
if (ppp->debug & 1)
netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
++ppp->dev->stats.tx_errors;
@@ -1883,7 +1883,7 @@
struct sk_buff *skb;
struct ppp *ppp;
- spin_lock_bh(&pch->downl);
+ spin_lock(&pch->downl);
if (pch->chan) {
while (!skb_queue_empty(&pch->file.xq)) {
skb = skb_dequeue(&pch->file.xq);
@@ -1897,14 +1897,14 @@
/* channel got deregistered */
skb_queue_purge(&pch->file.xq);
}
- spin_unlock_bh(&pch->downl);
+ spin_unlock(&pch->downl);
/* see if there is anything from the attached unit to be sent */
if (skb_queue_empty(&pch->file.xq)) {
- read_lock_bh(&pch->upl);
+ read_lock(&pch->upl);
ppp = pch->ppp;
if (ppp)
__ppp_xmit_process(ppp);
- read_unlock_bh(&pch->upl);
+ read_unlock(&pch->upl);
}
}
@@ -2133,7 +2133,7 @@
if (skb_unclone(skb, GFP_ATOMIC))
goto err;
- *skb_push(skb, 2) = 0;
+ *(u8 *)skb_push(skb, 2) = 0;
if (ppp->pass_filter &&
BPF_PROG_RUN(ppp->pass_filter, skb) == 0) {
if (ppp->debug & 1)
@@ -2267,7 +2267,7 @@
* Do protocol ID decompression on the first fragment of each packet.
*/
if ((PPP_MP_CB(skb)->BEbits & B) && (skb->data[0] & 1))
- *skb_push(skb, 1) = 0;
+ *(u8 *)skb_push(skb, 1) = 0;
/*
* Expand sequence number to 32 bits, making it as close
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index f60f766..6c7fd98 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -298,21 +298,14 @@
mppe_rekey(state, 1);
if (debug) {
- int i;
- char mkey[sizeof(state->master_key) * 2 + 1];
- char skey[sizeof(state->session_key) * 2 + 1];
-
printk(KERN_DEBUG "%s[%d]: initialized with %d-bit %s mode\n",
debugstr, unit, (state->keylen == 16) ? 128 : 40,
(state->stateful) ? "stateful" : "stateless");
-
- for (i = 0; i < sizeof(state->master_key); i++)
- sprintf(mkey + i * 2, "%02x", state->master_key[i]);
- for (i = 0; i < sizeof(state->session_key); i++)
- sprintf(skey + i * 2, "%02x", state->session_key[i]);
printk(KERN_DEBUG
- "%s[%d]: keys: master: %s initial session: %s\n",
- debugstr, unit, mkey, skey);
+ "%s[%d]: keys: master: %*phN initial session: %*phN\n",
+ debugstr, unit,
+ (int)sizeof(state->master_key), state->master_key,
+ (int)sizeof(state->session_key), state->session_key);
}
/*
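
The new format string leans on printk's %*phN extension, which
hex-dumps a small buffer with no separators, making the hand-rolled
sprintf loops and temporary key strings unnecessary. A one-line
illustration (the buffer is hypothetical):

	u8 key[4] = { 0xde, 0xad, 0xbe, 0xef };

	printk(KERN_DEBUG "key: %*phN\n", (int)sizeof(key), key);
	/* prints "key: deadbeef" */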
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 9ae5398..7868c29 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -697,8 +697,7 @@
goto err;
}
- p = skb_put(skb, count);
- memcpy(p, buf, count);
+ skb_put_data(skb, buf, count);
/* strip address/control field if present */
p = skb->data;
@@ -712,7 +711,7 @@
/* decompress protocol field if compressed */
if (p[0] & 1) {
/* protocol is compressed */
- skb_push(skb, 1)[0] = 0;
+ *(u8 *)skb_push(skb, 1) = 0;
} else if (skb->len < 2)
goto err;
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index d7e4052..4e1da16 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -877,7 +877,7 @@
skb->priority = sk->sk_priority;
skb->protocol = cpu_to_be16(ETH_P_PPP_SES);
- ph = (struct pppoe_hdr *)skb_put(skb, total_len + sizeof(struct pppoe_hdr));
+ ph = skb_put(skb, total_len + sizeof(struct pppoe_hdr));
start = (char *)&ph->tag[0];
error = memcpy_from_msg(start, m, total_len);
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 1951b10..eac499c 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -328,7 +328,7 @@
if ((*skb->data) & 1) {
/* protocol is compressed */
- skb_push(skb, 1)[0] = 0;
+ *(u8 *)skb_push(skb, 1) = 0;
}
skb->ip_summed = CHECKSUM_NONE;
@@ -506,7 +506,6 @@
{
struct sock *sk = sock->sk;
struct pppox_sock *po;
- struct pptp_opt *opt;
int error = 0;
if (!sk)
@@ -520,7 +519,6 @@
}
po = pppox_sk(sk);
- opt = &po->proto.pptp;
del_chan(po);
pppox_unbind_sock(sk);
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 74b9072..436dd78 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -364,7 +364,7 @@
return;
}
skb->dev = dev;
- memcpy(skb_put(skb, count), sl->rbuff, count);
+ skb_put_data(skb, sl->rbuff, count);
skb_reset_mac_header(skb);
skb->protocol = htons(ETH_P_IP);
netif_rx_ni(skb);
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index 92578d72..63a8ff8 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -886,7 +886,7 @@
SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)
/* Broadcom BCM 5201 */
-static struct mii_phy_ops bcm5201_phy_ops = {
+static const struct mii_phy_ops bcm5201_phy_ops = {
.init = bcm5201_init,
.suspend = bcm5201_suspend,
.setup_aneg = genmii_setup_aneg,
@@ -905,7 +905,7 @@
};
/* Broadcom BCM 5221 */
-static struct mii_phy_ops bcm5221_phy_ops = {
+static const struct mii_phy_ops bcm5221_phy_ops = {
.suspend = bcm5221_suspend,
.init = bcm5221_init,
.setup_aneg = genmii_setup_aneg,
@@ -924,7 +924,7 @@
};
/* Broadcom BCM 5241 */
-static struct mii_phy_ops bcm5241_phy_ops = {
+static const struct mii_phy_ops bcm5241_phy_ops = {
.suspend = bcm5241_suspend,
.init = bcm5241_init,
.setup_aneg = genmii_setup_aneg,
@@ -942,7 +942,7 @@
};
/* Broadcom BCM 5400 */
-static struct mii_phy_ops bcm5400_phy_ops = {
+static const struct mii_phy_ops bcm5400_phy_ops = {
.init = bcm5400_init,
.suspend = bcm5400_suspend,
.setup_aneg = bcm54xx_setup_aneg,
@@ -961,7 +961,7 @@
};
/* Broadcom BCM 5401 */
-static struct mii_phy_ops bcm5401_phy_ops = {
+static const struct mii_phy_ops bcm5401_phy_ops = {
.init = bcm5401_init,
.suspend = bcm5401_suspend,
.setup_aneg = bcm54xx_setup_aneg,
@@ -980,7 +980,7 @@
};
/* Broadcom BCM 5411 */
-static struct mii_phy_ops bcm5411_phy_ops = {
+static const struct mii_phy_ops bcm5411_phy_ops = {
.init = bcm5411_init,
.suspend = generic_suspend,
.setup_aneg = bcm54xx_setup_aneg,
@@ -999,7 +999,7 @@
};
/* Broadcom BCM 5421 */
-static struct mii_phy_ops bcm5421_phy_ops = {
+static const struct mii_phy_ops bcm5421_phy_ops = {
.init = bcm5421_init,
.suspend = generic_suspend,
.setup_aneg = bcm54xx_setup_aneg,
@@ -1019,7 +1019,7 @@
};
/* Broadcom BCM 5421 built-in K2 */
-static struct mii_phy_ops bcm5421k2_phy_ops = {
+static const struct mii_phy_ops bcm5421k2_phy_ops = {
.init = bcm5421_init,
.suspend = generic_suspend,
.setup_aneg = bcm54xx_setup_aneg,
@@ -1037,7 +1037,7 @@
.ops = &bcm5421k2_phy_ops
};
-static struct mii_phy_ops bcm5461_phy_ops = {
+static const struct mii_phy_ops bcm5461_phy_ops = {
.init = bcm5421_init,
.suspend = generic_suspend,
.setup_aneg = bcm54xx_setup_aneg,
@@ -1057,7 +1057,7 @@
};
/* Broadcom BCM 5462 built-in Vesta */
-static struct mii_phy_ops bcm5462V_phy_ops = {
+static const struct mii_phy_ops bcm5462V_phy_ops = {
.init = bcm5421_init,
.suspend = generic_suspend,
.setup_aneg = bcm54xx_setup_aneg,
@@ -1076,7 +1076,7 @@
};
/* Marvell 88E1101 amd 88E1111 */
-static struct mii_phy_ops marvell88e1101_phy_ops = {
+static const struct mii_phy_ops marvell88e1101_phy_ops = {
.suspend = generic_suspend,
.setup_aneg = marvell_setup_aneg,
.setup_forced = marvell_setup_forced,
@@ -1084,7 +1084,7 @@
.read_link = marvell_read_link
};
-static struct mii_phy_ops marvell88e1111_phy_ops = {
+static const struct mii_phy_ops marvell88e1111_phy_ops = {
.init = marvell88e1111_init,
.suspend = generic_suspend,
.setup_aneg = marvell_setup_aneg,
@@ -1122,7 +1122,7 @@
};
/* Generic implementation for most 10/100 PHYs */
-static struct mii_phy_ops generic_phy_ops = {
+static const struct mii_phy_ops generic_phy_ops = {
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
.poll_link = genmii_poll_link,
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 4d4173d..9af3239 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -824,15 +824,17 @@
static ssize_t tap_do_read(struct tap_queue *q,
struct iov_iter *to,
- int noblock)
+ int noblock, struct sk_buff *skb)
{
DEFINE_WAIT(wait);
- struct sk_buff *skb;
ssize_t ret = 0;
if (!iov_iter_count(to))
return 0;
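+ /* The caller may hand us an skb it already pulled off the queue
+ * (via msg_control in tap_recvmsg() below); if so, skip the
+ * blocking receive and just copy it out.
+ */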
+ if (skb)
+ goto put;
+
while (1) {
if (!noblock)
prepare_to_wait(sk_sleep(&q->sk), &wait,
@@ -856,6 +858,7 @@
if (!noblock)
finish_wait(sk_sleep(&q->sk), &wait);
+put:
if (skb) {
ret = tap_put_user(q, skb, to);
if (unlikely(ret < 0))
@@ -872,7 +875,7 @@
struct tap_queue *q = file->private_data;
ssize_t len = iov_iter_count(to), ret;
- ret = tap_do_read(q, to, file->f_flags & O_NONBLOCK);
+ ret = tap_do_read(q, to, file->f_flags & O_NONBLOCK, NULL);
ret = min_t(ssize_t, ret, len);
if (ret > 0)
iocb->ki_pos = ret;
@@ -1155,7 +1158,8 @@
int ret;
if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
return -EINVAL;
- ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT);
+ ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT,
+ m->msg_control);
if (ret > total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
@@ -1193,6 +1197,19 @@
}
EXPORT_SYMBOL_GPL(tap_get_socket);
+struct skb_array *tap_get_skb_array(struct file *file)
+{
+ struct tap_queue *q;
+
+ if (file->f_op != &tap_fops)
+ return ERR_PTR(-EINVAL);
+ q = file->private_data;
+ if (!q)
+ return ERR_PTR(-EBADFD);
+ return &q->skb_array;
+}
+EXPORT_SYMBOL_GPL(tap_get_skb_array);
+
int tap_queue_resize(struct tap_dev *tap)
{
struct net_device *dev = tap->dev;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index fba8c13..629a412 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2004,12 +2004,6 @@
.ndo_del_slave = team_del_slave,
.ndo_fix_features = team_fix_features,
.ndo_change_carrier = team_change_carrier,
- .ndo_bridge_setlink = switchdev_port_bridge_setlink,
- .ndo_bridge_getlink = switchdev_port_bridge_getlink,
- .ndo_bridge_dellink = switchdev_port_bridge_dellink,
- .ndo_fdb_add = switchdev_port_fdb_add,
- .ndo_fdb_del = switchdev_port_fdb_del,
- .ndo_fdb_dump = switchdev_port_fdb_dump,
.ndo_features_check = passthru_features_check,
};
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
index 3f18982..ddd16a0 100644
--- a/drivers/net/team/team_mode_activebackup.c
+++ b/drivers/net/team/team_mode_activebackup.c
@@ -146,4 +146,4 @@
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Active-backup mode for team");
-MODULE_ALIAS("team-mode-activebackup");
+MODULE_ALIAS_TEAM_MODE("activebackup");
diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c
index 302ff35..e4eac3d 100644
--- a/drivers/net/team/team_mode_broadcast.c
+++ b/drivers/net/team/team_mode_broadcast.c
@@ -75,4 +75,4 @@
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Broadcast mode for team");
-MODULE_ALIAS("team-mode-broadcast");
+MODULE_ALIAS_TEAM_MODE("broadcast");
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index b228bea..1468ddf 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -695,4 +695,4 @@
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Load-balancing mode for team");
-MODULE_ALIAS("team-mode-loadbalance");
+MODULE_ALIAS_TEAM_MODE("loadbalance");
diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c
index 215f845..c20b944 100644
--- a/drivers/net/team/team_mode_random.c
+++ b/drivers/net/team/team_mode_random.c
@@ -65,4 +65,4 @@
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Random mode for team");
-MODULE_ALIAS("team-mode-random");
+MODULE_ALIAS_TEAM_MODE("random");
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index 0aa2341..66c3209 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -77,4 +77,4 @@
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Round-robin mode for team");
-MODULE_ALIAS("team-mode-roundrobin");
+MODULE_ALIAS_TEAM_MODE("roundrobin");
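
MODULE_ALIAS_TEAM_MODE() centralizes the alias format so the mode
string cannot drift from what team_mode_get()'s request_module() call
asks for; presumably it is defined in include/linux/if_team.h along
these lines (a sketch, not the verified definition):

#define MODULE_ALIAS_TEAM_MODE(kind) MODULE_ALIAS("team-mode-" kind)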
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 9ee7d42..ae49f4b 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -465,7 +465,7 @@
rcu_read_lock();
numqueues = ACCESS_ONCE(tun->numqueues);
- txq = skb_get_hash(skb);
+ txq = __skb_get_hash_symmetric(skb);
if (txq) {
e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
if (e) {
@@ -867,7 +867,7 @@
*/
__u32 rxhash;
- rxhash = skb_get_hash(skb);
+ rxhash = __skb_get_hash_symmetric(skb);
if (rxhash) {
struct tun_flow_entry *e;
e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
@@ -1334,7 +1334,7 @@
skb_reset_network_header(skb);
skb_probe_transport_header(skb, 0);
- rxhash = skb_get_hash(skb);
+ rxhash = __skb_get_hash_symmetric(skb);
#ifndef CONFIG_4KSTACKS
tun_rx_batched(tun, tfile, skb, more);
#else
@@ -1510,9 +1510,8 @@
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
struct iov_iter *to,
- int noblock)
+ int noblock, struct sk_buff *skb)
{
- struct sk_buff *skb;
ssize_t ret;
int err;
@@ -1521,10 +1520,12 @@
if (!iov_iter_count(to))
return 0;
- /* Read frames from ring */
- skb = tun_ring_recv(tfile, noblock, &err);
- if (!skb)
- return err;
+ if (!skb) {
+ /* Read frames from ring */
+ skb = tun_ring_recv(tfile, noblock, &err);
+ if (!skb)
+ return err;
+ }
ret = tun_put_user(tun, tfile, skb, to);
if (unlikely(ret < 0))
@@ -1544,7 +1545,7 @@
if (!tun)
return -EBADFD;
- ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK);
+ ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
ret = min_t(ssize_t, ret, len);
if (ret > 0)
iocb->ki_pos = ret;
@@ -1646,7 +1647,8 @@
SOL_PACKET, TUN_TX_TIMESTAMP);
goto out;
}
- ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT);
+ ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT,
+ m->msg_control);
if (ret > (ssize_t)total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
@@ -2626,6 +2628,19 @@
}
EXPORT_SYMBOL_GPL(tun_get_socket);
+struct skb_array *tun_get_skb_array(struct file *file)
+{
+ struct tun_file *tfile;
+
+ if (file->f_op != &tun_fops)
+ return ERR_PTR(-EINVAL);
+ tfile = file->private_data;
+ if (!tfile)
+ return ERR_PTR(-EBADFD);
+ return &tfile->tx_array;
+}
+EXPORT_SYMBOL_GPL(tun_get_skb_array);
+
module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
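
tun_get_skb_array() and the tap counterpart above let an in-kernel
consumer (vhost-net is the intended user) pull packets straight off
the per-queue ring instead of going through the socket receive path;
the skb can then be handed back through recvmsg()'s msg_control, as
seen in tun_recvmsg()/tap_recvmsg(). A hedged sketch of the consumer
side (example_peek_ring() is hypothetical; skb_array_consume() is
from include/linux/skb_array.h):

static struct sk_buff *example_peek_ring(struct file *file)
{
	struct skb_array *ring;

	ring = tun_get_skb_array(file);
	if (IS_ERR(ring))
		ring = tap_get_skb_array(file);
	if (IS_ERR(ring))
		return NULL;

	/* Pop one packet off the producer/consumer ring. */
	return skb_array_consume(ring);
}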
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 125cff5..7847436 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -113,7 +113,6 @@
while (offset + sizeof(u16) <= skb->len) {
u16 copy_length;
- unsigned char *data;
if (!rx->remaining) {
if (skb->len - offset == sizeof(u16)) {
@@ -167,8 +166,8 @@
}
if (rx->ax_skb) {
- data = skb_put(rx->ax_skb, copy_length);
- memcpy(data, skb->data + offset, copy_length);
+ skb_put_data(rx->ax_skb, skb->data + offset,
+ copy_length);
if (!rx->remaining)
usbnet_skb_return(dev, rx->ax_skb);
}
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 51cf600..793ce90 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -624,7 +624,10 @@
struct ethtool_link_ksettings *cmd)
{
struct usbnet *dev = netdev_priv(net);
- return mii_ethtool_get_link_ksettings(&dev->mii, cmd);
+
+ mii_ethtool_get_link_ksettings(&dev->mii, cmd);
+
+ return 0;
}
static int ax88179_set_link_ksettings(struct net_device *net,
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index c7a350b..2952cb5 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -162,7 +162,7 @@
skb = pnd->rx_skb = netdev_alloc_skb(dev, 12);
if (likely(skb)) {
/* Can't use pskb_pull() on page in IRQ */
- memcpy(skb_put(skb, 1), page_address(page), 1);
+ skb_put_data(skb, page_address(page), 1);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
page, 1, req->actual_length,
PAGE_SIZE);
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index a6b997c..18fa45f 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -399,7 +399,7 @@
memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
/* add datagram */
- memcpy(skb_put(skb, len), buf, len);
+ skb_put_data(skb, buf, len);
/* map MBIM session to VLAN */
if (tci)
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index b5cec18..2067743 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1017,7 +1017,7 @@
if (skb->len + align > max)
align = max - skb->len;
if (align && skb_tailroom(skb) >= align)
- memset(skb_put(skb, align), 0, align);
+ skb_put_zero(skb, align);
}
/* return a pointer to a valid struct usb_cdc_ncm_ndp16 of type sign, possibly
@@ -1069,7 +1069,7 @@
/* push a new empty NDP */
if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
- ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size);
+ ndp16 = skb_put_zero(skb, ctx->max_ndp_size);
else
ndp16 = ctx->delayed_ndp16;
@@ -1120,7 +1120,7 @@
goto exit_no_skb;
}
/* fill out the initial 16-bit NTB header */
- nth16 = (struct usb_cdc_ncm_nth16 *)memset(skb_put(skb_out, sizeof(struct usb_cdc_ncm_nth16)), 0, sizeof(struct usb_cdc_ncm_nth16));
+ nth16 = skb_put_zero(skb_out, sizeof(struct usb_cdc_ncm_nth16));
nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
nth16->wSequence = cpu_to_le16(ctx->tx_seq++);
@@ -1180,7 +1180,7 @@
ndp16->dpe16[index].wDatagramLength = cpu_to_le16(skb->len);
ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16));
- memcpy(skb_put(skb_out, skb->len), skb->data, skb->len);
+ skb_put_data(skb_out, skb->data, skb->len);
ctx->tx_curr_frame_payload += skb->len; /* count real tx payload data */
dev_kfree_skb_any(skb);
skb = NULL;
@@ -1229,7 +1229,7 @@
nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_max);
nth16->wNdpIndex = cpu_to_le16(skb_out->len);
- memcpy(skb_put(skb_out, ctx->max_ndp_size), ctx->delayed_ndp16, ctx->max_ndp_size);
+ skb_put_data(skb_out, ctx->delayed_ndp16, ctx->max_ndp_size);
/* Zero out delayed NDP - signature checking will naturally fail. */
ndp16 = memset(ctx->delayed_ndp16, 0, ctx->max_ndp_size);
@@ -1247,10 +1247,10 @@
if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
skb_out->len > ctx->min_tx_pkt) {
padding_count = ctx->tx_max - skb_out->len;
- memset(skb_put(skb_out, padding_count), 0, padding_count);
+ skb_put_zero(skb_out, padding_count);
} else if (skb_out->len < ctx->tx_max &&
(skb_out->len % dev->maxpacket) == 0) {
- *skb_put(skb_out, 1) = 0; /* force short packet */
+ skb_put_u8(skb_out, 0); /* force short packet */
}
/* set final frame length */
@@ -1497,7 +1497,7 @@
skb = netdev_alloc_skb_ip_align(dev->net, len);
if (!skb)
goto error;
- memcpy(skb_put(skb, len), skb_in->data + offset, len);
+ skb_put_data(skb, skb_in->data + offset, len);
usbnet_skb_return(dev, skb);
payload += len; /* count payload bytes in this NTB */
}
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index 1cc24e6..ba1ce10 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -121,8 +121,7 @@
if (gl_skb) {
// copy the packet data to the new skb
- memcpy(skb_put(gl_skb, size),
- packet->packet_data, size);
+ skb_put_data(gl_skb, packet->packet_data, size);
usbnet_skb_return(dev, gl_skb);
}
@@ -175,7 +174,7 @@
}
// attach the packet count to the header
- packet_count = (__le32 *) skb_push(skb, (4 + 4*1));
+ packet_count = skb_push(skb, (4 + 4 * 1));
packet_len = packet_count + 1;
*packet_count = cpu_to_le32(1);
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 00067a0..d7a3379 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -861,7 +861,6 @@
unsigned short temp_bytes;
unsigned short buffer_offset = 0;
unsigned short frame_len;
- unsigned char *tmp_rx_buf;
/* log if needed */
hso_dbg(0x1, "Rx %d bytes\n", count);
@@ -911,11 +910,9 @@
/* Copy what we got so far. make room for iphdr
* after tail. */
- tmp_rx_buf =
- skb_put(odev->skb_rx_buf,
- sizeof(struct iphdr));
- memcpy(tmp_rx_buf, (char *)&(odev->rx_ip_hdr),
- sizeof(struct iphdr));
+ skb_put_data(odev->skb_rx_buf,
+ (char *)&(odev->rx_ip_hdr),
+ sizeof(struct iphdr));
/* ETH_HLEN */
odev->rx_buf_size = sizeof(struct iphdr);
@@ -934,8 +931,9 @@
/* Copy the rest of the bytes that are left in the
* buffer into the waiting sk_buf. */
/* Make room for temp_bytes after tail. */
- tmp_rx_buf = skb_put(odev->skb_rx_buf, temp_bytes);
- memcpy(tmp_rx_buf, ip_pkt + buffer_offset, temp_bytes);
+ skb_put_data(odev->skb_rx_buf,
+ ip_pkt + buffer_offset,
+ temp_bytes);
odev->rx_buf_missing -= temp_bytes;
count -= temp_bytes;
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index 5a43b77..ae2b256 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -106,11 +106,11 @@
pack_len += need_tail;
pack_len &= 0x07ff;
- len = (__le16 *) __skb_push(skb, INT51X1_HEADER_SIZE);
+ len = __skb_push(skb, INT51X1_HEADER_SIZE);
*len = cpu_to_le16(pack_len);
if(need_tail)
- memset(__skb_put(skb, need_tail), 0, need_tail);
+ __skb_put_zero(skb, need_tail);
return skb;
}
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 76465b1..0f213ea 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -253,7 +253,7 @@
return;
}
- memcpy(skb_put(skb, len), buf, len);
+ skb_put_data(skb, buf, len);
skb->dev = dev->net;
skb->protocol = eth_type_trans(skb, dev->net);
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index 8aefb28..ce0b0b4 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -217,7 +217,7 @@
remainder = skb->len % KALMIA_ALIGN_SIZE;
if (remainder > 0) {
padlen = KALMIA_ALIGN_SIZE - remainder;
- memset(skb_put(skb, padlen), 0, padlen);
+ skb_put_zero(skb, padlen);
}
netdev_dbg(dev->net,
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 37fb621..92e4fd2 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -809,7 +809,7 @@
return NETDEV_TX_OK;
}
- private_header = (__le16 *)__skb_push(skb, 2);
+ private_header = __skb_push(skb, 2);
*private_header = cpu_to_le16(skb->len-2);
kaweth->tx_skb = skb;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 9eff97a..5833f7e 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1490,7 +1490,7 @@
if (ret < 0)
return ret;
- ret = phy_ethtool_ksettings_get(phydev, cmd);
+ phy_ethtool_ksettings_get(phydev, cmd);
usb_autopm_put_interface(dev->intf);
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index 5714107..dbabd7c 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -135,7 +135,7 @@
}
buf = s->current_rx_buf;
- memcpy(skb_put(buf, skb->len), skb->data, skb->len);
+ skb_put_data(buf, skb->data, skb->len);
} else if (skb->len < 4) {
netif_err(dev, ifup, dev->net, "Frame too short\n");
dev->net->stats.rx_length_errors++;
@@ -304,7 +304,7 @@
memset(&packet->dummy, 0, sizeof(packet->dummy));
packet->len = cpu_to_le32(orig_len);
- frame = (struct vl600_frame_hdr *) skb_push(skb, sizeof(*frame));
+ frame = skb_push(skb, sizeof(*frame));
memset(frame, 0, sizeof(*frame));
frame->len = cpu_to_le32(full_len);
frame->serial = cpu_to_le32(serial++);
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 4cbdb13..18a13aa 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -264,17 +264,9 @@
* TTL register
*/
-#define TTL_THIS(ttl) (0x00ff & ttl)
#define TTL_OTHER(ttl) (0x00ff & (ttl >> 8))
#define MK_TTL(this,other) ((u16)(((other)<<8)|(0x00ff&(this))))
-static inline void nc_dump_ttl(struct usbnet *dev, u16 ttl)
-{
- netif_dbg(dev, link, dev->net, "net1080 %s-%s ttl 0x%x this = %d, other = %d\n",
- dev->udev->bus->bus_name, dev->udev->devpath,
- ttl, TTL_THIS(ttl), TTL_OTHER(ttl));
-}
-
/*-------------------------------------------------------------------------*/
static int net1080_reset(struct usbnet *dev)
@@ -308,7 +300,6 @@
goto done;
}
ttl = vp;
- // nc_dump_ttl(dev, ttl);
nc_register_write(dev, REG_TTL,
MK_TTL(NC_READ_TTL_MS, TTL_OTHER(ttl)) );
@@ -475,15 +466,15 @@
encapsulate:
/* header first */
- header = (struct nc_header *) skb_push(skb, sizeof *header);
+ header = skb_push(skb, sizeof *header);
header->hdr_len = cpu_to_le16(sizeof (*header));
header->packet_len = cpu_to_le16(len);
header->packet_id = cpu_to_le16((u16)dev->xid++);
/* maybe pad; then trailer */
if (!((skb->len + sizeof *trailer) & 0x01))
- *skb_put(skb, 1) = PAD_BYTE;
- trailer = (struct nc_trailer *) skb_put(skb, sizeof *trailer);
+ skb_put_u8(skb, PAD_BYTE);
+ trailer = skb_put(skb, sizeof *trailer);
put_unaligned(header->packet_id, &trailer->packet_id);
#if 0
netdev_dbg(dev->net, "frame >tx h %d p %d id %d\n",
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 32a22f4e..5894e3c 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -101,7 +101,7 @@
unsigned int len = skb->len;
struct qmimux_hdr *hdr;
- hdr = (struct qmimux_hdr *)skb_push(skb, sizeof(struct qmimux_hdr));
+ hdr = skb_push(skb, sizeof(struct qmimux_hdr));
hdr->pad = 0;
hdr->mux_id = priv->mux_id;
hdr->pkt_len = cpu_to_be16(len);
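
For reference, the 4-byte header being pushed here is struct
qmimux_hdr as defined earlier in qmi_wwan.c (reproduced from memory,
so treat it as a sketch):

struct qmimux_hdr {
	u8 pad;
	u8 mux_id;
	__be16 pkt_len;
};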
@@ -188,7 +188,7 @@
goto skip;
}
- memcpy(skb_put(skbn, len), skb->data + offset, len);
+ skb_put_data(skbn, skb->data + offset, len);
if (netif_rx(skbn) != NET_RX_SUCCESS)
return 0;
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 1a419a4..6cfffef 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -29,7 +29,7 @@
#include <linux/acpi.h>
/* Information for net-next */
-#define NETNEXT_VERSION "08"
+#define NETNEXT_VERSION "09"
/* Information for net */
#define NET_VERSION "9"
@@ -51,11 +51,14 @@
#define PLA_FMC 0xc0b4
#define PLA_CFG_WOL 0xc0b6
#define PLA_TEREDO_CFG 0xc0bc
+#define PLA_TEREDO_WAKE_BASE 0xc0c4
#define PLA_MAR 0xcd00
#define PLA_BACKUP 0xd000
#define PAL_BDC_CR 0xd1a0
#define PLA_TEREDO_TIMER 0xd2cc
#define PLA_REALWOW_TIMER 0xd2e8
+#define PLA_EFUSE_DATA 0xdd00
+#define PLA_EFUSE_CMD 0xdd02
#define PLA_LEDSEL 0xdd90
#define PLA_LED_FEATURE 0xdd92
#define PLA_PHYAR 0xde00
@@ -105,7 +108,9 @@
#define USB_CSR_DUMMY2 0xb466
#define USB_DEV_STAT 0xb808
#define USB_CONNECT_TIMER 0xcbf8
+#define USB_MSC_TIMER 0xcbfc
#define USB_BURST_SIZE 0xcfc0
+#define USB_LPM_CONFIG 0xcfd8
#define USB_USB_CTRL 0xd406
#define USB_PHY_CTRL 0xd408
#define USB_TX_AGG 0xd40a
@@ -113,15 +118,20 @@
#define USB_USB_TIMER 0xd428
#define USB_RX_EARLY_TIMEOUT 0xd42c
#define USB_RX_EARLY_SIZE 0xd42e
-#define USB_PM_CTRL_STATUS 0xd432
+#define USB_PM_CTRL_STATUS 0xd432 /* RTL8153A */
+#define USB_RX_EXTRA_AGGR_TMR 0xd432 /* RTL8153B */
#define USB_TX_DMA 0xd434
+#define USB_UPT_RXDMA_OWN 0xd437
#define USB_TOLERANCE 0xd490
#define USB_LPM_CTRL 0xd41a
#define USB_BMU_RESET 0xd4b0
+#define USB_U1U2_TIMER 0xd4da
#define USB_UPS_CTRL 0xd800
-#define USB_MISC_0 0xd81a
#define USB_POWER_CUT 0xd80a
+#define USB_MISC_0 0xd81a
#define USB_AFE_CTRL2 0xd824
+#define USB_UPS_CFG 0xd842
+#define USB_UPS_FLAGS 0xd848
#define USB_WDT11_CTRL 0xe43c
#define USB_BP_BA 0xfc26
#define USB_BP_0 0xfc28
@@ -133,6 +143,15 @@
#define USB_BP_6 0xfc34
#define USB_BP_7 0xfc36
#define USB_BP_EN 0xfc38
+#define USB_BP_8 0xfc38
+#define USB_BP_9 0xfc3a
+#define USB_BP_10 0xfc3c
+#define USB_BP_11 0xfc3e
+#define USB_BP_12 0xfc40
+#define USB_BP_13 0xfc42
+#define USB_BP_14 0xfc44
+#define USB_BP_15 0xfc46
+#define USB_BP2_EN 0xfc48
/* OCP Registers */
#define OCP_ALDPS_CONFIG 0x2010
@@ -143,6 +162,7 @@
#define OCP_EEE_AR 0xa41a
#define OCP_EEE_DATA 0xa41c
#define OCP_PHY_STATUS 0xa420
+#define OCP_NCTL_CFG 0xa42c
#define OCP_POWER_CFG 0xa430
#define OCP_EEE_CFG 0xa432
#define OCP_SRAM_ADDR 0xa436
@@ -152,9 +172,14 @@
#define OCP_EEE_ADV 0xa5d0
#define OCP_EEE_LPABLE 0xa5d2
#define OCP_PHY_STATE 0xa708 /* nway state for 8153 */
+#define OCP_PHY_PATCH_STAT 0xb800
+#define OCP_PHY_PATCH_CMD 0xb820
+#define OCP_ADC_IOFFSET 0xbcfc
#define OCP_ADC_CFG 0xbc06
+#define OCP_SYSCLK_CFG 0xc416
/* SRAM Register */
+#define SRAM_GREEN_CFG 0x8011
#define SRAM_LPF_CFG 0x8012
#define SRAM_10M_AMP1 0x8080
#define SRAM_10M_AMP2 0x8082
@@ -252,6 +277,10 @@
/* PAL_BDC_CR */
#define ALDPS_PROXY_MODE 0x0001
+/* PLA_EFUSE_CMD */
+#define EFUSE_READ_CMD BIT(15)
+#define EFUSE_DATA_BIT16 BIT(7)
+
/* PLA_CONFIG34 */
#define LINK_ON_WAKE_EN 0x0010
#define LINK_OFF_WAKE_EN 0x0008
@@ -277,6 +306,7 @@
/* PLA_MAC_PWR_CTRL2 */
#define EEE_SPDWN_RATIO 0x8007
+#define MAC_CLK_SPDWN_EN BIT(15)
/* PLA_MAC_PWR_CTRL3 */
#define PKT_AVAIL_SPDWN_EN 0x0100
@@ -328,6 +358,9 @@
#define STAT_SPEED_HIGH 0x0000
#define STAT_SPEED_FULL 0x0002
+/* USB_LPM_CONFIG */
+#define LPM_U1U2_EN BIT(0)
+
/* USB_TX_AGG */
#define TX_AGG_MAX_THRESHOLD 0x03
@@ -335,6 +368,7 @@
#define RX_THR_SUPPER 0x0c350180
#define RX_THR_HIGH 0x7a120180
#define RX_THR_SLOW 0xffff0180
+#define RX_THR_B 0x00010001
/* USB_TX_DMA */
#define TEST_MODE_DISABLE 0x00000001
@@ -344,6 +378,10 @@
#define BMU_RESET_EP_IN 0x01
#define BMU_RESET_EP_OUT 0x02
+/* USB_UPT_RXDMA_OWN */
+#define OWN_UPDATE BIT(0)
+#define OWN_CLEAR BIT(1)
+
/* USB_UPS_CTRL */
#define POWER_CUT 0x0100
@@ -360,6 +398,8 @@
/* USB_POWER_CUT */
#define PWR_EN 0x0001
#define PHASE2_EN 0x0008
+#define UPS_EN BIT(4)
+#define USP_PREWAKE BIT(5)
/* USB_MISC_0 */
#define PCUT_STATUS 0x0001
@@ -386,6 +426,37 @@
#define SEN_VAL_NORMAL 0xa000
#define SEL_RXIDLE 0x0100
+/* USB_UPS_CFG */
+#define SAW_CNT_1MS_MASK 0x0fff
+
+/* USB_UPS_FLAGS */
+#define UPS_FLAGS_R_TUNE BIT(0)
+#define UPS_FLAGS_EN_10M_CKDIV BIT(1)
+#define UPS_FLAGS_250M_CKDIV BIT(2)
+#define UPS_FLAGS_EN_ALDPS BIT(3)
+#define UPS_FLAGS_CTAP_SHORT_DIS BIT(4)
+#define UPS_FLAGS_SPEED_MASK (0xf << 16)
+#define ups_flags_speed(x) ((x) << 16)
+#define UPS_FLAGS_EN_EEE BIT(20)
+#define UPS_FLAGS_EN_500M_EEE BIT(21)
+#define UPS_FLAGS_EN_EEE_CKDIV BIT(22)
+#define UPS_FLAGS_EEE_PLLOFF_GIGA BIT(24)
+#define UPS_FLAGS_EEE_CMOD_LV_EN BIT(25)
+#define UPS_FLAGS_EN_GREEN BIT(26)
+#define UPS_FLAGS_EN_FLOW_CTR BIT(27)
+
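+/* The values below are packed into USB_UPS_FLAGS bits 19:16 via ups_flags_speed() */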
+enum spd_duplex {
+ NWAY_10M_HALF = 1,
+ NWAY_10M_FULL,
+ NWAY_100M_HALF,
+ NWAY_100M_FULL,
+ NWAY_1000M_FULL,
+ FORCE_10M_HALF,
+ FORCE_10M_FULL,
+ FORCE_100M_HALF,
+ FORCE_100M_FULL,
+};
+
/* OCP_ALDPS_CONFIG */
#define ENPWRSAVE 0x8000
#define ENPDNPS 0x0200
@@ -394,9 +465,13 @@
/* OCP_PHY_STATUS */
#define PHY_STAT_MASK 0x0007
+#define PHY_STAT_EXT_INIT 2
#define PHY_STAT_LAN_ON 3
#define PHY_STAT_PWRDN 5
+/* OCP_NCTL_CFG */
+#define PGA_RETURN_EN BIT(1)
+
/* OCP_POWER_CFG */
#define EEE_CLKDIV_EN 0x8000
#define EN_ALDPS 0x0004
@@ -438,17 +513,34 @@
#define EEE10_EN 0x0010
/* OCP_DOWN_SPEED */
+#define EN_EEE_CMODE BIT(14)
+#define EN_EEE_1000 BIT(13)
+#define EN_EEE_100 BIT(12)
+#define EN_10M_CLKDIV BIT(11)
#define EN_10M_BGOFF 0x0080
/* OCP_PHY_STATE */
#define TXDIS_STATE 0x01
#define ABD_STATE 0x02
+/* OCP_PHY_PATCH_STAT */
+#define PATCH_READY BIT(6)
+
+/* OCP_PHY_PATCH_CMD */
+#define PATCH_REQUEST BIT(4)
+
/* OCP_ADC_CFG */
#define CKADSEL_L 0x0100
#define ADC_EN 0x0080
#define EN_EMI_L 0x0040
+/* OCP_SYSCLK_CFG */
+#define clk_div_expo(x) (min(x, 5) << 8)
+
+/* SRAM_GREEN_CFG */
+#define GREEN_ETH_EN BIT(15)
+#define R_TUNE_EN BIT(11)
+
/* SRAM_LPF_CFG */
#define LPF_AUTO_TUNE 0x8000
@@ -477,7 +569,6 @@
#define RTL8152_MAX_TX 4
#define RTL8152_MAX_RX 10
#define INTBUFSIZE 2
-#define CRC_SIZE 4
#define TX_ALIGN 4
#define RX_ALIGN 8
@@ -496,12 +587,13 @@
#define BYTE_EN_END_MASK 0xf0
#define RTL8153_MAX_PACKET 9216 /* 9K */
-#define RTL8153_MAX_MTU (RTL8153_MAX_PACKET - VLAN_ETH_HLEN - VLAN_HLEN)
-#define RTL8152_RMS (VLAN_ETH_FRAME_LEN + VLAN_HLEN)
+#define RTL8153_MAX_MTU (RTL8153_MAX_PACKET - VLAN_ETH_HLEN - \
+ ETH_FCS_LEN)
+#define RTL8152_RMS (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
#define RTL8153_RMS RTL8153_MAX_PACKET
#define RTL8152_TX_TIMEOUT (5 * HZ)
#define RTL8152_NAPI_WEIGHT 64
-#define rx_reserved_size(x) ((x) + VLAN_ETH_HLEN + CRC_SIZE + \
+#define rx_reserved_size(x) ((x) + VLAN_ETH_HLEN + ETH_FCS_LEN + \
sizeof(struct rx_desc) + RX_ALIGN)
/* rtl8152 flags */
@@ -513,6 +605,7 @@
SELECTIVE_SUSPEND,
PHY_RESET,
SCHEDULE_NAPI,
+ GREEN_ETHERNET,
};
/* Define these values to match your device */
@@ -658,6 +751,9 @@
RTL_VER_04,
RTL_VER_05,
RTL_VER_06,
+ RTL_VER_07,
+ RTL_VER_08,
+ RTL_VER_09,
RTL_VER_MAX
};
@@ -674,7 +770,7 @@
static unsigned int agg_buf_sz = 16384;
#define RTL_LIMITED_TSO_SIZE (agg_buf_sz - sizeof(struct tx_desc) - \
- VLAN_ETH_HLEN - VLAN_HLEN)
+ VLAN_ETH_HLEN - ETH_FCS_LEN)
static
int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
@@ -841,12 +937,6 @@
}
static inline
-int usb_ocp_read(struct r8152 *tp, u16 index, u16 size, void *data)
-{
- return generic_ocp_read(tp, index, size, data, MCU_TYPE_USB);
-}
-
-static inline
int usb_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data)
{
return generic_ocp_write(tp, index, byteen, size, data, MCU_TYPE_USB);
@@ -872,11 +962,13 @@
{
u32 data;
__le32 tmp;
+ u16 byen = BYTE_EN_WORD;
u8 shift = index & 2;
index &= ~3;
+ byen <<= shift;
- generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
+ generic_ocp_read(tp, index, sizeof(tmp), &tmp, type | byen);
data = __le32_to_cpu(tmp);
data >>= (shift * 8);
@@ -988,6 +1080,12 @@
ocp_reg_write(tp, OCP_SRAM_DATA, data);
}
+static u16 sram_read(struct r8152 *tp, u16 addr)
+{
+ ocp_reg_write(tp, OCP_SRAM_ADDR, addr);
+ return ocp_reg_read(tp, OCP_SRAM_DATA);
+}
+
static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
{
struct r8152 *tp = netdev_priv(netdev);
@@ -1818,6 +1916,10 @@
unsigned int pkt_len;
struct sk_buff *skb;
+ /* limit the number of skbs queued on rx_queue */
+ if (unlikely(skb_queue_len(&tp->rx_queue) >= 1000))
+ break;
+
pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
if (pkt_len < ETH_ZLEN)
break;
@@ -1826,7 +1928,7 @@
if (urb->actual_length < len_used)
break;
- pkt_len -= CRC_SIZE;
+ pkt_len -= ETH_FCS_LEN;
rx_data += sizeof(struct rx_desc);
skb = napi_alloc_skb(napi, pkt_len);
@@ -1850,7 +1952,7 @@
}
find_next_rx:
- rx_data = rx_agg_align(rx_data + pkt_len + CRC_SIZE);
+ rx_data = rx_agg_align(rx_data + pkt_len + ETH_FCS_LEN);
rx_desc = (struct rx_desc *)rx_data;
len_used = (int)(rx_data - (u8 *)agg->head);
len_used += sizeof(struct rx_desc);
@@ -1939,7 +2041,8 @@
bottom_half(tp);
if (work_done < budget) {
- napi_complete(napi);
+ if (!napi_complete_done(napi, work_done))
+ goto out;
if (!list_empty(&tp->rx_done))
napi_schedule(napi);
else if (!skb_queue_empty(&tp->tx_queue) &&
@@ -1947,6 +2050,7 @@
napi_schedule(napi);
}
+out:
return work_done;
}
@@ -2138,7 +2242,7 @@
{
struct net_device *netdev = tp->netdev;
- tp->tx_qlen = agg_buf_sz / (netdev->mtu + VLAN_ETH_HLEN + VLAN_HLEN +
+ tp->tx_qlen = agg_buf_sz / (netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN +
sizeof(struct tx_desc));
}
@@ -2249,18 +2353,64 @@
return rtl_enable(tp);
}
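+/* Kick USB_UPT_RXDMA_OWN so the RTL8153B latches updated rx aggregation settings */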
+static inline void r8153b_rx_agg_chg_indicate(struct r8152 *tp)
+{
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_UPT_RXDMA_OWN,
+ OWN_UPDATE | OWN_CLEAR);
+}
+
static void r8153_set_rx_early_timeout(struct r8152 *tp)
{
u32 ocp_data = tp->coalesce / 8;
- ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT, ocp_data);
+ switch (tp->version) {
+ case RTL_VER_03:
+ case RTL_VER_04:
+ case RTL_VER_05:
+ case RTL_VER_06:
+ ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT,
+ ocp_data);
+ break;
+
+ case RTL_VER_08:
+ case RTL_VER_09:
+ /* The RTL8153B primarily uses USB_RX_EXTRA_AGGR_TMR for the rx
+ * timeout, so USB_RX_EARLY_TIMEOUT is fixed to 128ns.
+ */
+ ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT,
+ 128 / 8);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EXTRA_AGGR_TMR,
+ ocp_data);
+ r8153b_rx_agg_chg_indicate(tp);
+ break;
+
+ default:
+ break;
+ }
}
static void r8153_set_rx_early_size(struct r8152 *tp)
{
- u32 ocp_data = (agg_buf_sz - rx_reserved_size(tp->netdev->mtu)) / 4;
+ u32 ocp_data = agg_buf_sz - rx_reserved_size(tp->netdev->mtu);
- ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
+ switch (tp->version) {
+ case RTL_VER_03:
+ case RTL_VER_04:
+ case RTL_VER_05:
+ case RTL_VER_06:
+ ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE,
+ ocp_data / 4);
+ break;
+ case RTL_VER_08:
+ case RTL_VER_09:
+ ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE,
+ ocp_data / 8);
+ r8153b_rx_agg_chg_indicate(tp);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
}
static int rtl8153_enable(struct r8152 *tp)
@@ -2268,7 +2418,6 @@
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return -ENODEV;
- usb_disable_lpm(tp->udev);
set_tx_qlen(tp);
rtl_set_eee_plus(tp);
r8153_set_rx_early_timeout(tp);
@@ -2434,6 +2583,29 @@
device_set_wakeup_enable(&tp->udev->dev, false);
}
+static void r8153_mac_clk_spd(struct r8152 *tp, bool enable)
+{
+ /* MAC clock speed down */
+ if (enable) {
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL,
+ ALDPS_SPDWN_RATIO);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2,
+ EEE_SPDWN_RATIO);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3,
+ PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN |
+ U1U2_SPDWN_EN | L1_SPDWN_EN);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4,
+ PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN |
+ TP100_SPDWN_EN | TP500_SPDWN_EN | EEE_SPDWN_EN |
+ TP1000_SPDWN_EN);
+ } else {
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
+ }
+}
+
static void r8153_u1u2en(struct r8152 *tp, bool enable)
{
u8 u1u2[8];
@@ -2446,18 +2618,133 @@
usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
}
+static void r8153b_u1u2en(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_LPM_CONFIG);
+ if (enable)
+ ocp_data |= LPM_U1U2_EN;
+ else
+ ocp_data &= ~LPM_U1U2_EN;
+
+ ocp_write_word(tp, MCU_TYPE_USB, USB_LPM_CONFIG, ocp_data);
+}
+
static void r8153_u2p3en(struct r8152 *tp, bool enable)
{
u32 ocp_data;
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
- if (enable && tp->version != RTL_VER_03 && tp->version != RTL_VER_04)
+ if (enable)
ocp_data |= U2P3_ENABLE;
else
ocp_data &= ~U2P3_ENABLE;
ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
}
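+/* Read-modify-write USB_UPS_FLAGS: drop the "clear" bits, then apply the "set" bits */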
+static void r8153b_ups_flags_w1w0(struct r8152 *tp, u32 set, u32 clear)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS);
+ ocp_data &= ~clear;
+ ocp_data |= set;
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ocp_data);
+}
+
+static void r8153b_green_en(struct r8152 *tp, bool enable)
+{
+ u16 data;
+
+ if (enable) {
+ sram_write(tp, 0x8045, 0); /* 10M abiq&ldvbias */
+ sram_write(tp, 0x804d, 0x1222); /* 100M short abiq&ldvbias */
+ sram_write(tp, 0x805d, 0x0022); /* 1000M short abiq&ldvbias */
+ } else {
+ sram_write(tp, 0x8045, 0x2444); /* 10M abiq&ldvbias */
+ sram_write(tp, 0x804d, 0x2444); /* 100M short abiq&ldvbias */
+ sram_write(tp, 0x805d, 0x2444); /* 1000M short abiq&ldvbias */
+ }
+
+ data = sram_read(tp, SRAM_GREEN_CFG);
+ data |= GREEN_ETH_EN;
+ sram_write(tp, SRAM_GREEN_CFG, data);
+
+ r8153b_ups_flags_w1w0(tp, UPS_FLAGS_EN_GREEN, 0);
+}
+
+static u16 r8153_phy_status(struct r8152 *tp, u16 desired)
+{
+ u16 data;
+ int i;
+
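+ /* Poll OCP_PHY_STATUS every 20 ms for up to 10 s; desired == 0 accepts any settled state */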
+ for (i = 0; i < 500; i++) {
+ data = ocp_reg_read(tp, OCP_PHY_STATUS);
+ data &= PHY_STAT_MASK;
+ if (desired) {
+ if (data == desired)
+ break;
+ } else if (data == PHY_STAT_LAN_ON || data == PHY_STAT_PWRDN ||
+ data == PHY_STAT_EXT_INIT) {
+ break;
+ }
+
+ msleep(20);
+ }
+
+ return data;
+}
+
+static void r8153b_ups_en(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_POWER_CUT);
+
+ if (enable) {
+ ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, 0xcfff);
+ ocp_data |= BIT(0);
+ ocp_write_byte(tp, MCU_TYPE_USB, 0xcfff, ocp_data);
+ } else {
+ u16 data;
+
+ ocp_data &= ~(UPS_EN | USP_PREWAKE);
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, 0xcfff);
+ ocp_data &= ~BIT(0);
+ ocp_write_byte(tp, MCU_TYPE_USB, 0xcfff, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+ ocp_data &= ~PCUT_STATUS;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
+
+ data = r8153_phy_status(tp, 0);
+
+ switch (data) {
+ case PHY_STAT_PWRDN:
+ case PHY_STAT_EXT_INIT:
+ r8153b_green_en(tp,
+ test_bit(GREEN_ETHERNET, &tp->flags));
+
+ data = r8152_mdio_read(tp, MII_BMCR);
+ data &= ~BMCR_PDOWN;
+ data |= BMCR_RESET;
+ r8152_mdio_write(tp, MII_BMCR, data);
+
+ data = r8153_phy_status(tp, PHY_STAT_LAN_ON);
+
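+ /* fall through */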
+ default:
+ if (data != PHY_STAT_LAN_ON)
+ netif_warn(tp, link, tp->netdev,
+ "PHY not ready\n");
+ break;
+ }
+ }
+}
+
static void r8153_power_cut_en(struct r8152 *tp, bool enable)
{
u32 ocp_data;
@@ -2474,6 +2761,38 @@
ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
}
+static void r8153b_power_cut_en(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
+ if (enable)
+ ocp_data |= PWR_EN | PHASE2_EN;
+ else
+ ocp_data &= ~PWR_EN;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+ ocp_data &= ~PCUT_STATUS;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
+}
+
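+/* 0xd38a/0xd38c appear to be undocumented PLA registers gating queue wake; only bit 0 of each is touched */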
+static void r8153b_queue_wake(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, 0xd38a);
+ if (enable)
+ ocp_data |= BIT(0);
+ else
+ ocp_data &= ~BIT(0);
+ ocp_write_byte(tp, MCU_TYPE_PLA, 0xd38a, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, 0xd38c);
+ ocp_data &= ~BIT(0);
+ ocp_write_byte(tp, MCU_TYPE_PLA, 0xd38c, ocp_data);
+}
+
static bool rtl_can_wakeup(struct r8152 *tp)
{
struct usb_device *udev = tp->udev;
@@ -2512,24 +2831,76 @@
static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
{
- rtl_runtime_suspend_enable(tp, enable);
-
if (enable) {
r8153_u1u2en(tp, false);
r8153_u2p3en(tp, false);
+ r8153_mac_clk_spd(tp, true);
+ rtl_runtime_suspend_enable(tp, true);
} else {
- r8153_u2p3en(tp, true);
+ rtl_runtime_suspend_enable(tp, false);
+ r8153_mac_clk_spd(tp, false);
+
+ switch (tp->version) {
+ case RTL_VER_03:
+ case RTL_VER_04:
+ break;
+ case RTL_VER_05:
+ case RTL_VER_06:
+ default:
+ r8153_u2p3en(tp, true);
+ break;
+ }
+
r8153_u1u2en(tp, true);
}
}
+static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable)
+{
+ if (enable) {
+ r8153b_queue_wake(tp, true);
+ r8153b_u1u2en(tp, false);
+ r8153_u2p3en(tp, false);
+ rtl_runtime_suspend_enable(tp, true);
+ r8153b_ups_en(tp, true);
+ } else {
+ r8153b_ups_en(tp, false);
+ r8153b_queue_wake(tp, false);
+ rtl_runtime_suspend_enable(tp, false);
+ r8153_u2p3en(tp, true);
+ r8153b_u1u2en(tp, true);
+ }
+}
+
static void r8153_teredo_off(struct r8152 *tp)
{
u32 ocp_data;
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
- ocp_data &= ~(TEREDO_SEL | TEREDO_RS_EVENT_MASK | OOB_TEREDO_EN);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
+ switch (tp->version) {
+ case RTL_VER_01:
+ case RTL_VER_02:
+ case RTL_VER_03:
+ case RTL_VER_04:
+ case RTL_VER_05:
+ case RTL_VER_06:
+ case RTL_VER_07:
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
+ ocp_data &= ~(TEREDO_SEL | TEREDO_RS_EVENT_MASK |
+ OOB_TEREDO_EN);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
+ break;
+
+ case RTL_VER_08:
+ case RTL_VER_09:
+ /* Bits 0 ~ 7 are related to the teredo settings. They are
+ * W1C (write 1 to clear), so write all 1s to disable teredo.
+ */
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, 0xff);
+ break;
+
+ default:
+ break;
+ }
ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_REALWOW_TIMER, 0);
@@ -2775,6 +3146,33 @@
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
}
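+/* Request (or release) PHY patch mode; a request polls PATCH_READY for up to several seconds */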
+static int r8153_patch_request(struct r8152 *tp, bool request)
+{
+ u16 data;
+ int i;
+
+ data = ocp_reg_read(tp, OCP_PHY_PATCH_CMD);
+ if (request)
+ data |= PATCH_REQUEST;
+ else
+ data &= ~PATCH_REQUEST;
+ ocp_reg_write(tp, OCP_PHY_PATCH_CMD, data);
+
+ for (i = 0; request && i < 5000; i++) {
+ usleep_range(1000, 2000);
+ if (ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)
+ break;
+ }
+
+ if (request && !(ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)) {
+ netif_err(tp, drv, tp->netdev, "patch request failed\n");
+ r8153_patch_request(tp, false);
+ return -ETIME;
+ } else {
+ return 0;
+ }
+}
+
static void r8153_aldps_en(struct r8152 *tp, bool enable)
{
u16 data;
@@ -2784,12 +3182,28 @@
data |= EN_ALDPS;
ocp_reg_write(tp, OCP_POWER_CFG, data);
} else {
+ int i;
+
data &= ~EN_ALDPS;
ocp_reg_write(tp, OCP_POWER_CFG, data);
- msleep(20);
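+ /* poll an (apparently undocumented) PLA status bit for up to ~40 ms while ALDPS winds down */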
+ for (i = 0; i < 20; i++) {
+ usleep_range(1000, 2000);
+ if (ocp_read_word(tp, MCU_TYPE_PLA, 0xe000) & 0x0100)
+ break;
+ }
}
}
+static void r8153b_aldps_en(struct r8152 *tp, bool enable)
+{
+ r8153_aldps_en(tp, enable);
+
+ if (enable)
+ r8153b_ups_flags_w1w0(tp, UPS_FLAGS_EN_ALDPS, 0);
+ else
+ r8153b_ups_flags_w1w0(tp, 0, UPS_FLAGS_EN_ALDPS);
+}
+
static void r8153_eee_en(struct r8152 *tp, bool enable)
{
u32 ocp_data;
@@ -2810,6 +3224,22 @@
ocp_reg_write(tp, OCP_EEE_CFG, config);
}
+static void r8153b_eee_en(struct r8152 *tp, bool enable)
+{
+ r8153_eee_en(tp, enable);
+
+ if (enable)
+ r8153b_ups_flags_w1w0(tp, UPS_FLAGS_EN_EEE, 0);
+ else
+ r8153b_ups_flags_w1w0(tp, 0, UPS_FLAGS_EN_EEE);
+}
+
+static void r8153b_enable_fc(struct r8152 *tp)
+{
+ r8152b_enable_fc(tp);
+ r8153b_ups_flags_w1w0(tp, UPS_FLAGS_EN_FLOW_CTR, 0);
+}
+
static void r8153_hw_phy_cfg(struct r8152 *tp)
{
u32 ocp_data;
@@ -2857,6 +3287,111 @@
r8153_aldps_en(tp, true);
r8152b_enable_fc(tp);
+ switch (tp->version) {
+ case RTL_VER_03:
+ case RTL_VER_04:
+ break;
+ case RTL_VER_05:
+ case RTL_VER_06:
+ default:
+ r8153_u2p3en(tp, true);
+ break;
+ }
+
+ set_bit(PHY_RESET, &tp->flags);
+}
+
+static u32 r8152_efuse_read(struct r8152 *tp, u8 addr)
+{
+ u32 ocp_data;
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EFUSE_CMD, EFUSE_READ_CMD | addr);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EFUSE_CMD);
+ ocp_data = (ocp_data & EFUSE_DATA_BIT16) << 9; /* data of bit16 */
+ ocp_data |= ocp_read_word(tp, MCU_TYPE_PLA, PLA_EFUSE_DATA);
+
+ return ocp_data;
+}
+
+static void r8153b_hw_phy_cfg(struct r8152 *tp)
+{
+ u32 ocp_data, ups_flags = 0;
+ u16 data;
+
+ /* disable ALDPS before updating the PHY parameters */
+ r8153b_aldps_en(tp, false);
+
+ /* disable EEE before updating the PHY parameters */
+ r8153b_eee_en(tp, false);
+ ocp_reg_write(tp, OCP_EEE_ADV, 0);
+
+ r8153b_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags));
+
+ data = sram_read(tp, SRAM_GREEN_CFG);
+ data |= R_TUNE_EN;
+ sram_write(tp, SRAM_GREEN_CFG, data);
+ data = ocp_reg_read(tp, OCP_NCTL_CFG);
+ data |= PGA_RETURN_EN;
+ ocp_reg_write(tp, OCP_NCTL_CFG, data);
+
+ /* ADC Bias Calibration:
+ * read efuse offset 0x7d to get a 17-bit value. Remove the dummy/fake
+ * bit (bit3) to rebuild the real 16-bit value, then write it to the
+ * ADC ioffset.
+ */
+ ocp_data = r8152_efuse_read(tp, 0x7d);
+ data = (u16)(((ocp_data & 0x1fff0) >> 1) | (ocp_data & 0x7));
+ if (data != 0xffff)
+ ocp_reg_write(tp, OCP_ADC_IOFFSET, data);
+
+ /* ups mode tx-link-pulse timing adjustment:
+ * rg_saw_cnt = OCP reg 0xC426 Bit[13:0]
+ * swr_cnt_1ms_ini = 16000000 / rg_saw_cnt
+ */
+ ocp_data = ocp_reg_read(tp, 0xc426);
+ ocp_data &= 0x3fff;
+ if (ocp_data) {
+ u32 swr_cnt_1ms_ini;
+
+ swr_cnt_1ms_ini = (16000000 / ocp_data) & SAW_CNT_1MS_MASK;
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CFG);
+ ocp_data = (ocp_data & ~SAW_CNT_1MS_MASK) | swr_cnt_1ms_ini;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CFG, ocp_data);
+ }
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
+ ocp_data |= PFM_PWM_SWITCH;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
+
+ /* Advance EEE */
+ if (!r8153_patch_request(tp, true)) {
+ data = ocp_reg_read(tp, OCP_POWER_CFG);
+ data |= EEE_CLKDIV_EN;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+
+ data = ocp_reg_read(tp, OCP_DOWN_SPEED);
+ data |= EN_EEE_CMODE | EN_EEE_1000 | EN_10M_CLKDIV;
+ ocp_reg_write(tp, OCP_DOWN_SPEED, data);
+
+ ocp_reg_write(tp, OCP_SYSCLK_CFG, 0);
+ ocp_reg_write(tp, OCP_SYSCLK_CFG, clk_div_expo(5));
+
+ ups_flags |= UPS_FLAGS_EN_10M_CKDIV | UPS_FLAGS_250M_CKDIV |
+ UPS_FLAGS_EN_EEE_CKDIV | UPS_FLAGS_EEE_CMOD_LV_EN |
+ UPS_FLAGS_EEE_PLLOFF_GIGA;
+
+ r8153_patch_request(tp, false);
+ }
+
+ r8153b_ups_flags_w1w0(tp, ups_flags, 0);
+
+ r8153b_eee_en(tp, true);
+ ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX);
+
+ r8153b_aldps_en(tp, true);
+ r8153b_enable_fc(tp);
+ r8153_u2p3en(tp, true);
+
set_bit(PHY_RESET, &tp->flags);
}
@@ -2865,6 +3400,7 @@
u32 ocp_data;
int i;
+ r8153_mac_clk_spd(tp, false);
rxdy_gated_en(tp, true);
r8153_teredo_off(tp);
@@ -2903,7 +3439,7 @@
rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX);
- ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE;
+ ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO);
@@ -2919,11 +3455,6 @@
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_NORMAL);
/* TX share fifo free credit full threshold */
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL2);
-
- /* rx aggregation */
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
- ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
- ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
}
static void r8153_enter_oob(struct r8152 *tp)
@@ -2931,6 +3462,8 @@
u32 ocp_data;
int i;
+ r8153_mac_clk_spd(tp, true);
+
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
ocp_data &= ~NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
@@ -2956,12 +3489,31 @@
usleep_range(1000, 2000);
}
- ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE;
+ ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
- ocp_data &= ~TEREDO_WAKE_MASK;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
+ switch (tp->version) {
+ case RTL_VER_03:
+ case RTL_VER_04:
+ case RTL_VER_05:
+ case RTL_VER_06:
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
+ ocp_data &= ~TEREDO_WAKE_MASK;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
+ break;
+
+ case RTL_VER_08:
+ case RTL_VER_09:
+ /* Clear the teredo wake event. Bits [15:8] hold the teredo
+ * wakeup type; set them to zero. Bits [7:0] are W1C event
+ * bits; write all 1s to clear them.
+ */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_WAKE_BASE, 0x00ff);
+ break;
+
+ default:
+ break;
+ }
rtl_rx_vlan_en(tp, true);
@@ -2986,12 +3538,20 @@
rtl_disable(tp);
rtl_reset_bmu(tp);
r8153_aldps_en(tp, true);
- usb_enable_lpm(tp->udev);
+}
+
+static void rtl8153b_disable(struct r8152 *tp)
+{
+ r8153b_aldps_en(tp, false);
+ rtl_disable(tp);
+ rtl_reset_bmu(tp);
+ r8153b_aldps_en(tp, true);
}
static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
{
u16 bmcr, anar, gbcr;
+ enum spd_duplex speed_duplex;
int ret = 0;
anar = r8152_mdio_read(tp, MII_ADVERTISE);
@@ -3008,32 +3568,43 @@
if (speed == SPEED_10) {
bmcr = 0;
anar |= ADVERTISE_10HALF | ADVERTISE_10FULL;
+ speed_duplex = FORCE_10M_HALF;
} else if (speed == SPEED_100) {
bmcr = BMCR_SPEED100;
anar |= ADVERTISE_100HALF | ADVERTISE_100FULL;
+ speed_duplex = FORCE_100M_HALF;
} else if (speed == SPEED_1000 && tp->mii.supports_gmii) {
bmcr = BMCR_SPEED1000;
gbcr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
+ speed_duplex = NWAY_1000M_FULL;
} else {
ret = -EINVAL;
goto out;
}
- if (duplex == DUPLEX_FULL)
+ if (duplex == DUPLEX_FULL) {
bmcr |= BMCR_FULLDPLX;
+ if (speed != SPEED_1000)
+ speed_duplex++;
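+ /* relies on FORCE_*_FULL directly following FORCE_*_HALF in enum spd_duplex */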
+ }
} else {
if (speed == SPEED_10) {
- if (duplex == DUPLEX_FULL)
+ if (duplex == DUPLEX_FULL) {
anar |= ADVERTISE_10HALF | ADVERTISE_10FULL;
- else
+ speed_duplex = NWAY_10M_FULL;
+ } else {
anar |= ADVERTISE_10HALF;
+ speed_duplex = NWAY_10M_HALF;
+ }
} else if (speed == SPEED_100) {
if (duplex == DUPLEX_FULL) {
anar |= ADVERTISE_10HALF | ADVERTISE_10FULL;
anar |= ADVERTISE_100HALF | ADVERTISE_100FULL;
+ speed_duplex = NWAY_100M_FULL;
} else {
anar |= ADVERTISE_10HALF;
anar |= ADVERTISE_100HALF;
+ speed_duplex = NWAY_100M_HALF;
}
} else if (speed == SPEED_1000 && tp->mii.supports_gmii) {
if (duplex == DUPLEX_FULL) {
@@ -3045,6 +3616,7 @@
anar |= ADVERTISE_100HALF;
gbcr |= ADVERTISE_1000HALF;
}
+ speed_duplex = NWAY_1000M_FULL;
} else {
ret = -EINVAL;
goto out;
@@ -3062,6 +3634,17 @@
r8152_mdio_write(tp, MII_ADVERTISE, anar);
r8152_mdio_write(tp, MII_BMCR, bmcr);
+ switch (tp->version) {
+ case RTL_VER_08:
+ case RTL_VER_09:
+ r8153b_ups_flags_w1w0(tp, ups_flags_speed(speed_duplex),
+ UPS_FLAGS_SPEED_MASK);
+ break;
+
+ default:
+ break;
+ }
+
if (bmcr & BMCR_RESET) {
int i;
@@ -3105,12 +3688,23 @@
return;
r8153_u1u2en(tp, false);
+ r8153_u2p3en(tp, false);
r8153_aldps_en(tp, false);
r8153_first_init(tp);
r8153_aldps_en(tp, true);
- r8153_u2p3en(tp, true);
+
+ switch (tp->version) {
+ case RTL_VER_03:
+ case RTL_VER_04:
+ break;
+ case RTL_VER_05:
+ case RTL_VER_06:
+ default:
+ r8153_u2p3en(tp, true);
+ break;
+ }
+
r8153_u1u2en(tp, true);
- usb_enable_lpm(tp->udev);
}
static void rtl8153_down(struct r8152 *tp)
@@ -3128,6 +3722,38 @@
r8153_aldps_en(tp, true);
}
+static void rtl8153b_up(struct r8152 *tp)
+{
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
+ r8153b_u1u2en(tp, false);
+ r8153_u2p3en(tp, false);
+ r8153b_aldps_en(tp, false);
+
+ r8153_first_init(tp);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B);
+
+ r8153b_aldps_en(tp, true);
+ r8153_u2p3en(tp, true);
+ r8153b_u1u2en(tp, true);
+}
+
+static void rtl8153b_down(struct r8152 *tp)
+{
+ if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ rtl_drop_queued_tx(tp);
+ return;
+ }
+
+ r8153b_u1u2en(tp, false);
+ r8153_u2p3en(tp, false);
+ r8153b_power_cut_en(tp, false);
+ r8153b_aldps_en(tp, false);
+ r8153_enter_oob(tp);
+ r8153b_aldps_en(tp, true);
+}
+
static bool rtl8152_in_nway(struct r8152 *tp)
{
u16 nway_state;
@@ -3426,12 +4052,7 @@
msleep(20);
}
- for (i = 0; i < 500; i++) {
- ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK;
- if (ocp_data == PHY_STAT_LAN_ON || ocp_data == PHY_STAT_PWRDN)
- break;
- msleep(20);
- }
+ data = r8153_phy_status(tp, 0);
if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
tp->version == RTL_VER_05)
@@ -3443,14 +4064,8 @@
r8152_mdio_write(tp, MII_BMCR, data);
}
- for (i = 0; i < 500; i++) {
- ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK;
- if (ocp_data == PHY_STAT_LAN_ON)
- break;
- msleep(20);
- }
+ data = r8153_phy_status(tp, PHY_STAT_LAN_ON);
- usb_disable_lpm(tp->udev);
r8153_u2p3en(tp, false);
if (tp->version == RTL_VER_04) {
@@ -3510,15 +4125,88 @@
r8153_power_cut_en(tp, false);
r8153_u1u2en(tp, true);
+ r8153_mac_clk_spd(tp, false);
+ usb_enable_lpm(tp->udev);
- /* MAC clock speed down */
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
+ /* rx aggregation */
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
+ ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
rtl_tally_reset(tp);
- r8153_u2p3en(tp, true);
+
+ switch (tp->udev->speed) {
+ case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
+ tp->coalesce = COALESCE_SUPER;
+ break;
+ case USB_SPEED_HIGH:
+ tp->coalesce = COALESCE_HIGH;
+ break;
+ default:
+ tp->coalesce = COALESCE_SLOW;
+ break;
+ }
+}
+
+static void r8153b_init(struct r8152 *tp)
+{
+ u32 ocp_data;
+ u16 data;
+ int i;
+
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
+ r8153b_u1u2en(tp, false);
+
+ for (i = 0; i < 500; i++) {
+ if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
+ AUTOLOAD_DONE)
+ break;
+ msleep(20);
+ }
+
+ data = r8153_phy_status(tp, 0);
+
+ data = r8152_mdio_read(tp, MII_BMCR);
+ if (data & BMCR_PDOWN) {
+ data &= ~BMCR_PDOWN;
+ r8152_mdio_write(tp, MII_BMCR, data);
+ }
+
+ data = r8153_phy_status(tp, PHY_STAT_LAN_ON);
+
+ r8153_u2p3en(tp, false);
+
+ /* MSC timer = 0xfff * 8ms = 32760 ms */
+ ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff);
+
+ /* U1/U2/L1 idle timer. 500 us */
+ ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500);
+
+ r8153b_power_cut_en(tp, false);
+ r8153b_ups_en(tp, false);
+ r8153b_queue_wake(tp, false);
+ rtl_runtime_suspend_enable(tp, false);
+ r8153b_u1u2en(tp, true);
+ usb_enable_lpm(tp->udev);
+
+ /* MAC clock speed down */
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2);
+ ocp_data |= MAC_CLK_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data);
+
+ set_bit(GREEN_ETHERNET, &tp->flags);
+
+ /* rx aggregation */
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
+ ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
+
+ rtl_tally_reset(tp);
+
+ tp->coalesce = 15000; /* 15 us */
}
static int rtl8152_pre_reset(struct usb_interface *intf)
@@ -3601,6 +4289,61 @@
return false;
}
+static int rtl8152_runtime_resume(struct r8152 *tp)
+{
+ struct net_device *netdev = tp->netdev;
+
+ if (netif_running(netdev) && netdev->flags & IFF_UP) {
+ struct napi_struct *napi = &tp->napi;
+
+ tp->rtl_ops.autosuspend_en(tp, false);
+ napi_disable(napi);
+ set_bit(WORK_ENABLE, &tp->flags);
+
+ if (netif_carrier_ok(netdev)) {
+ if (rtl8152_get_speed(tp) & LINK_STATUS) {
+ rtl_start_rx(tp);
+ } else {
+ netif_carrier_off(netdev);
+ tp->rtl_ops.disable(tp);
+ netif_info(tp, link, netdev, "linking down\n");
+ }
+ }
+
+ napi_enable(napi);
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
+
+ if (!list_empty(&tp->rx_done))
+ napi_schedule(&tp->napi);
+
+ usb_submit_urb(tp->intr_urb, GFP_NOIO);
+ } else {
+ if (netdev->flags & IFF_UP)
+ tp->rtl_ops.autosuspend_en(tp, false);
+
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ }
+
+ return 0;
+}
+
+static int rtl8152_system_resume(struct r8152 *tp)
+{
+ struct net_device *netdev = tp->netdev;
+
+ netif_device_attach(netdev);
+
+ if (netif_running(netdev) && netdev->flags & IFF_UP) {
+ tp->rtl_ops.up(tp);
+ netif_carrier_off(netdev);
+ set_bit(WORK_ENABLE, &tp->flags);
+ usb_submit_urb(tp->intr_urb, GFP_NOIO);
+ }
+
+ return 0;
+}
+
static int rtl8152_runtime_suspend(struct r8152 *tp)
{
struct net_device *netdev = tp->netdev;
@@ -3612,13 +4355,6 @@
if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
u32 rcr = 0;
- if (delay_autosuspend(tp)) {
- clear_bit(SELECTIVE_SUSPEND, &tp->flags);
- smp_mb__after_atomic();
- ret = -EBUSY;
- goto out1;
- }
-
if (netif_carrier_ok(netdev)) {
u32 ocp_data;
@@ -3652,6 +4388,11 @@
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
napi_enable(napi);
}
+
+ if (delay_autosuspend(tp)) {
+ rtl8152_runtime_resume(tp);
+ ret = -EBUSY;
+ }
}
out1:
@@ -3699,53 +4440,18 @@
static int rtl8152_resume(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
- struct net_device *netdev = tp->netdev;
+ int ret;
mutex_lock(&tp->control);
- if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
- tp->rtl_ops.init(tp);
- queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
- netif_device_attach(netdev);
- }
-
- if (netif_running(netdev) && netdev->flags & IFF_UP) {
- if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
- struct napi_struct *napi = &tp->napi;
-
- tp->rtl_ops.autosuspend_en(tp, false);
- napi_disable(napi);
- set_bit(WORK_ENABLE, &tp->flags);
- if (netif_carrier_ok(netdev)) {
- if (rtl8152_get_speed(tp) & LINK_STATUS) {
- rtl_start_rx(tp);
- } else {
- netif_carrier_off(netdev);
- tp->rtl_ops.disable(tp);
- netif_info(tp, link, netdev,
- "linking down\n");
- }
- }
- napi_enable(napi);
- clear_bit(SELECTIVE_SUSPEND, &tp->flags);
- smp_mb__after_atomic();
- if (!list_empty(&tp->rx_done))
- napi_schedule(&tp->napi);
- } else {
- tp->rtl_ops.up(tp);
- netif_carrier_off(netdev);
- set_bit(WORK_ENABLE, &tp->flags);
- }
- usb_submit_urb(tp->intr_urb, GFP_KERNEL);
- } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
- if (netdev->flags & IFF_UP)
- tp->rtl_ops.autosuspend_en(tp, false);
- clear_bit(SELECTIVE_SUSPEND, &tp->flags);
- }
+ if (test_bit(SELECTIVE_SUSPEND, &tp->flags))
+ ret = rtl8152_runtime_resume(tp);
+ else
+ ret = rtl8152_system_resume(tp);
mutex_unlock(&tp->control);
- return 0;
+ return ret;
}
static int rtl8152_reset_resume(struct usb_interface *intf)
@@ -3753,6 +4459,10 @@
struct r8152 *tp = usb_get_intfdata(intf);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ mutex_lock(&tp->control);
+ tp->rtl_ops.init(tp);
+ queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
+ mutex_unlock(&tp->control);
return rtl8152_resume(intf);
}
@@ -3841,7 +4551,7 @@
mutex_lock(&tp->control);
- ret = mii_ethtool_get_link_ksettings(&tp->mii, cmd);
+ mii_ethtool_get_link_ksettings(&tp->mii, cmd);
mutex_unlock(&tp->control);
@@ -4022,6 +4732,20 @@
return 0;
}
+static int r8153b_set_eee(struct r8152 *tp, struct ethtool_eee *eee)
+{
+ u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised);
+
+ r8153b_eee_en(tp, eee->eee_enabled);
+
+ if (!eee->eee_enabled)
+ val = 0;
+
+ ocp_reg_write(tp, OCP_EEE_ADV, val);
+
+ return 0;
+}
+
static int
rtl_ethtool_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
@@ -4097,6 +4821,7 @@
switch (tp->version) {
case RTL_VER_01:
case RTL_VER_02:
+ case RTL_VER_07:
return -EOPNOTSUPP;
default:
break;
@@ -4116,6 +4841,7 @@
switch (tp->version) {
case RTL_VER_01:
case RTL_VER_02:
+ case RTL_VER_07:
return -EOPNOTSUPP;
default:
break;
@@ -4215,6 +4941,7 @@
switch (tp->version) {
case RTL_VER_01:
case RTL_VER_02:
+ case RTL_VER_07:
dev->mtu = new_mtu;
return 0;
default:
@@ -4230,7 +4957,7 @@
dev->mtu = new_mtu;
if (netif_running(dev)) {
- u32 rms = new_mtu + VLAN_ETH_HLEN + CRC_SIZE;
+ u32 rms = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rms);
@@ -4276,6 +5003,14 @@
r8153_power_cut_en(tp, false);
}
+static void rtl8153b_unload(struct r8152 *tp)
+{
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
+ r8153b_power_cut_en(tp, false);
+}
+
static int rtl_ops_init(struct r8152 *tp)
{
struct rtl_ops *ops = &tp->rtl_ops;
@@ -4284,6 +5019,7 @@
switch (tp->version) {
case RTL_VER_01:
case RTL_VER_02:
+ case RTL_VER_07:
ops->init = r8152b_init;
ops->enable = rtl8152_enable;
ops->disable = rtl8152_disable;
@@ -4314,6 +5050,21 @@
ops->autosuspend_en = rtl8153_runtime_enable;
break;
+ case RTL_VER_08:
+ case RTL_VER_09:
+ ops->init = r8153b_init;
+ ops->enable = rtl8153_enable;
+ ops->disable = rtl8153b_disable;
+ ops->up = rtl8153b_up;
+ ops->down = rtl8153b_down;
+ ops->unload = rtl8153b_unload;
+ ops->eee_get = r8153_get_eee;
+ ops->eee_set = r8153b_set_eee;
+ ops->in_nway = rtl8153_in_nway;
+ ops->hw_phy_cfg = r8153b_hw_phy_cfg;
+ ops->autosuspend_en = rtl8153b_runtime_enable;
+ break;
+
default:
ret = -ENODEV;
netif_err(tp, probe, tp->netdev, "Unknown Device\n");
@@ -4362,6 +5113,15 @@
case 0x5c30:
version = RTL_VER_06;
break;
+ case 0x4800:
+ version = RTL_VER_07;
+ break;
+ case 0x6000:
+ version = RTL_VER_08;
+ break;
+ case 0x6010:
+ version = RTL_VER_09;
+ break;
default:
version = RTL_VER_UNKNOWN;
dev_info(&intf->dev, "Unknown version 0x%04x\n", ocp_data);
@@ -4409,6 +5169,7 @@
switch (version) {
case RTL_VER_01:
case RTL_VER_02:
+ case RTL_VER_07:
tp->mii.supports_gmii = 0;
break;
default:
@@ -4466,19 +5227,6 @@
tp->mii.reg_num_mask = 0x1f;
tp->mii.phy_id = R8152_PHY_ID;
- switch (udev->speed) {
- case USB_SPEED_SUPER:
- case USB_SPEED_SUPER_PLUS:
- tp->coalesce = COALESCE_SUPER;
- break;
- case USB_SPEED_HIGH:
- tp->coalesce = COALESCE_HIGH;
- break;
- default:
- tp->coalesce = COALESCE_SLOW;
- break;
- }
-
tp->autoneg = AUTONEG_ENABLE;
tp->speed = tp->mii.supports_gmii ? SPEED_1000 : SPEED_100;
tp->duplex = DUPLEX_FULL;
@@ -4556,6 +5304,7 @@
/* table of devices that work with this driver */
static struct usb_device_id rtl8152_table[] = {
+ {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8050)},
{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
{REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)},
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index e96e2e5..a151f26 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -578,7 +578,7 @@
* packets; Linux minimizes wasted bandwidth through tx queues.
*/
fill:
- hdr = (void *) __skb_push(skb, sizeof *hdr);
+ hdr = __skb_push(skb, sizeof *hdr);
memset(hdr, 0, sizeof *hdr);
hdr->msg_type = cpu_to_le32(RNDIS_MSG_PACKET);
hdr->msg_len = cpu_to_le32(skb->len);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 79048e7..6510e5c 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -956,7 +956,9 @@
if (!dev->mii.mdio_read)
return -EOPNOTSUPP;
- return mii_ethtool_get_link_ksettings(&dev->mii, cmd);
+ mii_ethtool_get_link_ksettings(&dev->mii, cmd);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings);
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index 6aaa6eb..9c2196c 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -74,10 +74,10 @@
fcs = crc32_le(~0, skb->data, skb->len);
fcs = ~fcs;
- *skb_put (skb, 1) = fcs & 0xff;
- *skb_put (skb, 1) = (fcs>> 8) & 0xff;
- *skb_put (skb, 1) = (fcs>>16) & 0xff;
- *skb_put (skb, 1) = (fcs>>24) & 0xff;
+ skb_put_u8(skb, fcs & 0xff);
+ skb_put_u8(skb, (fcs >> 8) & 0xff);
+ skb_put_u8(skb, (fcs >> 16) & 0xff);
+ skb_put_u8(skb, (fcs >> 24) & 0xff);
}
return skb;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a871f45..5c6388f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -305,7 +305,7 @@
copy = len;
if (copy > skb_tailroom(skb))
copy = skb_tailroom(skb);
- memcpy(skb_put(skb, copy), p, copy);
+ skb_put_data(skb, p, copy);
len -= copy;
offset += copy;
@@ -1150,7 +1150,7 @@
struct virtio_net_hdr_mrg_rxbuf *hdr;
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
struct virtnet_info *vi = sq->vq->vdev->priv;
- unsigned num_sg;
+ int num_sg;
unsigned hdr_len = vi->hdr_len;
bool can_push;
@@ -1177,11 +1177,16 @@
if (can_push) {
__skb_push(skb, hdr_len);
num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
+ if (unlikely(num_sg < 0))
+ return num_sg;
/* Pull header back to avoid skew in tx bytes calculations. */
__skb_pull(skb, hdr_len);
} else {
sg_set_buf(sq->sg, hdr, hdr_len);
- num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
+ num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
+ if (unlikely(num_sg < 0))
+ return num_sg;
+ num_sg++;
}
return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}
@@ -1950,16 +1955,18 @@
return err;
}
-static bool virtnet_xdp_query(struct net_device *dev)
+static u32 virtnet_xdp_query(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
+ const struct bpf_prog *xdp_prog;
int i;
for (i = 0; i < vi->max_queue_pairs; i++) {
- if (vi->rq[i].xdp_prog)
- return true;
+ xdp_prog = rtnl_dereference(vi->rq[i].xdp_prog);
+ if (xdp_prog)
+ return xdp_prog->aux->id;
}
- return false;
+ return 0;
}
static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp)
@@ -1968,7 +1975,8 @@
case XDP_SETUP_PROG:
return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
case XDP_QUERY_PROG:
- xdp->prog_attached = virtnet_xdp_query(dev);
+ xdp->prog_id = virtnet_xdp_query(dev);
+ xdp->prog_attached = !!xdp->prog_id;
return 0;
default:
return -EINVAL;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 022c0b5..997ef25 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -383,7 +383,7 @@
if (!list_empty(&vrf_dev->ptype_all) &&
likely(skb_headroom(skb) >= ETH_HLEN)) {
- struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
+ struct ethhdr *eth = skb_push(skb, ETH_HLEN);
ether_addr_copy(eth->h_source, vrf_dev->dev_addr);
eth_zero_addr(eth->h_dest);
@@ -563,7 +563,7 @@
static int vrf_rt6_create(struct net_device *dev)
{
- int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE;
+ int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM;
struct net_vrf *vrf = netdev_priv(dev);
struct net *net = dev_net(dev);
struct fib6_table *rt6i_table;
@@ -583,8 +583,6 @@
if (!rt6)
goto out;
- dst_hold(&rt6->dst);
-
rt6->rt6i_table = rt6i_table;
rt6->dst.output = vrf_output6;
@@ -597,8 +595,6 @@
goto out;
}
- dst_hold(&rt6_local->dst);
-
rt6_local->rt6i_idev = in6_dev_get(dev);
rt6_local->rt6i_flags = RTF_UP | RTF_NONEXTHOP | RTF_LOCAL;
rt6_local->rt6i_table = rt6i_table;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 5fa798a..653b2bb 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -226,7 +226,8 @@
return NULL;
}
-static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni)
+static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, int ifindex,
+ __be32 vni)
{
struct vxlan_dev *vxlan;
@@ -235,17 +236,27 @@
vni = 0;
hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) {
- if (vxlan->default_dst.remote_vni == vni)
- return vxlan;
+ if (vxlan->default_dst.remote_vni != vni)
+ continue;
+
+ if (IS_ENABLED(CONFIG_IPV6)) {
+ const struct vxlan_config *cfg = &vxlan->cfg;
+
+ if ((cfg->flags & VXLAN_F_IPV6_LINKLOCAL) &&
+ cfg->remote_ifindex != ifindex)
+ continue;
+ }
+
+ return vxlan;
}
return NULL;
}
/* Look up VNI in a per net namespace table */
-static struct vxlan_dev *vxlan_find_vni(struct net *net, __be32 vni,
- sa_family_t family, __be16 port,
- u32 flags)
+static struct vxlan_dev *vxlan_find_vni(struct net *net, int ifindex,
+ __be32 vni, sa_family_t family,
+ __be16 port, u32 flags)
{
struct vxlan_sock *vs;
@@ -253,7 +264,7 @@
if (!vs)
return NULL;
- return vxlan_vs_find_vni(vs, vni);
+ return vxlan_vs_find_vni(vs, ifindex, vni);
}
/* Fill in neighbour message in skbuff. */
@@ -305,7 +316,7 @@
if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
goto nla_put_failure;
- if ((vxlan->flags & VXLAN_F_COLLECT_METADATA) && fdb->vni &&
+ if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->vni &&
nla_put_u32(skb, NDA_SRC_VNI,
be32_to_cpu(fdb->vni)))
goto nla_put_failure;
@@ -419,7 +430,7 @@
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
const u8 *mac, __be32 vni)
{
- if (vxlan->flags & VXLAN_F_COLLECT_METADATA)
+ if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
return &vxlan->fdb_head[eth_vni_hash(mac, vni)];
else
return &vxlan->fdb_head[eth_hash(mac)];
@@ -434,7 +445,7 @@
hlist_for_each_entry_rcu(f, head, hlist) {
if (ether_addr_equal(mac, f->eth_addr)) {
- if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
+ if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
if (vni == f->vni)
return f;
} else {
@@ -957,20 +968,28 @@
*/
static bool vxlan_snoop(struct net_device *dev,
union vxlan_addr *src_ip, const u8 *src_mac,
- __be32 vni)
+ u32 src_ifindex, __be32 vni)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
+ u32 ifindex = 0;
+
+#if IS_ENABLED(CONFIG_IPV6)
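+ /* IPv6 link-local addresses are only unique per link, so record the ingress ifindex */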
+ if (src_ip->sa.sa_family == AF_INET6 &&
+ (ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL))
+ ifindex = src_ifindex;
+#endif
f = vxlan_find_mac(vxlan, src_mac, vni);
if (likely(f)) {
struct vxlan_rdst *rdst = first_remote_rcu(f);
- if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
+ if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip) &&
+ rdst->remote_ifindex == ifindex))
return false;
/* Don't migrate static entries, drop packets */
- if (f->state & NUD_NOARP)
+ if (f->state & (NUD_PERMANENT | NUD_NOARP))
return true;
if (net_ratelimit())
@@ -993,7 +1012,7 @@
vxlan->cfg.dst_port,
vni,
vxlan->default_dst.remote_vni,
- 0, NTF_SELF);
+ ifindex, NTF_SELF);
spin_unlock(&vxlan->hash_lock);
}
@@ -1077,10 +1096,10 @@
#if IS_ENABLED(CONFIG_IPV6)
struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
- rcu_assign_pointer(vxlan->vn6_sock, NULL);
+ RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
#endif
- rcu_assign_pointer(vxlan->vn4_sock, NULL);
+ RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
synchronize_net();
vxlan_vs_del_dev(vxlan);
@@ -1264,6 +1283,7 @@
struct sk_buff *skb, __be32 vni)
{
union vxlan_addr saddr;
+ u32 ifindex = skb->dev->ifindex;
skb_reset_mac_header(skb);
skb->protocol = eth_type_trans(skb, vxlan->dev);
@@ -1284,8 +1304,8 @@
#endif
}
- if ((vxlan->flags & VXLAN_F_LEARN) &&
- vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, vni))
+ if ((vxlan->cfg.flags & VXLAN_F_LEARN) &&
+ vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, ifindex, vni))
return false;
return true;
@@ -1351,7 +1371,7 @@
vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
- vxlan = vxlan_vs_find_vni(vs, vni);
+ vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
if (!vxlan)
goto drop;
@@ -1507,7 +1527,7 @@
if (netif_rx_ni(reply) == NET_RX_DROP)
dev->stats.rx_dropped++;
- } else if (vxlan->flags & VXLAN_F_L3MISS) {
+ } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin.sin_addr.s_addr = tip,
.sin.sin_family = AF_INET,
@@ -1584,10 +1604,8 @@
skb_pull(reply, sizeof(struct ipv6hdr));
skb_reset_transport_header(reply);
- na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
-
/* Neighbor Advertisement */
- memset(na, 0, sizeof(*na)+na_olen);
+ na = skb_put_zero(reply, sizeof(*na) + na_olen);
na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
na->icmph.icmp6_router = isrouter;
na->icmph.icmp6_override = 1;
@@ -1667,7 +1685,7 @@
if (netif_rx_ni(reply) == NET_RX_DROP)
dev->stats.rx_dropped++;
- } else if (vxlan->flags & VXLAN_F_L3MISS) {
+ } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin6.sin6_addr = msg->target,
.sin6.sin6_family = AF_INET6,
@@ -1700,7 +1718,7 @@
return false;
pip = ip_hdr(skb);
n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
- if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
+ if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) {
union vxlan_addr ipa = {
.sin.sin_addr.s_addr = pip->daddr,
.sin.sin_family = AF_INET,
@@ -1721,7 +1739,7 @@
return false;
pip6 = ipv6_hdr(skb);
n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
- if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
+ if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) {
union vxlan_addr ipa = {
.sin6.sin6_addr = pip6->daddr,
.sin6.sin6_family = AF_INET6,
@@ -1829,7 +1847,7 @@
if (err)
return err;
- vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
+ vxh = __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = VXLAN_HF_VNI;
vxh->vx_vni = vxlan_vni_field(vni);
@@ -1995,8 +2013,9 @@
#endif
}
- if (dst_vxlan->flags & VXLAN_F_LEARN)
- vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, vni);
+ if (dst_vxlan->cfg.flags & VXLAN_F_LEARN)
+ vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0,
+ vni);
u64_stats_update_begin(&tx_stats->syncp);
tx_stats->tx_packets++;
@@ -2014,8 +2033,10 @@
}
static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
- struct vxlan_dev *vxlan, union vxlan_addr *daddr,
- __be16 dst_port, __be32 vni, struct dst_entry *dst,
+ struct vxlan_dev *vxlan,
+ union vxlan_addr *daddr,
+ __be16 dst_port, int dst_ifindex, __be32 vni,
+ struct dst_entry *dst,
u32 rt_flags)
{
#if IS_ENABLED(CONFIG_IPV6)
@@ -2031,9 +2052,9 @@
struct vxlan_dev *dst_vxlan;
dst_release(dst);
- dst_vxlan = vxlan_find_vni(vxlan->net, vni,
+ dst_vxlan = vxlan_find_vni(vxlan->net, dst_ifindex, vni,
daddr->sa.sa_family, dst_port,
- vxlan->flags);
+ vxlan->cfg.flags);
if (!dst_vxlan) {
dev->stats.tx_errors++;
kfree_skb(skb);
@@ -2063,8 +2084,9 @@
struct dst_entry *ndst = NULL;
__be32 vni, label;
__u8 tos, ttl;
+ int ifindex;
int err;
- u32 flags = vxlan->flags;
+ u32 flags = vxlan->cfg.flags;
bool udp_sum = false;
bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
@@ -2083,6 +2105,7 @@
dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
vni = (rdst->remote_vni) ? : default_vni;
+ ifindex = rdst->remote_ifindex;
local_ip = vxlan->cfg.saddr;
dst_cache = &rdst->dst_cache;
md->gbp = skb->mark;
@@ -2116,6 +2139,7 @@
dst = &remote_ip;
dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
vni = tunnel_id_to_key32(info->key.tun_id);
+ ifindex = 0;
dst_cache = &info->dst_cache;
if (info->options_len)
md = ip_tunnel_info_opts(info);
@@ -2133,8 +2157,7 @@
struct rtable *rt;
__be16 df = 0;
- rt = vxlan_get_route(vxlan, dev, sock4, skb,
- rdst ? rdst->remote_ifindex : 0, tos,
+ rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos,
dst->sin.sin_addr.s_addr,
&local_ip.sin.sin_addr.s_addr,
dst_port, src_port,
@@ -2147,8 +2170,8 @@
/* Bypass encapsulation if the destination is local */
if (!info) {
err = encap_bypass_if_local(skb, dev, vxlan, dst,
- dst_port, vni, &rt->dst,
- rt->rt_flags);
+ dst_port, ifindex, vni,
+ &rt->dst, rt->rt_flags);
if (err)
goto out_unlock;
} else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) {
@@ -2170,8 +2193,7 @@
} else {
struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
- ndst = vxlan6_get_route(vxlan, dev, sock6, skb,
- rdst ? rdst->remote_ifindex : 0, tos,
+ ndst = vxlan6_get_route(vxlan, dev, sock6, skb, ifindex, tos,
label, &dst->sin6.sin6_addr,
&local_ip.sin6.sin6_addr,
dst_port, src_port,
@@ -2186,8 +2208,8 @@
u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
err = encap_bypass_if_local(skb, dev, vxlan, dst,
- dst_port, vni, ndst,
- rt6i_flags);
+ dst_port, ifindex, vni,
+ ndst, rt6i_flags);
if (err)
goto out_unlock;
}
@@ -2246,7 +2268,7 @@
skb_reset_mac_header(skb);
- if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
+ if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
if (info && info->mode & IP_TUNNEL_INFO_BRIDGE &&
info->mode & IP_TUNNEL_INFO_TX) {
vni = tunnel_id_to_key32(info->key.tun_id);
@@ -2259,7 +2281,7 @@
}
}
- if (vxlan->flags & VXLAN_F_PROXY) {
+ if (vxlan->cfg.flags & VXLAN_F_PROXY) {
eth = eth_hdr(skb);
if (ntohs(eth->h_proto) == ETH_P_ARP)
return arp_reduce(dev, skb, vni);
@@ -2279,7 +2301,7 @@
f = vxlan_find_mac(vxlan, eth->h_dest, vni);
did_rsc = false;
- if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
+ if (f && (f->flags & NTF_ROUTER) && (vxlan->cfg.flags & VXLAN_F_RSC) &&
(ntohs(eth->h_proto) == ETH_P_IP ||
ntohs(eth->h_proto) == ETH_P_IPV6)) {
did_rsc = route_shortcircuit(dev, skb);
@@ -2290,7 +2312,7 @@
if (f == NULL) {
f = vxlan_find_mac(vxlan, all_zeros_mac, vni);
if (f == NULL) {
- if ((vxlan->flags & VXLAN_F_L2MISS) &&
+ if ((vxlan->cfg.flags & VXLAN_F_L2MISS) &&
!is_multicast_ether_addr(eth->h_dest))
vxlan_fdb_miss(vxlan, eth->h_dest);
@@ -2486,10 +2508,7 @@
struct vxlan_rdst *dst = &vxlan->default_dst;
struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
dst->remote_ifindex);
- bool use_ipv6 = false;
-
- if (dst->remote_ip.sa.sa_family == AF_INET6)
- use_ipv6 = true;
+ bool use_ipv6 = !!(vxlan->cfg.flags & VXLAN_F_IPV6);
/* This check is different than dev->max_mtu, because it looks at
* the lowerdev->mtu, rather than the static dev->max_mtu
@@ -2625,6 +2644,10 @@
netif_keep_dst(dev);
dev->priv_flags |= IFF_NO_QUEUE;
+ /* MTU range: 68 - 65535 */
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = ETH_MAX_MTU;
+
INIT_LIST_HEAD(&vxlan->next);
spin_lock_init(&vxlan->hash_lock);
@@ -2632,9 +2655,8 @@
vxlan->age_timer.function = vxlan_cleanup;
vxlan->age_timer.data = (unsigned long) vxlan;
- vxlan->cfg.dst_port = htons(vxlan_port);
-
vxlan->dev = dev;
+ vxlan->net = dev_net(dev);
gro_cells_init(&vxlan->gro_cells, dev);
@@ -2703,11 +2725,19 @@
}
}
+ if (tb[IFLA_MTU]) {
+ u32 mtu = nla_get_u32(data[IFLA_MTU]);
+
+ if (mtu < ETH_MIN_MTU || mtu > ETH_MAX_MTU)
+ return -EINVAL;
+ }
+
if (!data)
return -EINVAL;
if (data[IFLA_VXLAN_ID]) {
- __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
+ u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
+
if (id >= VXLAN_N_VID)
return -ERANGE;
}
@@ -2823,7 +2853,7 @@
if (!vxlan->cfg.no_share) {
spin_lock(&vn->sock_lock);
vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
- vxlan->cfg.dst_port, vxlan->flags);
+ vxlan->cfg.dst_port, vxlan->cfg.flags);
if (vs && !atomic_add_unless(&vs->refcnt, 1, 0)) {
spin_unlock(&vn->sock_lock);
return -EBUSY;
@@ -2832,7 +2862,7 @@
}
if (!vs)
vs = vxlan_socket_create(vxlan->net, ipv6,
- vxlan->cfg.dst_port, vxlan->flags);
+ vxlan->cfg.dst_port, vxlan->cfg.flags);
if (IS_ERR(vs))
return PTR_ERR(vs);
#if IS_ENABLED(CONFIG_IPV6)
@@ -2847,8 +2877,8 @@
static int vxlan_sock_add(struct vxlan_dev *vxlan)
{
- bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA;
- bool ipv6 = vxlan->flags & VXLAN_F_IPV6 || metadata;
+ bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA;
+ bool ipv6 = vxlan->cfg.flags & VXLAN_F_IPV6 || metadata;
bool ipv4 = !ipv6 || metadata;
int ret = 0;
@@ -2868,115 +2898,171 @@
return ret;
}
-static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
- struct vxlan_config *conf,
- bool changelink)
+static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf,
+ struct net_device **lower,
+ struct vxlan_dev *old)
{
struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
- struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
+ struct vxlan_dev *tmp;
+ bool use_ipv6 = false;
+
+ if (conf->flags & VXLAN_F_GPE) {
+ /* For now, allow GPE only together with
+ * COLLECT_METADATA. This can be relaxed later; in such
+ * case, the other side of the PtP link will have to be
+ * provided.
+ */
+ if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
+ !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
+ return -EINVAL;
+ }
+ }
+
+ if (!conf->remote_ip.sa.sa_family && !conf->saddr.sa.sa_family) {
+ /* Unless IPv6 is explicitly requested, assume IPv4 */
+ conf->remote_ip.sa.sa_family = AF_INET;
+ conf->saddr.sa.sa_family = AF_INET;
+ } else if (!conf->remote_ip.sa.sa_family) {
+ conf->remote_ip.sa.sa_family = conf->saddr.sa.sa_family;
+ } else if (!conf->saddr.sa.sa_family) {
+ conf->saddr.sa.sa_family = conf->remote_ip.sa.sa_family;
+ }
+
+ if (conf->saddr.sa.sa_family != conf->remote_ip.sa.sa_family)
+ return -EINVAL;
+
+ if (vxlan_addr_multicast(&conf->saddr))
+ return -EINVAL;
+
+ if (conf->saddr.sa.sa_family == AF_INET6) {
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return -EPFNOSUPPORT;
+ use_ipv6 = true;
+ conf->flags |= VXLAN_F_IPV6;
+
+ if (!(conf->flags & VXLAN_F_COLLECT_METADATA)) {
+ int local_type =
+ ipv6_addr_type(&conf->saddr.sin6.sin6_addr);
+ int remote_type =
+ ipv6_addr_type(&conf->remote_ip.sin6.sin6_addr);
+
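+ /* a link-local local address requires a link-local (or unspecified) remote, and vice versa */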
+ if (local_type & IPV6_ADDR_LINKLOCAL) {
+ if (!(remote_type & IPV6_ADDR_LINKLOCAL) &&
+ (remote_type != IPV6_ADDR_ANY))
+ return -EINVAL;
+
+ conf->flags |= VXLAN_F_IPV6_LINKLOCAL;
+ } else {
+ if (remote_type ==
+ (IPV6_ADDR_UNICAST | IPV6_ADDR_LINKLOCAL))
+ return -EINVAL;
+
+ conf->flags &= ~VXLAN_F_IPV6_LINKLOCAL;
+ }
+ }
+ }
+
+ if (conf->label && !use_ipv6)
+ return -EINVAL;
+
+ if (conf->remote_ifindex) {
+ struct net_device *lowerdev;
+
+ lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
+ if (!lowerdev)
+ return -ENODEV;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (use_ipv6) {
+ struct inet6_dev *idev = __in6_dev_get(lowerdev);
+ if (idev && idev->cnf.disable_ipv6)
+ return -EPERM;
+ }
+#endif
+
+ *lower = lowerdev;
+ } else {
+ if (vxlan_addr_multicast(&conf->remote_ip))
+ return -EINVAL;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (conf->flags & VXLAN_F_IPV6_LINKLOCAL)
+ return -EINVAL;
+#endif
+
+ *lower = NULL;
+ }
+
+ if (!conf->dst_port) {
+ if (conf->flags & VXLAN_F_GPE)
+ conf->dst_port = htons(4790); /* IANA VXLAN-GPE port */
+ else
+ conf->dst_port = htons(vxlan_port);
+ }
+
+ if (!conf->age_interval)
+ conf->age_interval = FDB_AGE_DEFAULT;
+
+ list_for_each_entry(tmp, &vn->vxlan_list, next) {
+ if (tmp == old)
+ continue;
+
+ if (tmp->cfg.vni != conf->vni)
+ continue;
+ if (tmp->cfg.dst_port != conf->dst_port)
+ continue;
+ if ((tmp->cfg.flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)) !=
+ (conf->flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)))
+ continue;
+
+ if ((conf->flags & VXLAN_F_IPV6_LINKLOCAL) &&
+ tmp->cfg.remote_ifindex != conf->remote_ifindex)
+ continue;
+
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+static void vxlan_config_apply(struct net_device *dev,
+ struct vxlan_config *conf,
+ struct net_device *lowerdev, bool changelink)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *dst = &vxlan->default_dst;
unsigned short needed_headroom = ETH_HLEN;
- bool use_ipv6 = false;
- __be16 default_port = vxlan->cfg.dst_port;
- struct net_device *lowerdev = NULL;
+ bool use_ipv6 = !!(conf->flags & VXLAN_F_IPV6);
+ int max_mtu = ETH_MAX_MTU;
if (!changelink) {
- if (conf->flags & VXLAN_F_GPE) {
- /* For now, allow GPE only together with
- * COLLECT_METADATA. This can be relaxed later; in such
- * case, the other side of the PtP link will have to be
- * provided.
- */
- if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
- !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
- pr_info("unsupported combination of extensions\n");
- return -EINVAL;
- }
+ if (conf->flags & VXLAN_F_GPE)
vxlan_raw_setup(dev);
- } else {
+ else
vxlan_ether_setup(dev);
- }
- /* MTU range: 68 - 65535 */
- dev->min_mtu = ETH_MIN_MTU;
- dev->max_mtu = ETH_MAX_MTU;
- vxlan->net = src_net;
+ if (conf->mtu)
+ dev->mtu = conf->mtu;
}
dst->remote_vni = conf->vni;
memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));
- /* Unless IPv6 is explicitly requested, assume IPv4 */
- if (!dst->remote_ip.sa.sa_family)
- dst->remote_ip.sa.sa_family = AF_INET;
-
- if (dst->remote_ip.sa.sa_family == AF_INET6 ||
- vxlan->cfg.saddr.sa.sa_family == AF_INET6) {
- if (!IS_ENABLED(CONFIG_IPV6))
- return -EPFNOSUPPORT;
- use_ipv6 = true;
- vxlan->flags |= VXLAN_F_IPV6;
- }
-
- if (conf->label && !use_ipv6) {
- pr_info("label only supported in use with IPv6\n");
- return -EINVAL;
- }
-
- if (conf->remote_ifindex &&
- conf->remote_ifindex != vxlan->cfg.remote_ifindex) {
- lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
+ if (lowerdev) {
dst->remote_ifindex = conf->remote_ifindex;
- if (!lowerdev) {
- pr_info("ifindex %d does not exist\n",
- dst->remote_ifindex);
- return -ENODEV;
- }
-
-#if IS_ENABLED(CONFIG_IPV6)
- if (use_ipv6) {
- struct inet6_dev *idev = __in6_dev_get(lowerdev);
- if (idev && idev->cnf.disable_ipv6) {
- pr_info("IPv6 is disabled via sysctl\n");
- return -EPERM;
- }
- }
-#endif
-
- if (!conf->mtu)
- dev->mtu = lowerdev->mtu -
- (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
-
- needed_headroom = lowerdev->hard_header_len;
- } else if (!conf->remote_ifindex &&
- vxlan_addr_multicast(&dst->remote_ip)) {
- pr_info("multicast destination requires interface to be specified\n");
- return -EINVAL;
- }
-
- if (lowerdev) {
dev->gso_max_size = lowerdev->gso_max_size;
dev->gso_max_segs = lowerdev->gso_max_segs;
+
+ needed_headroom = lowerdev->hard_header_len;
+
+ max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
+ VXLAN_HEADROOM);
}
- if (conf->mtu) {
- int max_mtu = ETH_MAX_MTU;
-
- if (lowerdev)
- max_mtu = lowerdev->mtu;
-
- max_mtu -= (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
-
- if (conf->mtu < dev->min_mtu || conf->mtu > dev->max_mtu)
- return -EINVAL;
-
- dev->mtu = conf->mtu;
-
- if (conf->mtu > max_mtu)
- dev->mtu = max_mtu;
- }
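+	/* Cap the MTU to what fits through the lower device once the
+	 * VXLAN headroom has been subtracted.
+	 */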
+ if (dev->mtu > max_mtu)
+ dev->mtu = max_mtu;
if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
needed_headroom += VXLAN6_HEADROOM;
@@ -2985,31 +3071,21 @@
dev->needed_headroom = needed_headroom;
memcpy(&vxlan->cfg, conf, sizeof(*conf));
- if (!vxlan->cfg.dst_port) {
- if (conf->flags & VXLAN_F_GPE)
- vxlan->cfg.dst_port = htons(4790); /* IANA VXLAN-GPE port */
- else
- vxlan->cfg.dst_port = default_port;
- }
- vxlan->flags |= conf->flags;
+}
- if (!vxlan->cfg.age_interval)
- vxlan->cfg.age_interval = FDB_AGE_DEFAULT;
+static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
+ struct vxlan_config *conf,
+ bool changelink)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct net_device *lowerdev;
+ int ret;
- if (changelink)
- return 0;
+ ret = vxlan_config_validate(src_net, conf, &lowerdev, vxlan);
+ if (ret)
+ return ret;
- list_for_each_entry(tmp, &vn->vxlan_list, next) {
- if (tmp->cfg.vni == conf->vni &&
- (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
- tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
- tmp->cfg.dst_port == vxlan->cfg.dst_port &&
- (tmp->flags & VXLAN_F_RCV_FLAGS) ==
- (vxlan->flags & VXLAN_F_RCV_FLAGS)) {
- pr_info("duplicate VNI %u\n", be32_to_cpu(conf->vni));
- return -EEXIST;
- }
- }
+ vxlan_config_apply(dev, conf, lowerdev, changelink);
return 0;
}
@@ -3073,22 +3149,35 @@
}
if (data[IFLA_VXLAN_GROUP]) {
+ if (changelink && (conf->remote_ip.sa.sa_family != AF_INET))
+ return -EOPNOTSUPP;
+
conf->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
+ conf->remote_ip.sa.sa_family = AF_INET;
} else if (data[IFLA_VXLAN_GROUP6]) {
if (!IS_ENABLED(CONFIG_IPV6))
return -EPFNOSUPPORT;
+ if (changelink && (conf->remote_ip.sa.sa_family != AF_INET6))
+ return -EOPNOTSUPP;
+
conf->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
conf->remote_ip.sa.sa_family = AF_INET6;
}
if (data[IFLA_VXLAN_LOCAL]) {
+ if (changelink && (conf->saddr.sa.sa_family != AF_INET))
+ return -EOPNOTSUPP;
+
conf->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
conf->saddr.sa.sa_family = AF_INET;
} else if (data[IFLA_VXLAN_LOCAL6]) {
if (!IS_ENABLED(CONFIG_IPV6))
return -EPFNOSUPPORT;
+ if (changelink && (conf->saddr.sa.sa_family != AF_INET6))
+ return -EOPNOTSUPP;
+
/* TODO: respect scope id */
conf->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
conf->saddr.sa.sa_family = AF_INET6;
@@ -3108,12 +3197,10 @@
IPV6_FLOWLABEL_MASK;
if (data[IFLA_VXLAN_LEARNING]) {
- if (nla_get_u8(data[IFLA_VXLAN_LEARNING])) {
+ if (nla_get_u8(data[IFLA_VXLAN_LEARNING]))
conf->flags |= VXLAN_F_LEARN;
- } else {
+ else
conf->flags &= ~VXLAN_F_LEARN;
- vxlan->flags &= ~VXLAN_F_LEARN;
- }
} else if (!changelink) {
/* default to learn on a new device */
conf->flags |= VXLAN_F_LEARN;
@@ -3396,43 +3483,44 @@
nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
nla_put_u8(skb, IFLA_VXLAN_LEARNING,
- !!(vxlan->flags & VXLAN_F_LEARN)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_LEARN)) ||
nla_put_u8(skb, IFLA_VXLAN_PROXY,
- !!(vxlan->flags & VXLAN_F_PROXY)) ||
- nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_PROXY)) ||
+ nla_put_u8(skb, IFLA_VXLAN_RSC,
+ !!(vxlan->cfg.flags & VXLAN_F_RSC)) ||
nla_put_u8(skb, IFLA_VXLAN_L2MISS,
- !!(vxlan->flags & VXLAN_F_L2MISS)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_L2MISS)) ||
nla_put_u8(skb, IFLA_VXLAN_L3MISS,
- !!(vxlan->flags & VXLAN_F_L3MISS)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_L3MISS)) ||
nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
- !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)) ||
nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
- !(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
+ !(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
- !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
- !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
- !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) ||
+ !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
- !!(vxlan->flags & VXLAN_F_REMCSUM_RX)))
+ !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_RX)))
goto nla_put_failure;
if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
goto nla_put_failure;
- if (vxlan->flags & VXLAN_F_GBP &&
+ if (vxlan->cfg.flags & VXLAN_F_GBP &&
nla_put_flag(skb, IFLA_VXLAN_GBP))
goto nla_put_failure;
- if (vxlan->flags & VXLAN_F_GPE &&
+ if (vxlan->cfg.flags & VXLAN_F_GPE &&
nla_put_flag(skb, IFLA_VXLAN_GPE))
goto nla_put_failure;
- if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL &&
+ if (vxlan->cfg.flags & VXLAN_F_REMCSUM_NOPARTIAL &&
nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
goto nla_put_failure;
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 33265eb..bd46b25 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -857,7 +857,7 @@
dbg(DBG_TX, "fst_rx_dma_complete\n");
pi = port->index;
- memcpy(skb_put(skb, len), card->rx_dma_handle_host, len);
+ skb_put_data(skb, card->rx_dma_handle_host, len);
/* Reset buffer descriptor */
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 6742ae6..33df764 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -36,7 +36,6 @@
#define DRV_NAME "ucc_hdlc"
#define TDM_PPPOHT_SLIC_MAXIN
-#define BROKEN_FRAME_INFO
static struct ucc_tdm_info utdm_primary_info = {
.uf_info = {
@@ -99,6 +98,13 @@
uf_info->tsa = 1;
uf_info->ctsp = 1;
}
+
+	/* This sets the HPM field in the CMXUCR register, which configures
+	 * an open-drain connected HDLC bus.
+	 */
+ if (priv->hdlc_bus)
+ uf_info->brkpt_support = 1;
+
uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
UCC_HDLC_UCCE_TXB) << 16);
@@ -114,6 +120,9 @@
/* Loopback mode */
if (priv->loopback) {
dev_info(priv->dev, "Loopback Mode\n");
+		/* use the same clock when working in loopback */
+ qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);
+
gumr = ioread32be(&priv->uf_regs->gumr);
gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
UCC_FAST_GUMR_TCI);
@@ -133,11 +142,33 @@
/* Set UPSMR normal mode (need fixed)*/
iowrite32be(0, &priv->uf_regs->upsmr);
+ /* hdlc_bus mode */
+ if (priv->hdlc_bus) {
+ u32 upsmr;
+
+ dev_info(priv->dev, "HDLC bus Mode\n");
+ upsmr = ioread32be(&priv->uf_regs->upsmr);
+
+ /* bus mode and retransmit enable, with collision window
+ * set to 8 bytes
+ */
+ upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
+ UCC_HDLC_UPSMR_CW8;
+ iowrite32be(upsmr, &priv->uf_regs->upsmr);
+
+ /* explicitly disable CDS & CTSP */
+ gumr = ioread32be(&priv->uf_regs->gumr);
+ gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
+ /* set automatic sync to explicitly ignore CD signal */
+ gumr |= UCC_FAST_GUMR_SYNL_AUTO;
+ iowrite32be(gumr, &priv->uf_regs->gumr);
+ }
+
priv->rx_ring_size = RX_BD_RING_LEN;
priv->tx_ring_size = TX_BD_RING_LEN;
/* Alloc Rx BD */
priv->rx_bd_base = dma_alloc_coherent(priv->dev,
- RX_BD_RING_LEN * sizeof(struct qe_bd *),
+ RX_BD_RING_LEN * sizeof(struct qe_bd),
&priv->dma_rx_bd, GFP_KERNEL);
if (!priv->rx_bd_base) {
@@ -148,7 +179,7 @@
/* Alloc Tx BD */
priv->tx_bd_base = dma_alloc_coherent(priv->dev,
- TX_BD_RING_LEN * sizeof(struct qe_bd *),
+ TX_BD_RING_LEN * sizeof(struct qe_bd),
&priv->dma_tx_bd, GFP_KERNEL);
if (!priv->tx_bd_base) {
@@ -158,7 +189,7 @@
}
/* Alloc parameter ram for ucc hdlc */
- priv->ucc_pram_offset = qe_muram_alloc(sizeof(priv->ucc_pram),
+ priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
ALIGNMENT_OF_UCC_HDLC_PRAM);
if (priv->ucc_pram_offset < 0) {
@@ -295,11 +326,11 @@
qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
dma_free_coherent(priv->dev,
- TX_BD_RING_LEN * sizeof(struct qe_bd *),
+ TX_BD_RING_LEN * sizeof(struct qe_bd),
priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
dma_free_coherent(priv->dev,
- RX_BD_RING_LEN * sizeof(struct qe_bd *),
+ RX_BD_RING_LEN * sizeof(struct qe_bd),
priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
ucc_fast_free(priv->uccf);
@@ -314,8 +345,6 @@
struct qe_bd __iomem *bd;
u16 bd_status;
unsigned long flags;
- u8 *send_buf;
- int i;
u16 *proto_head;
switch (dev->type) {
@@ -352,16 +381,6 @@
dev_kfree_skb(skb);
return -ENOMEM;
}
-
- pr_info("Tx data skb->len:%d ", skb->len);
- send_buf = (u8 *)skb->data;
- pr_info("\nTransmitted data:\n");
- for (i = 0; i < 16; i++) {
- if (i == skb->len)
- pr_info("++++");
- else
- pr_info("%02x\n", send_buf[i]);
- }
spin_lock_irqsave(&priv->lock, flags);
/* Start from the next BD that should be filled */
@@ -423,7 +442,6 @@
skb = priv->tx_skbuff[priv->skb_dirtytx];
if (!skb)
break;
- pr_info("TxBD: %x\n", bd_status);
dev->stats.tx_packets++;
memset(priv->tx_buffer +
(be32_to_cpu(bd->buf) - priv->dma_tx_addr),
@@ -454,14 +472,12 @@
static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
{
struct net_device *dev = priv->ndev;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
hdlc_device *hdlc = dev_to_hdlc(dev);
struct qe_bd *bd;
u16 bd_status;
u16 length, howmany = 0;
u8 *bdbuffer;
- int i;
- static int entry;
bd = priv->currx_bd;
bd_status = ioread16be(&bd->status);
@@ -471,9 +487,6 @@
if (bd_status & R_OV_S)
dev->stats.rx_over_errors++;
if (bd_status & R_CR_S) {
-#ifdef BROKEN_FRAME_INFO
- pr_info("Broken Frame with RxBD: %x\n", bd_status);
-#endif
dev->stats.rx_crc_errors++;
dev->stats.rx_dropped++;
goto recycle;
@@ -482,17 +495,6 @@
(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
length = ioread16be(&bd->length);
- pr_info("Received data length:%d", length);
- pr_info("while entry times:%d", entry++);
-
- pr_info("\nReceived data:\n");
- for (i = 0; (i < 16); i++) {
- if (i == length)
- pr_info("++++");
- else
- pr_info("%02x\n", bdbuffer[i]);
- }
-
switch (dev->type) {
case ARPHRD_RAWHDLC:
bdbuffer += HDLC_HEAD_LEN;
@@ -531,7 +533,6 @@
howmany++;
if (hdlc->proto)
skb->protocol = hdlc_type_trans(skb, dev);
- pr_info("skb->protocol:%x\n", skb->protocol);
netif_receive_skb(skb);
recycle:
@@ -566,7 +567,7 @@
/* Tx event processing */
spin_lock(&priv->lock);
- hdlc_tx_done(priv);
+ hdlc_tx_done(priv);
spin_unlock(&priv->lock);
howmany = 0;
@@ -597,7 +598,6 @@
uccm = ioread32be(uccf->p_uccm);
ucce &= uccm;
iowrite32be(ucce, uccf->p_ucce);
- pr_info("irq ucce:%x\n", ucce);
if (!ucce)
return IRQ_NONE;
@@ -688,7 +688,7 @@
if (priv->rx_bd_base) {
dma_free_coherent(priv->dev,
- RX_BD_RING_LEN * sizeof(struct qe_bd *),
+ RX_BD_RING_LEN * sizeof(struct qe_bd),
priv->rx_bd_base, priv->dma_rx_bd);
priv->rx_bd_base = NULL;
@@ -697,7 +697,7 @@
if (priv->tx_bd_base) {
dma_free_coherent(priv->dev,
- TX_BD_RING_LEN * sizeof(struct qe_bd *),
+ TX_BD_RING_LEN * sizeof(struct qe_bd),
priv->tx_bd_base, priv->dma_tx_bd);
priv->tx_bd_base = NULL;
@@ -855,7 +855,6 @@
/* save power */
ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
- dev_dbg(dev, "ucc hdlc suspend\n");
return 0;
}
@@ -1001,7 +1000,7 @@
struct device_node *np = pdev->dev.of_node;
struct ucc_hdlc_private *uhdlc_priv = NULL;
struct ucc_tdm_info *ut_info;
- struct ucc_tdm *utdm;
+ struct ucc_tdm *utdm = NULL;
struct resource res;
struct net_device *dev;
hdlc_device *hdlc;
@@ -1054,10 +1053,6 @@
return -EINVAL;
}
- /* use the same clock when work in loopback */
- if (ut_info->uf_info.rx_clock == ut_info->uf_info.tx_clock)
- qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);
-
ret = of_address_to_resource(np, 0, &res);
if (ret)
return -EINVAL;
@@ -1080,6 +1075,9 @@
if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
uhdlc_priv->loopback = 1;
+ if (of_get_property(np, "fsl,hdlc-bus", NULL))
+ uhdlc_priv->hdlc_bus = 1;
+
if (uhdlc_priv->tsa == 1) {
utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
if (!utdm) {
diff --git a/drivers/net/wan/fsl_ucc_hdlc.h b/drivers/net/wan/fsl_ucc_hdlc.h
index 881ecde..c21134c 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.h
+++ b/drivers/net/wan/fsl_ucc_hdlc.h
@@ -78,6 +78,7 @@
u16 tsa;
bool hdlc_busy;
bool loopback;
+ bool hdlc_bus;
u8 *tx_buffer;
u8 *rx_buffer;
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 47fdb87..0d2e00e 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -228,15 +228,15 @@
}
skb_reserve(skb, sizeof(struct hdlc_header));
- cp = (struct cp_header *)skb_put(skb, sizeof(struct cp_header));
+ cp = skb_put(skb, sizeof(struct cp_header));
cp->code = code;
cp->id = id;
cp->len = htons(sizeof(struct cp_header) + magic_len + len);
if (magic_len)
- memcpy(skb_put(skb, magic_len), &magic, magic_len);
+ skb_put_data(skb, &magic, magic_len);
if (len)
- memcpy(skb_put(skb, len), data, len);
+ skb_put_data(skb, data, len);
#if DEBUG_CP
BUG_ON(code >= CP_CODES);
@@ -448,7 +448,7 @@
/* Check HDLC header */
if (skb->len < sizeof(struct hdlc_header))
goto rx_error;
- cp = (struct cp_header*)skb_pull(skb, sizeof(struct hdlc_header));
+ cp = skb_pull(skb, sizeof(struct hdlc_header));
if (hdr->address != HDLC_ADDR_ALLSTATIONS ||
hdr->control != HDLC_CTRL_UI)
goto rx_error;
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index 2f11836..8bd3ed9 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -57,7 +57,8 @@
const size_t size = sizeof(raw_hdlc_proto);
raw_hdlc_proto new_settings;
hdlc_device *hdlc = dev_to_hdlc(dev);
- int result, old_qlen;
+ unsigned int old_qlen;
+ int result;
switch (ifr->ifr_settings.type) {
case IF_GET_PROTO:
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 878b05d..40ee80c 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -202,7 +202,7 @@
return;
}
skb_push(skb, 1); /* LAPB internal control */
- memcpy(skb_put(skb, count), sl->rbuff, count);
+ skb_put_data(skb, sl->rbuff, count);
skb->protocol = x25_type_trans(skb, sl->dev);
err = lapb_data_received(skb->dev, skb);
if (err != LAPB_OK) {
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index 7f64e74..a654687 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -221,7 +221,7 @@
{
struct i2400m_pl_data_hdr *pl_hdr;
skb_pull(skb, ETH_HLEN);
- pl_hdr = (struct i2400m_pl_data_hdr *) skb_push(skb, sizeof(*pl_hdr));
+ pl_hdr = skb_push(skb, sizeof(*pl_hdr));
pl_hdr->reserved = 0;
}
@@ -488,7 +488,7 @@
net_dev->stats.rx_dropped++;
goto error_skb_realloc;
}
- memcpy(skb_put(skb, buf_len), buf, buf_len);
+ skb_put_data(skb, buf, buf_len);
}
i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
skb->data - ETH_HLEN,
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 8f5a3f4..166920a 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -45,6 +45,7 @@
source "drivers/net/wireless/st/Kconfig"
source "drivers/net/wireless/ti/Kconfig"
source "drivers/net/wireless/zydas/Kconfig"
+source "drivers/net/wireless/quantenna/Kconfig"
config PCMCIA_RAYCS
tristate "Aviator/Raytheon 2.4GHz wireless support"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index f00d429..54b41ac 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -17,6 +17,7 @@
obj-$(CONFIG_WLAN_VENDOR_ST) += st/
obj-$(CONFIG_WLAN_VENDOR_TI) += ti/
obj-$(CONFIG_WLAN_VENDOR_ZYDAS) += zydas/
+obj-$(CONFIG_WLAN_VENDOR_QUANTENNA) += quantenna/
# 16-bit wireless PCMCIA client drivers
obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index ed626f56..3b0802f 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -390,9 +390,9 @@
priv->pdev,
priv->rx_buffers[entry].mapping,
pktlen, PCI_DMA_FROMDEVICE);
- memcpy(skb_put(skb, pktlen),
- skb_tail_pointer(priv->rx_buffers[entry].skb),
- pktlen);
+ skb_put_data(skb,
+ skb_tail_pointer(priv->rx_buffers[entry].skb),
+ pktlen);
pci_dma_sync_single_for_device(
priv->pdev,
priv->rx_buffers[entry].mapping,
@@ -1700,7 +1700,7 @@
skb_pull(skb, hdrlen);
payload_len = skb->len;
- txhdr = (struct adm8211_tx_hdr *) skb_push(skb, sizeof(*txhdr));
+ txhdr = skb_push(skb, sizeof(*txhdr));
memset(txhdr, 0, sizeof(*txhdr));
memcpy(txhdr->da, ieee80211_get_DA(hdr), ETH_ALEN);
txhdr->signal = plcp_signal;
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index f2f4ccf..106d6f8 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -829,8 +829,8 @@
data->ar = ar;
data->urb = urb;
- desc = (struct ar5523_tx_desc *)skb_push(skb, sizeof(*desc));
- chunk = (struct ar5523_chunk *)skb_push(skb, sizeof(*chunk));
+ desc = skb_push(skb, sizeof(*desc));
+ chunk = skb_push(skb, sizeof(*chunk));
chunk->seqnum = 0;
chunk->flags = UATH_CFLAGS_FINAL;
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index b4241cf..412eb13 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -22,6 +22,13 @@
---help---
This module adds support for AHB bus
+config ATH10K_SDIO
+ tristate "Atheros ath10k SDIO support (EXPERIMENTAL)"
+ depends on ATH10K && MMC
+ ---help---
+	  This module adds experimental support for the SDIO/MMC bus. It is
+	  currently a work in progress and does not yet fully work.
+
config ATH10K_DEBUG
bool "Atheros ath10k debugging"
depends on ATH10K
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 930fadd..b0b19a7 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -27,5 +27,8 @@
ath10k_pci-$(CONFIG_ATH10K_AHB) += ahb.o
+obj-$(CONFIG_ATH10K_SDIO) += ath10k_sdio.o
+ath10k_sdio-y += sdio.o
+
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index abeee20..2d3a2f3 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -97,6 +97,77 @@
return 0;
}
+#define TARGET_VERSION_SENTINAL 0xffffffffu
+
+int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
+ struct bmi_target_info *target_info)
+{
+ struct bmi_cmd cmd;
+ union bmi_resp resp;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
+ u32 resplen, ver_len;
+ __le32 tmp;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info SDIO\n");
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);
+
+	/* Step 1: Read 4 bytes of the target info and check if it is
+	 * the special sentinel version word or the first word in the
+	 * version response.
+	 */
+ resplen = sizeof(u32);
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &tmp, &resplen);
+ if (ret) {
+ ath10k_warn(ar, "unable to read from device\n");
+ return ret;
+ }
+
+	/* Some SDIO boards have a special sentinel value before the real
+	 * version response.
+	 */
+ if (__le32_to_cpu(tmp) == TARGET_VERSION_SENTINAL) {
+ /* Step 1b: Read the version length */
+ resplen = sizeof(u32);
+ ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0, &tmp,
+ &resplen);
+ if (ret) {
+ ath10k_warn(ar, "unable to read from device\n");
+ return ret;
+ }
+ }
+
+ ver_len = __le32_to_cpu(tmp);
+
+ /* Step 2: Check the target info length */
+ if (ver_len != sizeof(resp.get_target_info)) {
+ ath10k_warn(ar, "Unexpected target info len: %u. Expected: %zu\n",
+ ver_len, sizeof(resp.get_target_info));
+ return -EINVAL;
+ }
+
+ /* Step 3: Read the rest of the version response */
+ resplen = sizeof(resp.get_target_info) - sizeof(u32);
+ ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0,
+ &resp.get_target_info.version,
+ &resplen);
+ if (ret) {
+ ath10k_warn(ar, "unable to read from device\n");
+ return ret;
+ }
+
+ target_info->version = __le32_to_cpu(resp.get_target_info.version);
+ target_info->type = __le32_to_cpu(resp.get_target_info.type);
+
+ return 0;
+}
+
int ath10k_bmi_read_memory(struct ath10k *ar,
u32 address, void *buffer, u32 length)
{
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index cc45b63..0342073 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -198,6 +198,8 @@
int ath10k_bmi_done(struct ath10k *ar);
int ath10k_bmi_get_target_info(struct ath10k *ar,
struct bmi_target_info *target_info);
+int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
+ struct bmi_target_info *target_info);
int ath10k_bmi_read_memory(struct ath10k *ar, u32 address,
void *buffer, u32 length);
int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 5a06389..eea111d 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -389,6 +389,21 @@
complete(&ar->target_suspend);
}
+static void ath10k_init_sdio(struct ath10k *ar)
+{
+ u32 param = 0;
+
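+	/* Configure the firmware over BMI for SDIO operation: a 256 byte
+	 * mailbox I/O block size, an ISR yield limit, and hi_acs_flags
+	 * bits enabling mailbox swapping, reduced TX completions and the
+	 * alternate data credit size.
+	 */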
+ ath10k_bmi_write32(ar, hi_mbox_io_block_sz, 256);
+ ath10k_bmi_write32(ar, hi_mbox_isr_yield_limit, 99);
+	ath10k_bmi_read32(ar, hi_acs_flags, &param);
+
+ param |= (HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET |
+ HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET |
+ HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE);
+
+ ath10k_bmi_write32(ar, hi_acs_flags, param);
+}
+
static int ath10k_init_configure_target(struct ath10k *ar)
{
u32 param_host;
@@ -1395,7 +1410,18 @@
static void ath10k_core_get_fw_name(struct ath10k *ar, char *fw_name,
size_t fw_name_len, int fw_api)
{
- scnprintf(fw_name, fw_name_len, "%s-%d.bin", ATH10K_FW_FILE_BASE, fw_api);
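+	/* SDIO firmware images carry the bus name in the file name
+	 * (e.g. firmware-sdio-<api>.bin, assuming ATH10K_FW_FILE_BASE
+	 * is "firmware"); PCI and AHB keep the original naming.
+	 */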
+ switch (ar->hif.bus) {
+ case ATH10K_BUS_SDIO:
+ scnprintf(fw_name, fw_name_len, "%s-%s-%d.bin",
+ ATH10K_FW_FILE_BASE, ath10k_bus_str(ar->hif.bus),
+ fw_api);
+ break;
+ case ATH10K_BUS_PCI:
+ case ATH10K_BUS_AHB:
+ scnprintf(fw_name, fw_name_len, "%s-%d.bin",
+ ATH10K_FW_FILE_BASE, fw_api);
+ break;
+ }
}
static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
@@ -1953,6 +1979,9 @@
if (status)
goto err;
+ if (ar->hif.bus == ATH10K_BUS_SDIO)
+ ath10k_init_sdio(ar);
+
ar->htc.htc_ops.target_send_suspend_complete =
ath10k_send_suspend_complete;
@@ -2200,7 +2229,10 @@
}
memset(&target_info, 0, sizeof(target_info));
- ret = ath10k_bmi_get_target_info(ar, &target_info);
+ if (ar->hif.bus == ATH10K_BUS_SDIO)
+ ret = ath10k_bmi_get_target_info_sdio(ar, &target_info);
+ else
+ ret = ath10k_bmi_get_target_info(ar, &target_info);
if (ret) {
ath10k_err(ar, "could not get target info (%d)\n", ret);
goto err_power_down;
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index bf09151..8fc08a5 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -91,6 +91,7 @@
enum ath10k_bus {
ATH10K_BUS_PCI,
ATH10K_BUS_AHB,
+ ATH10K_BUS_SDIO,
};
static inline const char *ath10k_bus_str(enum ath10k_bus bus)
@@ -100,6 +101,8 @@
return "pci";
case ATH10K_BUS_AHB:
return "ahb";
+ case ATH10K_BUS_SDIO:
+ return "sdio";
}
return "unknown";
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 4cd2a0f..389fcb7 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -625,17 +625,21 @@
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
- char buf[32];
+ char buf[32] = {0};
+ ssize_t rc;
int ret;
- simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+ /* filter partial writes and invalid commands */
+ if (*ppos != 0 || count >= sizeof(buf) || count == 0)
+ return -EINVAL;
- /* make sure that buf is null terminated */
- buf[sizeof(buf) - 1] = 0;
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+ if (rc < 0)
+ return rc;
/* drop the possible '\n' from the end */
- if (buf[count - 1] == '\n')
- buf[count - 1] = 0;
+ if (buf[*ppos - 1] == '\n')
+ buf[*ppos - 1] = '\0';
mutex_lock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 2368f47..257d109 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -38,6 +38,8 @@
ATH10K_DBG_WMI_PRINT = 0x00002000,
ATH10K_DBG_PCI_PS = 0x00004000,
ATH10K_DBG_AHB = 0x00008000,
+ ATH10K_DBG_SDIO = 0x00010000,
+ ATH10K_DBG_SDIO_DUMP = 0x00020000,
ATH10K_DBG_ANY = 0xffffffff,
};
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index b7669b2..e5c80f5 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -57,8 +57,8 @@
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}
-static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
- struct sk_buff *skb)
+void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
+ struct sk_buff *skb)
{
struct ath10k *ar = ep->htc->ar;
@@ -75,6 +75,7 @@
ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
+EXPORT_SYMBOL(ath10k_htc_notify_tx_completion);
static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
struct sk_buff *skb)
@@ -230,12 +231,79 @@
spin_unlock_bh(&htc->tx_lock);
}
-static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
- u8 *buffer,
- int length,
- enum ath10k_htc_ep_id src_eid)
+static int
+ath10k_htc_process_lookahead(struct ath10k_htc *htc,
+ const struct ath10k_htc_lookahead_report *report,
+ int len,
+ enum ath10k_htc_ep_id eid,
+ void *next_lookaheads,
+ int *next_lookaheads_len)
{
struct ath10k *ar = htc->ar;
+
+	/* Invalid lookahead flags are actually transmitted by
+	 * the target in the HTC control message.
+	 * Since this will happen at every boot, we silently ignore
+	 * the lookahead in this case.
+ */
+ if (report->pre_valid != ((~report->post_valid) & 0xFF))
+ return 0;
+
+ if (next_lookaheads && next_lookaheads_len) {
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc rx lookahead found pre_valid 0x%x post_valid 0x%x\n",
+ report->pre_valid, report->post_valid);
+
+ /* look ahead bytes are valid, copy them over */
+ memcpy((u8 *)next_lookaheads, report->lookahead, 4);
+
+ *next_lookaheads_len = 1;
+ }
+
+ return 0;
+}
+
+static int
+ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
+ const struct ath10k_htc_lookahead_bundle *report,
+ int len,
+ enum ath10k_htc_ep_id eid,
+ void *next_lookaheads,
+ int *next_lookaheads_len)
+{
+ struct ath10k *ar = htc->ar;
+ int bundle_cnt = len / sizeof(*report);
+
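+	/* Each 4-byte entry in the record is one lookahead; reject bundle
+	 * counts the host cannot handle.
+	 */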
+ if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
+ ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
+ bundle_cnt);
+ return -EINVAL;
+ }
+
+ if (next_lookaheads && next_lookaheads_len) {
+ int i;
+
+ for (i = 0; i < bundle_cnt; i++) {
+ memcpy(((u8 *)next_lookaheads) + 4 * i,
+ report->lookahead, 4);
+ report++;
+ }
+
+ *next_lookaheads_len = bundle_cnt;
+ }
+
+ return 0;
+}
+
+int ath10k_htc_process_trailer(struct ath10k_htc *htc,
+ u8 *buffer,
+ int length,
+ enum ath10k_htc_ep_id src_eid,
+ void *next_lookaheads,
+ int *next_lookaheads_len)
+{
+ struct ath10k_htc_lookahead_bundle *bundle;
+ struct ath10k *ar = htc->ar;
int status = 0;
struct ath10k_htc_record *record;
u8 *orig_buffer;
@@ -274,6 +342,29 @@
record->hdr.len,
src_eid);
break;
+ case ATH10K_HTC_RECORD_LOOKAHEAD:
+ len = sizeof(struct ath10k_htc_lookahead_report);
+ if (record->hdr.len < len) {
+ ath10k_warn(ar, "Lookahead report too long\n");
+ status = -EINVAL;
+ break;
+ }
+ status = ath10k_htc_process_lookahead(htc,
+ record->lookahead_report,
+ record->hdr.len,
+ src_eid,
+ next_lookaheads,
+ next_lookaheads_len);
+ break;
+ case ATH10K_HTC_RECORD_LOOKAHEAD_BUNDLE:
+ bundle = record->lookahead_bundle;
+ status = ath10k_htc_process_lookahead_bundle(htc,
+ bundle,
+ record->hdr.len,
+ src_eid,
+ next_lookaheads,
+ next_lookaheads_len);
+ break;
default:
ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
record->hdr.id, record->hdr.len);
@@ -294,6 +385,7 @@
return status;
}
+EXPORT_SYMBOL(ath10k_htc_process_trailer);
void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
{
@@ -360,7 +452,8 @@
trailer += payload_len;
trailer -= trailer_len;
status = ath10k_htc_process_trailer(htc, trailer,
- trailer_len, hdr->eid);
+ trailer_len, hdr->eid,
+ NULL, NULL);
if (status)
goto out;
@@ -371,42 +464,6 @@
/* zero length packet with trailer data, just drop these */
goto out;
- if (eid == ATH10K_HTC_EP_0) {
- struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
-
- switch (__le16_to_cpu(msg->hdr.message_id)) {
- case ATH10K_HTC_MSG_READY_ID:
- case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
- /* handle HTC control message */
- if (completion_done(&htc->ctl_resp)) {
- /*
- * this is a fatal error, target should not be
- * sending unsolicited messages on the ep 0
- */
- ath10k_warn(ar, "HTC rx ctrl still processing\n");
- complete(&htc->ctl_resp);
- goto out;
- }
-
- htc->control_resp_len =
- min_t(int, skb->len,
- ATH10K_HTC_MAX_CTRL_MSG_LEN);
-
- memcpy(htc->control_resp_buffer, skb->data,
- htc->control_resp_len);
-
- complete(&htc->ctl_resp);
- break;
- case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
- htc->htc_ops.target_send_suspend_complete(ar);
- break;
- default:
- ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
- break;
- }
- goto out;
- }
-
ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
eid, skb);
ep->ep_ops.ep_rx_complete(ar, skb);
@@ -421,10 +478,40 @@
static void ath10k_htc_control_rx_complete(struct ath10k *ar,
struct sk_buff *skb)
{
- /* This is unexpected. FW is not supposed to send regular rx on this
- * endpoint.
- */
- ath10k_warn(ar, "unexpected htc rx\n");
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
+
+ switch (__le16_to_cpu(msg->hdr.message_id)) {
+ case ATH10K_HTC_MSG_READY_ID:
+ case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
+ /* handle HTC control message */
+ if (completion_done(&htc->ctl_resp)) {
+		/* This is a fatal error; the target should not be
+		 * sending unsolicited messages on ep 0.
+		 */
+ ath10k_warn(ar, "HTC rx ctrl still processing\n");
+ complete(&htc->ctl_resp);
+ goto out;
+ }
+
+ htc->control_resp_len =
+ min_t(int, skb->len,
+ ATH10K_HTC_MAX_CTRL_MSG_LEN);
+
+ memcpy(htc->control_resp_buffer, skb->data,
+ htc->control_resp_len);
+
+ complete(&htc->ctl_resp);
+ break;
+ case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
+ htc->htc_ops.target_send_suspend_complete(ar);
+ break;
+ default:
+ ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
+ break;
+ }
+
+out:
kfree_skb(skb);
}
@@ -497,12 +584,8 @@
struct ath10k *ar = htc->ar;
int i, status = 0;
unsigned long time_left;
- struct ath10k_htc_svc_conn_req conn_req;
- struct ath10k_htc_svc_conn_resp conn_resp;
struct ath10k_htc_msg *msg;
u16 message_id;
- u16 credit_count;
- u16 credit_size;
time_left = wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_WAIT_TIMEOUT_HZ);
@@ -539,16 +622,14 @@
msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
message_id = __le16_to_cpu(msg->hdr.message_id);
- credit_count = __le16_to_cpu(msg->ready.credit_count);
- credit_size = __le16_to_cpu(msg->ready.credit_size);
if (message_id != ATH10K_HTC_MSG_READY_ID) {
ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
return -ECOMM;
}
- htc->total_transmit_credits = credit_count;
- htc->target_credit_size = credit_size;
+ htc->total_transmit_credits = __le16_to_cpu(msg->ready.credit_count);
+ htc->target_credit_size = __le16_to_cpu(msg->ready.credit_size);
ath10k_dbg(ar, ATH10K_DBG_HTC,
"Target ready! transmit resources: %d size:%d\n",
@@ -561,20 +642,17 @@
return -ECOMM;
}
- /* setup our pseudo HTC control endpoint connection */
- memset(&conn_req, 0, sizeof(conn_req));
- memset(&conn_resp, 0, sizeof(conn_resp));
- conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
- conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
- conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
- conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;
-
- /* connect fake service */
- status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
- if (status) {
- ath10k_err(ar, "could not connect to htc service (%d)\n",
- status);
- return status;
+ /* The only way to determine if the ready message is an extended
+ * message is from the size.
+ */
+ if (htc->control_resp_len >=
+ sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
+ htc->max_msgs_per_htc_bundle =
+ min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
+ HTC_HOST_MAX_MSG_PER_BUNDLE);
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "Extended ready message. RX bundle size: %d\n",
+ htc->max_msgs_per_htc_bundle);
}
return 0;
@@ -772,6 +850,13 @@
msg->hdr.message_id =
__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);
+ if (ar->hif.bus == ATH10K_BUS_SDIO) {
+ /* Extra setup params used by SDIO */
+ msg->setup_complete_ext.flags =
+ __cpu_to_le32(ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN);
+ msg->setup_complete_ext.max_msgs_per_bundled_recv =
+ htc->max_msgs_per_htc_bundle;
+ }
ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
@@ -786,8 +871,10 @@
/* registered target arrival callback from the HIF layer */
int ath10k_htc_init(struct ath10k *ar)
{
- struct ath10k_htc_ep *ep = NULL;
+ int status;
struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_htc_svc_conn_req conn_req;
+ struct ath10k_htc_svc_conn_resp conn_resp;
spin_lock_init(&htc->tx_lock);
@@ -795,10 +882,21 @@
htc->ar = ar;
- /* Get HIF default pipe for HTC message exchange */
- ep = &htc->endpoint[ATH10K_HTC_EP_0];
+ /* setup our pseudo HTC control endpoint connection */
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+ conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
+ conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
+ conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;
- ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);
+ /* connect fake service */
+ status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
+ if (status) {
+ ath10k_err(ar, "could not connect to htc service (%d)\n",
+ status);
+ return status;
+ }
init_completion(&htc->ctl_resp);
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index 6ababa3..24663b0 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -50,6 +50,8 @@
* 4-byte aligned.
*/
+#define HTC_HOST_MAX_MSG_PER_BUNDLE 8
+
enum ath10k_htc_tx_flags {
ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
ATH10K_HTC_FLAG_SEND_BUNDLE = 0x02
@@ -110,6 +112,10 @@
ATH10K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4
};
+enum ath10k_htc_setup_complete_flags {
+ ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN = 1
+};
+
struct ath10k_ath10k_htc_msg_hdr {
__le16 message_id; /* @enum htc_message_id */
} __packed;
@@ -174,8 +180,10 @@
} __packed __aligned(4);
enum ath10k_ath10k_htc_record_id {
- ATH10K_HTC_RECORD_NULL = 0,
- ATH10K_HTC_RECORD_CREDITS = 1
+ ATH10K_HTC_RECORD_NULL = 0,
+ ATH10K_HTC_RECORD_CREDITS = 1,
+ ATH10K_HTC_RECORD_LOOKAHEAD = 2,
+ ATH10K_HTC_RECORD_LOOKAHEAD_BUNDLE = 3,
};
struct ath10k_ath10k_htc_record_hdr {
@@ -192,10 +200,28 @@
u8 pad1;
} __packed;
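+/* Lookahead reports are guarded by a pre_valid/post_valid byte pair;
+ * the report is only valid when pre_valid equals the bitwise complement
+ * of post_valid (see ath10k_htc_process_lookahead()).
+ */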
+struct ath10k_htc_lookahead_report {
+ u8 pre_valid;
+ u8 pad0;
+ u8 pad1;
+ u8 pad2;
+ u8 lookahead[4];
+ u8 post_valid;
+ u8 pad3;
+ u8 pad4;
+ u8 pad5;
+} __packed;
+
+struct ath10k_htc_lookahead_bundle {
+ u8 lookahead[4];
+} __packed;
+
struct ath10k_htc_record {
struct ath10k_ath10k_htc_record_hdr hdr;
union {
struct ath10k_htc_credit_report credit_report[0];
+ struct ath10k_htc_lookahead_report lookahead_report[0];
+ struct ath10k_htc_lookahead_bundle lookahead_bundle[0];
 		u8 payload[0];
};
} __packed __aligned(4);
@@ -338,6 +364,7 @@
int total_transmit_credits;
int target_credit_size;
+ u8 max_msgs_per_htc_bundle;
};
int ath10k_htc_init(struct ath10k *ar);
@@ -351,5 +378,13 @@
struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size);
void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
+ struct sk_buff *skb);
+int ath10k_htc_process_trailer(struct ath10k_htc *htc,
+ u8 *buffer,
+ int length,
+ enum ath10k_htc_ep_id src_eid,
+ void *next_lookaheads,
+ int *next_lookaheads_len);
#endif
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 5b1e90b..d342728 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -863,6 +863,59 @@
#define QCA9887_EEPROM_ADDR_LO_MASK 0x00ff0000
#define QCA9887_EEPROM_ADDR_LO_LSB 16
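+/* MBOX register offsets and bitmasks used by the SDIO HIF layer
+ * introduced in this series.
+ */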
+#define MBOX_RESET_CONTROL_ADDRESS 0x00000000
+#define MBOX_HOST_INT_STATUS_ADDRESS 0x00000800
+#define MBOX_HOST_INT_STATUS_ERROR_LSB 7
+#define MBOX_HOST_INT_STATUS_ERROR_MASK 0x00000080
+#define MBOX_HOST_INT_STATUS_CPU_LSB 6
+#define MBOX_HOST_INT_STATUS_CPU_MASK 0x00000040
+#define MBOX_HOST_INT_STATUS_COUNTER_LSB 4
+#define MBOX_HOST_INT_STATUS_COUNTER_MASK 0x00000010
+#define MBOX_CPU_INT_STATUS_ADDRESS 0x00000801
+#define MBOX_ERROR_INT_STATUS_ADDRESS 0x00000802
+#define MBOX_ERROR_INT_STATUS_WAKEUP_LSB 2
+#define MBOX_ERROR_INT_STATUS_WAKEUP_MASK 0x00000004
+#define MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_LSB 1
+#define MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK 0x00000002
+#define MBOX_ERROR_INT_STATUS_TX_OVERFLOW_LSB 0
+#define MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK 0x00000001
+#define MBOX_COUNTER_INT_STATUS_ADDRESS 0x00000803
+#define MBOX_COUNTER_INT_STATUS_COUNTER_LSB 0
+#define MBOX_COUNTER_INT_STATUS_COUNTER_MASK 0x000000ff
+#define MBOX_RX_LOOKAHEAD_VALID_ADDRESS 0x00000805
+#define MBOX_INT_STATUS_ENABLE_ADDRESS 0x00000828
+#define MBOX_INT_STATUS_ENABLE_ERROR_LSB 7
+#define MBOX_INT_STATUS_ENABLE_ERROR_MASK 0x00000080
+#define MBOX_INT_STATUS_ENABLE_CPU_LSB 6
+#define MBOX_INT_STATUS_ENABLE_CPU_MASK 0x00000040
+#define MBOX_INT_STATUS_ENABLE_INT_LSB 5
+#define MBOX_INT_STATUS_ENABLE_INT_MASK 0x00000020
+#define MBOX_INT_STATUS_ENABLE_COUNTER_LSB 4
+#define MBOX_INT_STATUS_ENABLE_COUNTER_MASK 0x00000010
+#define MBOX_INT_STATUS_ENABLE_MBOX_DATA_LSB 0
+#define MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK 0x0000000f
+#define MBOX_CPU_INT_STATUS_ENABLE_ADDRESS 0x00000819
+#define MBOX_CPU_INT_STATUS_ENABLE_BIT_LSB 0
+#define MBOX_CPU_INT_STATUS_ENABLE_BIT_MASK 0x000000ff
+#define MBOX_ERROR_STATUS_ENABLE_ADDRESS 0x0000081a
+#define MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB 1
+#define MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK 0x00000002
+#define MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB 0
+#define MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK 0x00000001
+#define MBOX_COUNTER_INT_STATUS_ENABLE_ADDRESS 0x0000081b
+#define MBOX_COUNTER_INT_STATUS_ENABLE_BIT_LSB 0
+#define MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK 0x000000ff
+#define MBOX_COUNT_ADDRESS 0x00000820
+#define MBOX_COUNT_DEC_ADDRESS 0x00000840
+#define MBOX_WINDOW_DATA_ADDRESS 0x00000874
+#define MBOX_WINDOW_WRITE_ADDR_ADDRESS 0x00000878
+#define MBOX_WINDOW_READ_ADDR_ADDRESS 0x0000087c
+#define MBOX_CPU_DBG_SEL_ADDRESS 0x00000883
+#define MBOX_CPU_DBG_ADDRESS 0x00000884
+#define MBOX_RTC_BASE_ADDRESS 0x00000000
+#define MBOX_GPIO_BASE_ADDRESS 0x00005000
+#define MBOX_MBOX_BASE_ADDRESS 0x00008000
+
#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
/* Register definitions for first generation ath10k cards. These cards include
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 4674ff3..16cf250 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -3475,9 +3475,8 @@
if (arvif->u.ap.noa_data)
if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
GFP_ATOMIC))
- memcpy(skb_put(skb, arvif->u.ap.noa_len),
- arvif->u.ap.noa_data,
- arvif->u.ap.noa_len);
+ skb_put_data(skb, arvif->u.ap.noa_data,
+ arvif->u.ap.noa_len);
spin_unlock_bh(&ar->data_lock);
}
}
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
new file mode 100644
index 0000000..9e78fba
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -0,0 +1,2113 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sd.h>
+#include <linux/bitfield.h>
+#include "core.h"
+#include "bmi.h"
+#include "debug.h"
+#include "hif.h"
+#include "htc.h"
+#include "targaddrs.h"
+#include "trace.h"
+#include "sdio.h"
+
+/* inlined helper functions */
+
+static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio,
+ size_t len)
+{
+ return __ALIGN_MASK((len), ar_sdio->mbox_info.block_mask);
+}
+
+static inline enum ath10k_htc_ep_id pipe_id_to_eid(u8 pipe_id)
+{
+ return (enum ath10k_htc_ep_id)pipe_id;
+}
+
+static inline void ath10k_sdio_mbox_free_rx_pkt(struct ath10k_sdio_rx_data *pkt)
+{
+ dev_kfree_skb(pkt->skb);
+ pkt->skb = NULL;
+ pkt->alloc_len = 0;
+ pkt->act_len = 0;
+ pkt->trailer_only = false;
+}
+
+static inline int ath10k_sdio_mbox_alloc_rx_pkt(struct ath10k_sdio_rx_data *pkt,
+ size_t act_len, size_t full_len,
+ bool part_of_bundle,
+ bool last_in_bundle)
+{
+ pkt->skb = dev_alloc_skb(full_len);
+ if (!pkt->skb)
+ return -ENOMEM;
+
+ pkt->act_len = act_len;
+ pkt->alloc_len = full_len;
+ pkt->part_of_bundle = part_of_bundle;
+ pkt->last_in_bundle = last_in_bundle;
+ pkt->trailer_only = false;
+
+ return 0;
+}
+
+static inline bool is_trailer_only_msg(struct ath10k_sdio_rx_data *pkt)
+{
+ bool trailer_only = false;
+ struct ath10k_htc_hdr *htc_hdr =
+ (struct ath10k_htc_hdr *)pkt->skb->data;
+ u16 len = __le16_to_cpu(htc_hdr->len);
+
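+	/* A message whose payload is nothing but the trailer carries no
+	 * data for the endpoint.
+	 */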
+ if (len == htc_hdr->trailer_len)
+ trailer_only = true;
+
+ return trailer_only;
+}
+
+/* sdio/mmc functions */
+
+static inline void ath10k_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
+ unsigned int address,
+ unsigned char val)
+{
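+	/* Build the SD_IO_RW_DIRECT (CMD52) argument: per the SDIO spec,
+	 * bit 31 is the R/W flag, bits 30:28 the function number (left at
+	 * zero here, i.e. function 0), bit 27 the RAW flag, bits 25:9 the
+	 * register address and bits 7:0 the data byte. Bits 26 and 8 are
+	 * presumably stuff bits, set to 1 here.
+	 */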
+ *arg = FIELD_PREP(BIT(31), write) |
+ FIELD_PREP(BIT(27), raw) |
+ FIELD_PREP(BIT(26), 1) |
+ FIELD_PREP(GENMASK(25, 9), address) |
+ FIELD_PREP(BIT(8), 1) |
+ FIELD_PREP(GENMASK(7, 0), val);
+}
+
+static int ath10k_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
+ unsigned int address,
+ unsigned char byte)
+{
+ struct mmc_command io_cmd;
+
+ memset(&io_cmd, 0, sizeof(io_cmd));
+ ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
+ io_cmd.opcode = SD_IO_RW_DIRECT;
+ io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+ return mmc_wait_for_cmd(card->host, &io_cmd, 0);
+}
+
+static int ath10k_sdio_func0_cmd52_rd_byte(struct mmc_card *card,
+ unsigned int address,
+ unsigned char *byte)
+{
+ struct mmc_command io_cmd;
+ int ret;
+
+ memset(&io_cmd, 0, sizeof(io_cmd));
+ ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 0, 0, address, 0);
+ io_cmd.opcode = SD_IO_RW_DIRECT;
+ io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+ ret = mmc_wait_for_cmd(card->host, &io_cmd, 0);
+ if (!ret)
+ *byte = io_cmd.resp[0];
+
+ return ret;
+}
+
+static int ath10k_sdio_config(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ unsigned char byte, asyncintdelay = 2;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n");
+
+ sdio_claim_host(func);
+
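+	/* Tune the card CCCR registers: drive strength type D, the
+	 * driver-strength enable bits, 4-bit asynchronous interrupt mode
+	 * and the asynchronous interrupt delay.
+	 */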
+ byte = 0;
+ ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
+ SDIO_CCCR_DRIVE_STRENGTH,
+ &byte);
+
+ byte &= ~ATH10K_SDIO_DRIVE_DTSX_MASK;
+ byte |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK,
+ ATH10K_SDIO_DRIVE_DTSX_TYPE_D);
+
+ ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
+ SDIO_CCCR_DRIVE_STRENGTH,
+ byte);
+
+ byte = 0;
+ ret = ath10k_sdio_func0_cmd52_rd_byte(
+ func->card,
+ CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
+ &byte);
+
+ byte |= (CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A |
+ CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C |
+ CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D);
+
+ ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
+ CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
+ byte);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable driver strength: %d\n", ret);
+ goto out;
+ }
+
+ byte = 0;
+ ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
+ CCCR_SDIO_IRQ_MODE_REG_SDIO3,
+ &byte);
+
+ byte |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3;
+
+ ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
+ CCCR_SDIO_IRQ_MODE_REG_SDIO3,
+ byte);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n",
+ ret);
+ goto out;
+ }
+
+ byte = 0;
+ ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
+ CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
+ &byte);
+
+ byte &= ~CCCR_SDIO_ASYNC_INT_DELAY_MASK;
+ byte |= FIELD_PREP(CCCR_SDIO_ASYNC_INT_DELAY_MASK, asyncintdelay);
+
+ ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
+ CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
+ byte);
+
+ /* give us some time to enable, in ms */
+ func->enable_timeout = 100;
+
+ ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size);
+ if (ret) {
+ ath10k_warn(ar, "failed to set sdio block size to %d: %d\n",
+ ar_sdio->mbox_info.block_size, ret);
+ goto out;
+ }
+
+out:
+ sdio_release_host(func);
+ return ret;
+}
+
+static int ath10k_sdio_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ sdio_claim_host(func);
+
+ sdio_writel(func, val, addr, &ret);
+ if (ret) {
+ ath10k_warn(ar, "failed to write 0x%x to address 0x%x: %d\n",
+ val, addr, ret);
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write32 addr 0x%x val 0x%x\n",
+ addr, val);
+
+out:
+ sdio_release_host(func);
+
+ return ret;
+}
+
+static int ath10k_sdio_writesb32(struct ath10k *ar, u32 addr, u32 val)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ __le32 *buf;
+ int ret;
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ *buf = cpu_to_le32(val);
+
+ sdio_claim_host(func);
+
+ ret = sdio_writesb(func, addr, buf, sizeof(*buf));
+ if (ret) {
+ ath10k_warn(ar, "failed to write value 0x%x to fixed sb address 0x%x: %d\n",
+ val, addr, ret);
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio writesb32 addr 0x%x val 0x%x\n",
+ addr, val);
+
+out:
+ sdio_release_host(func);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static int ath10k_sdio_read32(struct ath10k *ar, u32 addr, u32 *val)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ sdio_claim_host(func);
+ *val = sdio_readl(func, addr, &ret);
+ if (ret) {
+ ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
+ addr, ret);
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read32 addr 0x%x val 0x%x\n",
+ addr, *val);
+
+out:
+ sdio_release_host(func);
+
+ return ret;
+}
+
+static int ath10k_sdio_read(struct ath10k *ar, u32 addr, void *buf, size_t len)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ sdio_claim_host(func);
+
+ ret = sdio_memcpy_fromio(func, buf, addr, len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
+ addr, ret);
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read addr 0x%x buf 0x%p len %zu\n",
+ addr, buf, len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio read ", buf, len);
+
+out:
+ sdio_release_host(func);
+
+ return ret;
+}
+
+static int ath10k_sdio_write(struct ath10k *ar, u32 addr, const void *buf, size_t len)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ sdio_claim_host(func);
+
+	/* For some reason toio() doesn't take a const buffer, so an ugly
+	 * cast is needed to work around that.
+	 */
+ ret = sdio_memcpy_toio(func, addr, (void *)buf, len);
+ if (ret) {
+ ath10k_warn(ar, "failed to write to address 0x%x: %d\n",
+ addr, ret);
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write addr 0x%x buf 0x%p len %zu\n",
+ addr, buf, len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio write ", buf, len);
+
+out:
+ sdio_release_host(func);
+
+ return ret;
+}
+
+static int ath10k_sdio_readsb(struct ath10k *ar, u32 addr, void *buf, size_t len)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ sdio_claim_host(func);
+
+ len = round_down(len, ar_sdio->mbox_info.block_size);
+
+ ret = sdio_readsb(func, buf, addr, len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read from fixed (sb) address 0x%x: %d\n",
+ addr, ret);
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio readsb addr 0x%x buf 0x%p len %zu\n",
+ addr, buf, len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio readsb ", buf, len);
+
+out:
+ sdio_release_host(func);
+
+ return ret;
+}
+
+/* HIF mbox functions */
+
+static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
+ struct ath10k_sdio_rx_data *pkt,
+ u32 *lookaheads,
+ int *n_lookaheads)
+{
+ struct ath10k_htc *htc = &ar->htc;
+ struct sk_buff *skb = pkt->skb;
+ struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data;
+ bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
+ enum ath10k_htc_ep_id eid;
+ u16 payload_len;
+ u8 *trailer;
+ int ret;
+
+ payload_len = le16_to_cpu(htc_hdr->len);
+
+ if (trailer_present) {
+ trailer = skb->data + sizeof(*htc_hdr) +
+ payload_len - htc_hdr->trailer_len;
+
+ eid = pipe_id_to_eid(htc_hdr->eid);
+
+ ret = ath10k_htc_process_trailer(htc,
+ trailer,
+ htc_hdr->trailer_len,
+ eid,
+ lookaheads,
+ n_lookaheads);
+ if (ret)
+ return ret;
+
+ if (is_trailer_only_msg(pkt))
+ pkt->trailer_only = true;
+
+ skb_trim(skb, skb->len - htc_hdr->trailer_len);
+ }
+
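+	/* Strip the HTC header so the endpoint RX handler only sees the
+	 * payload.
+	 */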
+ skb_pull(skb, sizeof(*htc_hdr));
+
+ return 0;
+}
+
+static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
+ u32 lookaheads[],
+ int *n_lookahead)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_sdio_rx_data *pkt;
+ struct ath10k_htc_ep *ep;
+ enum ath10k_htc_ep_id id;
+ int ret, i, *n_lookahead_local;
+ u32 *lookaheads_local;
+
+ for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
+ lookaheads_local = lookaheads;
+ n_lookahead_local = n_lookahead;
+
+ id = ((struct ath10k_htc_hdr *)&lookaheads[i])->eid;
+
+ if (id >= ATH10K_HTC_EP_COUNT) {
+ ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
+ id);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ep = &htc->endpoint[id];
+
+ if (ep->service_id == 0) {
+ ath10k_warn(ar, "ep %d is not connected\n", id);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pkt = &ar_sdio->rx_pkts[i];
+
+ if (pkt->part_of_bundle && !pkt->last_in_bundle) {
+			/* Only read lookaheads from RX trailers
+			 * for the last packet in a bundle.
+			 */
+ lookaheads_local = NULL;
+ n_lookahead_local = NULL;
+ }
+
+ ret = ath10k_sdio_mbox_rx_process_packet(ar,
+ pkt,
+ lookaheads_local,
+ n_lookahead_local);
+ if (ret)
+ goto out;
+
+ if (!pkt->trailer_only)
+ ep->ep_ops.ep_rx_complete(ar_sdio->ar, pkt->skb);
+ else
+ kfree_skb(pkt->skb);
+
+		/* The RX complete handler now owns the skb... */
+ pkt->skb = NULL;
+ pkt->alloc_len = 0;
+ }
+
+ ret = 0;
+
+out:
+	/* Free all packets that were not passed on to the RX completion
+	 * handler...
+	 */
+ for (; i < ar_sdio->n_rx_pkts; i++)
+ ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
+
+ return ret;
+}
+
+static int ath10k_sdio_mbox_alloc_pkt_bundle(struct ath10k *ar,
+ struct ath10k_sdio_rx_data *rx_pkts,
+ struct ath10k_htc_hdr *htc_hdr,
+ size_t full_len, size_t act_len,
+ size_t *bndl_cnt)
+{
+ int ret, i;
+
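+	/* The number of bundled packets that follow is encoded in the
+	 * flags field of the HTC header.
+	 */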
+ *bndl_cnt = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, htc_hdr->flags);
+
+ if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_BUNDLE) {
+		ath10k_warn(ar,
+			    "HTC bundle count %zu exceeds maximum %u\n",
+			    *bndl_cnt,
+			    HTC_HOST_MAX_MSG_PER_BUNDLE);
+ return -ENOMEM;
+ }
+
+	/* Allocate bndl_cnt extra skbs for the bundle.
+	 * The packet carrying the ATH10K_HTC_FLAG_BUNDLE_MASK
+	 * flag is not included in bndl_cnt; its skb will be
+	 * allocated separately.
+	 */
+ for (i = 0; i < *bndl_cnt; i++) {
+ ret = ath10k_sdio_mbox_alloc_rx_pkt(&rx_pkts[i],
+ act_len,
+ full_len,
+ true,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
+ u32 lookaheads[], int n_lookaheads)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_htc_hdr *htc_hdr;
+ size_t full_len, act_len;
+ bool last_in_bundle;
+ int ret, i;
+
+ if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
+ ath10k_warn(ar,
+ "the total number of pkgs to be fetched (%u) exceeds maximum %u\n",
+ n_lookaheads,
+ ATH10K_SDIO_MAX_RX_MSGS);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < n_lookaheads; i++) {
+ htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i];
+ last_in_bundle = false;
+
+ if (le16_to_cpu(htc_hdr->len) >
+ ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
+ ath10k_warn(ar,
+ "payload length %d exceeds max htc length: %zu\n",
+ le16_to_cpu(htc_hdr->len),
+ ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
+ full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len);
+
+ if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) {
+ ath10k_warn(ar,
+ "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
+ htc_hdr->eid, htc_hdr->flags,
+ le16_to_cpu(htc_hdr->len));
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (htc_hdr->flags & ATH10K_HTC_FLAG_BUNDLE_MASK) {
+ /* HTC header indicates that every packet to follow
+ * has the same padded length so that it can be
+ * optimally fetched as a full bundle.
+ */
+ size_t bndl_cnt;
+
+ ret = ath10k_sdio_mbox_alloc_pkt_bundle(ar,
+ &ar_sdio->rx_pkts[i],
+ htc_hdr,
+ full_len,
+ act_len,
+ &bndl_cnt);
+
+ n_lookaheads += bndl_cnt;
+ i += bndl_cnt;
+			/* Next buffer will be the last in the bundle */
+ last_in_bundle = true;
+ }
+
+ /* Allocate skb for packet. If the packet had the
+ * ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled
+ * packet skb's have been allocated in the previous step.
+ */
+ ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[i],
+ act_len,
+ full_len,
+ last_in_bundle,
+ last_in_bundle);
+ }
+
+ ar_sdio->n_rx_pkts = i;
+
+ return 0;
+
+err:
+ for (i = 0; i < ATH10K_SDIO_MAX_RX_MSGS; i++) {
+ if (!ar_sdio->rx_pkts[i].alloc_len)
+ break;
+ ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
+ }
+
+ return ret;
+}
+
+static int ath10k_sdio_mbox_rx_packet(struct ath10k *ar,
+ struct ath10k_sdio_rx_data *pkt)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sk_buff *skb = pkt->skb;
+ int ret;
+
+ ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
+ skb->data, pkt->alloc_len);
+ pkt->status = ret;
+ if (!ret)
+ skb_put(skb, pkt->act_len);
+
+ return ret;
+}
+
+static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ int ret, i;
+
+ for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
+ ret = ath10k_sdio_mbox_rx_packet(ar,
+ &ar_sdio->rx_pkts[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ /* Free all packets that were not successfully fetched. */
+ for (; i < ar_sdio->n_rx_pkts; i++)
+ ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
+
+ return ret;
+}
+
+/* This is the timeout for mailbox processing done in the sdio irq
+ * handler. The timeout is deliberately set quite high since SDIO dump logs
+ * over serial port can/will add a substantial overhead to the processing
+ * (if enabled).
+ */
+#define SDIO_MBOX_PROCESSING_TIMEOUT_HZ (20 * HZ)
+
+static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
+ u32 msg_lookahead, bool *done)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ u32 lookaheads[ATH10K_SDIO_MAX_RX_MSGS];
+ int n_lookaheads = 1;
+ unsigned long timeout;
+ int ret;
+
+ *done = true;
+
+ /* Copy the lookahead obtained from the HTC register table into our
+ * temp array as a start value.
+ */
+ lookaheads[0] = msg_lookahead;
+
+ timeout = jiffies + SDIO_MBOX_PROCESSING_TIMEOUT_HZ;
+ while (time_before(jiffies, timeout)) {
+ /* Try to allocate as many HTC RX packets indicated by
+ * n_lookaheads.
+ */
+ ret = ath10k_sdio_mbox_rx_alloc(ar, lookaheads,
+ n_lookaheads);
+ if (ret)
+ break;
+
+ if (ar_sdio->n_rx_pkts >= 2)
+ /* A recv bundle was detected, force a re-check of the
+ * IRQ status.
+ */
+ *done = false;
+
+ ret = ath10k_sdio_mbox_rx_fetch(ar);
+
+ /* Process fetched packets. This will potentially update
+ * n_lookaheads depending on if the packets contain lookahead
+ * reports.
+ */
+ n_lookaheads = 0;
+ ret = ath10k_sdio_mbox_rx_process_packets(ar,
+ lookaheads,
+ &n_lookaheads);
+
+ if (!n_lookaheads || ret)
+ break;
+
+ /* For SYNCH processing, if we get here, we are running
+ * through the loop again due to updated lookaheads. Set
+ * flag that we should re-check IRQ status registers again
+ * before leaving IRQ processing, this can net better
+ * performance in high throughput situations.
+ */
+ *done = false;
+ }
+
+ if (ret && (ret != -ECANCELED))
+ ath10k_warn(ar, "failed to get pending recv messages: %d\n",
+ ret);
+
+ return ret;
+}
+
+static int ath10k_sdio_mbox_proc_dbg_intr(struct ath10k *ar)
+{
+ u32 val;
+ int ret;
+
+ /* TODO: Add firmware crash handling */
+ ath10k_warn(ar, "firmware crashed\n");
+
+ /* read counter to clear the interrupt, the debug error interrupt is
+ * counter 0.
+ */
+ ret = ath10k_sdio_read32(ar, MBOX_COUNT_DEC_ADDRESS, &val);
+ if (ret)
+ ath10k_warn(ar, "failed to clear debug interrupt: %d\n", ret);
+
+ return ret;
+}
+
+static int ath10k_sdio_mbox_proc_counter_intr(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+ u8 counter_int_status;
+ int ret;
+
+ mutex_lock(&irq_data->mtx);
+ counter_int_status = irq_data->irq_proc_reg->counter_int_status &
+ irq_data->irq_en_reg->cntr_int_status_en;
+
+ /* NOTE: other modules like GMBOX may use the counter interrupt for
+ * credit flow control on other counters, we only need to check for
+ * the debug assertion counter interrupt.
+ */
+ if (counter_int_status & ATH10K_SDIO_TARGET_DEBUG_INTR_MASK)
+ ret = ath10k_sdio_mbox_proc_dbg_intr(ar);
+ else
+ ret = 0;
+
+ mutex_unlock(&irq_data->mtx);
+
+ return ret;
+}
+
+static int ath10k_sdio_mbox_proc_err_intr(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+ u8 error_int_status;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n");
+
+ error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F;
+ if (!error_int_status) {
+ ath10k_warn(ar, "invalid error interrupt status: 0x%x\n",
+ error_int_status);
+ return -EIO;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio error_int_status 0x%x\n", error_int_status);
+
+ if (FIELD_GET(MBOX_ERROR_INT_STATUS_WAKEUP_MASK,
+ error_int_status))
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n");
+
+ if (FIELD_GET(MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK,
+ error_int_status))
+ ath10k_warn(ar, "rx underflow interrupt error\n");
+
+ if (FIELD_GET(MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK,
+ error_int_status))
+ ath10k_warn(ar, "tx overflow interrupt error\n");
+
+ /* Clear the interrupt */
+ irq_data->irq_proc_reg->error_int_status &= ~error_int_status;
+
+ /* set W1C value to clear the interrupt, this hits the register first */
+ ret = ath10k_sdio_writesb32(ar, MBOX_ERROR_INT_STATUS_ADDRESS,
+ error_int_status);
+ if (ret) {
+ ath10k_warn(ar, "unable to write to error int status address: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
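The "W1C" (write-1-to-clear) comments above describe a common interrupt-status idiom: writing the value just read back to the register clears exactly the bits that were set, so interrupts asserting between the read and the write are not lost. A standalone sketch of that behavior:

#include <stdio.h>
#include <stdint.h>

static uint8_t hw_status = 0x05; /* pending: bits 0 and 2 */

/* Hardware-side W1C behavior: bits written as 1 are cleared */
static void w1c_write(uint8_t val)
{
	hw_status &= ~val;
}

int main(void)
{
	uint8_t snapshot = hw_status; /* driver reads 0x05 */

	hw_status |= 0x02;   /* a new interrupt fires meanwhile */
	w1c_write(snapshot); /* clears only bits 0 and 2 */
	printf("still pending: 0x%x\n", hw_status); /* 0x02 survives */
	return 0;
}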
+
+static int ath10k_sdio_mbox_proc_cpu_intr(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+ u8 cpu_int_status;
+ int ret;
+
+ mutex_lock(&irq_data->mtx);
+ cpu_int_status = irq_data->irq_proc_reg->cpu_int_status &
+ irq_data->irq_en_reg->cpu_int_status_en;
+ if (!cpu_int_status) {
+ ath10k_warn(ar, "CPU interrupt status is zero\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Clear the interrupt */
+ irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status;
+
+ /* Set up the register transfer buffer to hit the register 4 times,
+ * this is done to make the access 4-byte aligned to mitigate issues
+ * with host bus interconnects that restrict bus transfer lengths to
+ * be a multiple of 4-bytes.
+ *
+ * Set W1C value to clear the interrupt, this hits the register first.
+ */
+ ret = ath10k_sdio_writesb32(ar, MBOX_CPU_INT_STATUS_ADDRESS,
+ cpu_int_status);
+ if (ret) {
+ ath10k_warn(ar, "unable to write to cpu interrupt status address: %d\n",
+ ret);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&irq_data->mtx);
+ return ret;
+}
+
+static int ath10k_sdio_mbox_read_int_status(struct ath10k *ar,
+ u8 *host_int_status,
+ u32 *lookahead)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+ struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg;
+ struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg;
+ u8 htc_mbox = FIELD_PREP(ATH10K_HTC_MAILBOX_MASK, 1);
+ int ret;
+
+ mutex_lock(&irq_data->mtx);
+
+ *lookahead = 0;
+ *host_int_status = 0;
+
+ /* int_status_en is supposed to be non-zero, otherwise interrupts
+ * shouldn't be enabled. There is however a short time frame during
+ * initialization between the irq register and int_status_en init
+ * where this can happen.
+ * We silently ignore this condition.
+ */
+ if (!irq_en_reg->int_status_en) {
+ ret = 0;
+ goto out;
+ }
+
+ /* Read the first sizeof(struct ath10k_sdio_irq_proc_regs)
+ * bytes of the HTC register table. This yields the values of
+ * the different int status registers and of the lookahead
+ * registers.
+ */
+ ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS,
+ irq_proc_reg, sizeof(*irq_proc_reg));
+ if (ret)
+ goto out;
+
+ /* Update only those registers that are enabled */
+ *host_int_status = irq_proc_reg->host_int_status &
+ irq_en_reg->int_status_en;
+
+ /* Look at mbox status */
+ if (!(*host_int_status & htc_mbox)) {
+ *lookahead = 0;
+ ret = 0;
+ goto out;
+ }
+
+ /* Mask out pending mbox value, we use look ahead as
+ * the real flag for mbox processing.
+ */
+ *host_int_status &= ~htc_mbox;
+ if (irq_proc_reg->rx_lookahead_valid & htc_mbox) {
+ *lookahead = le32_to_cpu(
+ irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]);
+ if (!*lookahead)
+ ath10k_warn(ar, "sdio mbox lookahead is zero\n");
+ }
+
+out:
+ mutex_unlock(&irq_data->mtx);
+ return ret;
+}
+
+static int ath10k_sdio_mbox_proc_pending_irqs(struct ath10k *ar,
+ bool *done)
+{
+ u8 host_int_status;
+ u32 lookahead;
+ int ret;
+
+ /* NOTE: HIF implementation guarantees that the context of this
+ * call allows us to perform SYNCHRONOUS I/O, that is we can block,
+ * sleep or call any API that can block or switch thread/task
+ * contexts. This is a fully schedulable context.
+ */
+
+ ret = ath10k_sdio_mbox_read_int_status(ar,
+ &host_int_status,
+ &lookahead);
+ if (ret) {
+ *done = true;
+ goto out;
+ }
+
+ if (!host_int_status && !lookahead) {
+ ret = 0;
+ *done = true;
+ goto out;
+ }
+
+ if (lookahead) {
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio pending mailbox msg lookahead 0x%08x\n",
+ lookahead);
+
+ ret = ath10k_sdio_mbox_rxmsg_pending_handler(ar,
+ lookahead,
+ done);
+ if (ret)
+ goto out;
+ }
+
+ /* now, handle the rest of the interrupts */
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio host_int_status 0x%x\n", host_int_status);
+
+ if (FIELD_GET(MBOX_HOST_INT_STATUS_CPU_MASK, host_int_status)) {
+ /* CPU Interrupt */
+ ret = ath10k_sdio_mbox_proc_cpu_intr(ar);
+ if (ret)
+ goto out;
+ }
+
+ if (FIELD_GET(MBOX_HOST_INT_STATUS_ERROR_MASK, host_int_status)) {
+ /* Error Interrupt */
+ ret = ath10k_sdio_mbox_proc_err_intr(ar);
+ if (ret)
+ goto out;
+ }
+
+ if (FIELD_GET(MBOX_HOST_INT_STATUS_COUNTER_MASK, host_int_status))
+ /* Counter Interrupt */
+ ret = ath10k_sdio_mbox_proc_counter_intr(ar);
+
+ ret = 0;
+
+out:
+ /* An optimization to bypass reading the IRQ status registers
+ * unnecessarily: if upper layers determine that we are in a
+ * low-throughput mode, we can rely on taking another interrupt
+ * rather than re-checking the status registers, which can
+ * re-wake the target.
+ *
+ * NOTE: host interfaces that detect pending mbox messages at the
+ * HIF level cannot use this optimization due to possible side
+ * effects. SPI requires the host to drain all messages from the
+ * mailbox before exiting the ISR routine.
+ */
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio pending irqs done %d status %d",
+ *done, ret);
+
+ return ret;
+}
+
+static void ath10k_sdio_set_mbox_info(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
+ u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev;
+
+ mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR;
+ mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE;
+ mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1;
+ mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR;
+ mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH;
+
+ mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR;
+
+ dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, device);
+ dev_id_chiprev = FIELD_GET(QCA_MANUFACTURER_ID_REV_MASK, device);
+ switch (dev_id_base) {
+ case QCA_MANUFACTURER_ID_AR6005_BASE:
+ if (dev_id_chiprev < 4)
+ mbox_info->ext_info[0].htc_ext_sz =
+ ATH10K_HIF_MBOX0_EXT_WIDTH;
+ else
+ /* from QCA6174 2.0 (0x504), the width has been extended
+ * to 56K
+ */
+ mbox_info->ext_info[0].htc_ext_sz =
+ ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
+ break;
+ case QCA_MANUFACTURER_ID_QCA9377_BASE:
+ mbox_info->ext_info[0].htc_ext_sz =
+ ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
+ break;
+ default:
+ mbox_info->ext_info[0].htc_ext_sz =
+ ATH10K_HIF_MBOX0_EXT_WIDTH;
+ }
+
+ mbox_info->ext_info[1].htc_ext_addr =
+ mbox_info->ext_info[0].htc_ext_addr +
+ mbox_info->ext_info[0].htc_ext_sz +
+ ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE;
+ mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH;
+}
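As a sanity check on the extended-mailbox layout computed above, here is a worked example using the constants from sdio.h later in this patch (0x5000 base, 36K default width, 2K dummy gap); the second window then starts at 0xe800:

#include <stdio.h>
#include <stdint.h>

#define MBOX0_EXT_BASE   0x5000      /* ATH10K_HIF_MBOX0_EXT_BASE_ADDR */
#define MBOX0_EXT_WIDTH  (36 * 1024) /* 56 * 1024 on QCA6174 2.0+ */
#define MBOX_DUMMY_SPACE (2 * 1024)  /* ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE */

int main(void)
{
	/* ext_info[1] starts right after mailbox 0 plus the dummy gap */
	uint32_t mbox1_ext = MBOX0_EXT_BASE + MBOX0_EXT_WIDTH +
			     MBOX_DUMMY_SPACE;

	printf("mbox1 ext addr: 0x%x\n", mbox1_ext); /* 0xe800 */
	return 0;
}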
+
+/* BMI functions */
+
+static int ath10k_sdio_bmi_credits(struct ath10k *ar)
+{
+ u32 addr, cmd_credits;
+ unsigned long timeout;
+ int ret;
+
+ /* Read the counter register to get the command credits */
+ addr = MBOX_COUNT_DEC_ADDRESS + ATH10K_HIF_MBOX_NUM_MAX * 4;
+ timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
+ cmd_credits = 0;
+
+ while (time_before(jiffies, timeout) && !cmd_credits) {
+ /* Hit the credit counter with a 4-byte access, the first byte
+ * read will hit the counter and cause a decrement, while the
+ * remaining 3 bytes have no effect. The rationale behind this
+ * is to make all HIF accesses 4-byte aligned.
+ */
+ ret = ath10k_sdio_read32(ar, addr, &cmd_credits);
+ if (ret) {
+ ath10k_warn(ar,
+ "unable to decrement the command credit count register: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* The counter is only 8 bits.
+ * Ignore anything in the upper 3 bytes
+ */
+ cmd_credits &= 0xFF;
+ }
+
+ if (!cmd_credits) {
+ ath10k_warn(ar, "bmi communication timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
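The credit loop above is a bounded-polling pattern: re-read the counter until it becomes non-zero or a jiffies deadline passes. A hedged userspace analogue with a monotonic-clock deadline and a fake register read:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

/* Fake register read: a credit appears on the third poll */
static int read_credits(uint32_t *val)
{
	static int calls;

	*val = (++calls >= 3) ? 1 : 0;
	return 0;
}

int main(void)
{
	struct timespec now, deadline;
	uint32_t credits = 0;

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_sec += 2; /* budget, like BMI_COMMUNICATION_TIMEOUT_HZ */

	do {
		if (read_credits(&credits))
			return 1;
		credits &= 0xFF; /* the counter is only 8 bits wide */
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while (!credits && now.tv_sec < deadline.tv_sec);

	printf(credits ? "got credit\n" : "timeout\n");
	return 0;
}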
+
+static int ath10k_sdio_bmi_get_rx_lookahead(struct ath10k *ar)
+{
+ unsigned long timeout;
+ u32 rx_word;
+ int ret;
+
+ timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
+ rx_word = 0;
+
+ while ((time_before(jiffies, timeout)) && !rx_word) {
+ ret = ath10k_sdio_read32(ar,
+ MBOX_HOST_INT_STATUS_ADDRESS,
+ &rx_word);
+ if (ret) {
+ ath10k_warn(ar, "unable to read RX_LOOKAHEAD_VALID: %d\n", ret);
+ return ret;
+ }
+
+ /* all we really want is one bit */
+ rx_word &= 1;
+ }
+
+ if (!rx_word) {
+ ath10k_warn(ar, "bmi_recv_buf FIFO empty\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ath10k_sdio_bmi_exchange_msg(struct ath10k *ar,
+ void *req, u32 req_len,
+ void *resp, u32 *resp_len)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ u32 addr;
+ int ret;
+
+ if (req) {
+ ret = ath10k_sdio_bmi_credits(ar);
+ if (ret)
+ return ret;
+
+ addr = ar_sdio->mbox_info.htc_addr;
+
+ memcpy(ar_sdio->bmi_buf, req, req_len);
+ ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len);
+ if (ret) {
+ ath10k_warn(ar,
+ "unable to send the bmi data to the device: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (!resp || !resp_len)
+ /* No response expected */
+ return 0;
+
+ /* During normal bootup, small reads may be required.
+ * Rather than issue an HIF Read and then wait as the Target
+ * adds successive bytes to the FIFO, we wait here until
+ * we know that response data is available.
+ *
+ * This allows us to cleanly timeout on an unexpected
+ * Target failure rather than risk problems at the HIF level.
+ * In particular, this avoids SDIO timeouts and possibly garbage
+ * data on some host controllers. And on an interconnect
+ * such as Compact Flash (as well as some SDIO masters) which
+ * does not provide any indication on data timeout, it avoids
+ * a potential hang or garbage response.
+ *
+ * Synchronization is more difficult for reads larger than the
+ * size of the MBOX FIFO (128B), because the Target is unable
+ * to push the 129th byte of data until AFTER the Host posts an
+ * HIF Read and removes some FIFO data. So for large reads the
+ * Host proceeds to post an HIF Read BEFORE all the data is
+ * actually available to read. Fortunately, large BMI reads do
+ * not occur in practice -- they're supported for debug/development.
+ *
+ * So Host/Target BMI synchronization is divided into these cases:
+ * CASE 1: length < 4
+ * Should not happen
+ *
+ * CASE 2: 4 <= length <= 128
+ * Wait for first 4 bytes to be in FIFO
+ * If CONSERVATIVE_BMI_READ is enabled, also wait for
+ * a BMI command credit, which indicates that the ENTIRE
+ * response is available in the FIFO
+ *
+ * CASE 3: length > 128
+ * Wait for the first 4 bytes to be in FIFO
+ *
+ * For most uses, a small timeout should be sufficient and we will
+ * usually see a response quickly; but there may be some unusual
+ * (debug) cases of BMI_EXECUTE where we want a larger timeout.
+ * For now, we use an unbounded busy loop while waiting for
+ * BMI_EXECUTE.
+ *
+ * If BMI_EXECUTE ever needs to support longer-latency execution,
+ * especially in production, this code needs to be enhanced to sleep
+ * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
+ * a function of Host processor speed.
+ */
+ ret = ath10k_sdio_bmi_get_rx_lookahead(ar);
+ if (ret)
+ return ret;
+
+ /* We always read from the start of the mbox address */
+ addr = ar_sdio->mbox_info.htc_addr;
+ ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len);
+ if (ret) {
+ ath10k_warn(ar,
+ "unable to read the bmi data from the device: %d\n",
+ ret);
+ return ret;
+ }
+
+ memcpy(resp, ar_sdio->bmi_buf, *resp_len);
+
+ return 0;
+}
+
+/* sdio async handling functions */
+
+static struct ath10k_sdio_bus_request
+*ath10k_sdio_alloc_busreq(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_bus_request *bus_req;
+
+ spin_lock_bh(&ar_sdio->lock);
+
+ if (list_empty(&ar_sdio->bus_req_freeq)) {
+ bus_req = NULL;
+ goto out;
+ }
+
+ bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
+ struct ath10k_sdio_bus_request, list);
+ list_del(&bus_req->list);
+
+out:
+ spin_unlock_bh(&ar_sdio->lock);
+ return bus_req;
+}
+
+static void ath10k_sdio_free_bus_req(struct ath10k *ar,
+ struct ath10k_sdio_bus_request *bus_req)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+
+ memset(bus_req, 0, sizeof(*bus_req));
+
+ spin_lock_bh(&ar_sdio->lock);
+ list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
+ spin_unlock_bh(&ar_sdio->lock);
+}
+
+static void __ath10k_sdio_write_async(struct ath10k *ar,
+ struct ath10k_sdio_bus_request *req)
+{
+ struct ath10k_htc_ep *ep;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = req->skb;
+ ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len);
+ if (ret)
+ ath10k_warn(ar, "failed to write skb to 0x%x asynchronously: %d",
+ req->address, ret);
+
+ if (req->htc_msg) {
+ ep = &ar->htc.endpoint[req->eid];
+ ath10k_htc_notify_tx_completion(ep, skb);
+ } else if (req->comp) {
+ complete(req->comp);
+ }
+
+ ath10k_sdio_free_bus_req(ar, req);
+}
+
+static void ath10k_sdio_write_async_work(struct work_struct *work)
+{
+ struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
+ wr_async_work);
+ struct ath10k *ar = ar_sdio->ar;
+ struct ath10k_sdio_bus_request *req, *tmp_req;
+
+ spin_lock_bh(&ar_sdio->wr_async_lock);
+
+ list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
+ list_del(&req->list);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
+ __ath10k_sdio_write_async(ar, req);
+ spin_lock_bh(&ar_sdio->wr_async_lock);
+ }
+
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
+}
+
+static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,
+ struct sk_buff *skb,
+ struct completion *comp,
+ bool htc_msg, enum ath10k_htc_ep_id eid)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_bus_request *bus_req;
+
+ /* Allocate a bus request for the message and queue it on the
+ * SDIO workqueue.
+ */
+ bus_req = ath10k_sdio_alloc_busreq(ar);
+ if (!bus_req) {
+ ath10k_warn(ar,
+ "unable to allocate bus request for async request\n");
+ return -ENOMEM;
+ }
+
+ bus_req->skb = skb;
+ bus_req->eid = eid;
+ bus_req->address = addr;
+ bus_req->htc_msg = htc_msg;
+ bus_req->comp = comp;
+
+ spin_lock_bh(&ar_sdio->wr_async_lock);
+ list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+ return 0;
+}
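The alloc/free pair above implements a fixed-size object pool backed by a free list, seeded at probe time by "freeing" every element of bus_req[]. A minimal single-threaded sketch of the same pattern; the kernel version adds a spinlock and uses struct list_head instead of a hand-rolled next pointer:

#include <stdio.h>
#include <stddef.h>

#define POOL_SIZE 4

struct bus_req {
	struct bus_req *next;
	unsigned int address;
};

static struct bus_req pool[POOL_SIZE];
static struct bus_req *freeq;

static void req_free(struct bus_req *req)
{
	req->next = freeq;
	freeq = req;
}

static struct bus_req *req_alloc(void)
{
	struct bus_req *req = freeq;

	if (req)
		freeq = req->next;
	return req; /* NULL when the pool is exhausted */
}

int main(void)
{
	int i;
	struct bus_req *req;

	/* Seed the free list, as the probe path does with bus_req[] */
	for (i = 0; i < POOL_SIZE; i++)
		req_free(&pool[i]);

	req = req_alloc();
	printf("got request %td\n", req - pool);
	req_free(req);
	return 0;
}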
+
+/* IRQ handler */
+
+static void ath10k_sdio_irq_handler(struct sdio_func *func)
+{
+ struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
+ struct ath10k *ar = ar_sdio->ar;
+ unsigned long timeout;
+ bool done = false;
+ int ret;
+
+ /* Release the host during interrupts so we can pick it back up when
+ * we process commands.
+ */
+ sdio_release_host(ar_sdio->func);
+
+ timeout = jiffies + ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ;
+ while (time_before(jiffies, timeout) && !done) {
+ ret = ath10k_sdio_mbox_proc_pending_irqs(ar, &done);
+ if (ret)
+ break;
+ }
+
+ sdio_claim_host(ar_sdio->func);
+
+ wake_up(&ar_sdio->irq_wq);
+
+ if (ret && ret != -ECANCELED)
+ ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n",
+ ret);
+}
+
+/* sdio HIF functions */
+
+static int ath10k_sdio_hif_disable_intrs(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+ struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
+ int ret;
+
+ mutex_lock(&irq_data->mtx);
+
+ memset(regs, 0, sizeof(*regs));
+ ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
+ ®s->int_status_en, sizeof(*regs));
+ if (ret)
+ ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret);
+
+ mutex_unlock(&irq_data->mtx);
+
+ return ret;
+}
+
+static int ath10k_sdio_hif_power_up(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ if (!ar_sdio->is_disabled)
+ return 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n");
+
+ sdio_claim_host(func);
+
+ ret = sdio_enable_func(func);
+ if (ret) {
+ ath10k_warn(ar, "unable to enable sdio function: %d)\n", ret);
+ sdio_release_host(func);
+ return ret;
+ }
+
+ sdio_release_host(func);
+
+ /* Wait for hardware to initialise. It should take a lot less than
+ * 20 ms but let's be conservative here.
+ */
+ msleep(20);
+
+ ar_sdio->is_disabled = false;
+
+ ret = ath10k_sdio_hif_disable_intrs(ar);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void ath10k_sdio_hif_power_down(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ int ret;
+
+ if (ar_sdio->is_disabled)
+ return;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");
+
+ /* Disable the card */
+ sdio_claim_host(ar_sdio->func);
+ ret = sdio_disable_func(ar_sdio->func);
+ sdio_release_host(ar_sdio->func);
+
+ if (ret)
+ ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);
+
+ ar_sdio->is_disabled = true;
+}
+
+static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ enum ath10k_htc_ep_id eid;
+ struct sk_buff *skb;
+ int ret, i;
+
+ eid = pipe_id_to_eid(pipe_id);
+
+ for (i = 0; i < n_items; i++) {
+ size_t padded_len;
+ u32 address;
+
+ skb = items[i].transfer_context;
+ padded_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio,
+ skb->len);
+ skb_trim(skb, padded_len);
+
+ /* Write TX data to the end of the mbox address space */
+ address = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid] -
+ skb->len;
+ ret = ath10k_sdio_prep_async_req(ar, address, skb,
+ NULL, true, eid);
+ if (ret)
+ return ret;
+ }
+
+ queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
+
+ return 0;
+}
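The TX address computation above places each padded frame so that its last byte coincides with the last byte of the mailbox window. A worked numeric check with example values; the real base and width come from mbox_info at runtime:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mbox_addr = 0x5000;    /* example mailbox base */
	uint32_t mbox_size = 36 * 1024; /* example mailbox width */
	uint32_t skb_len   = 256;       /* already padded frame length */

	/* Write so the frame ends exactly at the window's last byte */
	uint32_t address = mbox_addr + mbox_size - skb_len;

	printf("tx address: 0x%x\n", address); /* 0xdf00 */
	return 0;
}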
+
+static int ath10k_sdio_hif_enable_intrs(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+ struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
+ int ret;
+
+ mutex_lock(&irq_data->mtx);
+
+ /* Enable all but CPU interrupts */
+ regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) |
+ FIELD_PREP(MBOX_INT_STATUS_ENABLE_CPU_MASK, 1) |
+ FIELD_PREP(MBOX_INT_STATUS_ENABLE_COUNTER_MASK, 1);
+
+ /* NOTE: There are some cases where HIF can do detection of
+ * pending mbox messages which is disabled now.
+ */
+ regs->int_status_en |=
+ FIELD_PREP(MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK, 1);
+
+ /* Set up the CPU Interrupt status Register */
+ regs->cpu_int_status_en = 0;
+
+ /* Set up the Error Interrupt status Register */
+ regs->err_int_status_en =
+ FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, 1) |
+ FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, 1);
+
+ /* Enable Counter interrupt status register to get fatal errors for
+ * debugging.
+ */
+ regs->cntr_int_status_en =
+ FIELD_PREP(MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK,
+ ATH10K_SDIO_TARGET_DEBUG_INTR_MASK);
+
+ ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
+ ®s->int_status_en, sizeof(*regs));
+ if (ret)
+ ath10k_warn(ar,
+ "failed to update mbox interrupt status register : %d\n",
+ ret);
+
+ mutex_unlock(&irq_data->mtx);
+ return ret;
+}
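FIELD_PREP() is the inverse of FIELD_GET(): it shifts a value up into the position described by a mask, which is how the enable bits above are assembled. A standalone sketch with assumed mask positions; the real MBOX_INT_STATUS_ENABLE_* masks are defined elsewhere in the driver:

#include <stdio.h>

/* Assumed mask positions, for illustration only */
#define ERROR_MASK   0x80
#define CPU_MASK     0x40
#define COUNTER_MASK 0x10

/* Userspace stand-in for the kernel's FIELD_PREP() */
static unsigned int field_prep(unsigned int mask, unsigned int val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	unsigned int en = field_prep(ERROR_MASK, 1) |
			  field_prep(CPU_MASK, 1) |
			  field_prep(COUNTER_MASK, 1);

	printf("int_status_en = 0x%x\n", en); /* 0xd0 with these masks */
	return 0;
}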
+
+static int ath10k_sdio_hif_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
+{
+ u32 val;
+ int ret;
+
+ ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
+ if (ret) {
+ ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (enable_sleep)
+ val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
+ else
+ val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
+
+ ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
+ if (ret) {
+ ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* HIF diagnostics */
+
+static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len)
+{
+ int ret;
+
+ /* set window register to start read cycle */
+ ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
+ if (ret) {
+ ath10k_warn(ar, "failed to set mbox window read address: %d", ret);
+ return ret;
+ }
+
+ /* read the data */
+ ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, buf, buf_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read from mbox window data addrress: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address,
+ u32 *value)
+{
+ __le32 *val;
+ int ret;
+
+ val = kzalloc(sizeof(*val), GFP_KERNEL);
+ if (!val)
+ return -ENOMEM;
+
+ ret = ath10k_sdio_hif_diag_read(ar, address, val, sizeof(*val));
+ if (ret)
+ goto out;
+
+ *value = __le32_to_cpu(*val);
+
+out:
+ kfree(val);
+
+ return ret;
+}
+
+static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
+ const void *data, int nbytes)
+{
+ int ret;
+
+ /* set write data */
+ ret = ath10k_sdio_write(ar, MBOX_WINDOW_DATA_ADDRESS, data, nbytes);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to write 0x%p to mbox window data addrress: %d\n",
+ data, ret);
+ return ret;
+ }
+
+ /* set window register, which starts the write cycle */
+ ret = ath10k_sdio_write32(ar, MBOX_WINDOW_WRITE_ADDR_ADDRESS, address);
+ if (ret) {
+ ath10k_warn(ar, "failed to set mbox window write address: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* HIF start/stop */
+
+static int ath10k_sdio_hif_start(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ u32 addr, val;
+ int ret;
+
+ /* Sleep 20 ms before HIF interrupts are disabled.
+ * This will give the target plenty of time to process the BMI done
+ * request before interrupts are disabled.
+ */
+ msleep(20);
+ ret = ath10k_sdio_hif_disable_intrs(ar);
+ if (ret)
+ return ret;
+
+ /* eid 0 always uses the lower part of the extended mailbox address
+ * space (ext_info[0].htc_ext_addr).
+ */
+ ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
+ ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
+
+ sdio_claim_host(ar_sdio->func);
+
+ /* Register the isr */
+ ret = sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler);
+ if (ret) {
+ ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret);
+ sdio_release_host(ar_sdio->func);
+ return ret;
+ }
+
+ sdio_release_host(ar_sdio->func);
+
+ ret = ath10k_sdio_hif_enable_intrs(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);
+
+ addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
+
+ ret = ath10k_sdio_hif_diag_read32(ar, addr, &val);
+ if (ret) {
+ ath10k_warn(ar, "unable to read hi_acs_flags address: %d\n", ret);
+ return ret;
+ }
+
+ if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) {
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio mailbox swap service enabled\n");
+ ar_sdio->swap_mbox = true;
+ }
+
+ /* Enable sleep and then disable it again */
+ ret = ath10k_sdio_hif_set_mbox_sleep(ar, true);
+ if (ret)
+ return ret;
+
+ /* Wait for 20ms for the written value to take effect */
+ msleep(20);
+
+ ret = ath10k_sdio_hif_set_mbox_sleep(ar, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+#define SDIO_IRQ_DISABLE_TIMEOUT_HZ (3 * HZ)
+
+static void ath10k_sdio_irq_disable(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+ struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
+ struct sk_buff *skb;
+ struct completion irqs_disabled_comp;
+ int ret;
+
+ skb = dev_alloc_skb(sizeof(*regs));
+ if (!skb)
+ return;
+
+ mutex_lock(&irq_data->mtx);
+
+ memset(regs, 0, sizeof(*regs)); /* disable all interrupts */
+ memcpy(skb->data, regs, sizeof(*regs));
+ skb_put(skb, sizeof(*regs));
+
+ mutex_unlock(&irq_data->mtx);
+
+ init_completion(&irqs_disabled_comp);
+ ret = ath10k_sdio_prep_async_req(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
+ skb, &irqs_disabled_comp, false, 0);
+ if (ret)
+ goto out;
+
+ queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
+
+ /* Wait for the completion of the IRQ disable request.
+ * If there is a timeout we will try to disable IRQs anyway.
+ */
+ ret = wait_for_completion_timeout(&irqs_disabled_comp,
+ SDIO_IRQ_DISABLE_TIMEOUT_HZ);
+ if (!ret)
+ ath10k_warn(ar, "sdio irq disable request timed out\n");
+
+ sdio_claim_host(ar_sdio->func);
+
+ ret = sdio_release_irq(ar_sdio->func);
+ if (ret)
+ ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret);
+
+ sdio_release_host(ar_sdio->func);
+
+out:
+ kfree_skb(skb);
+}
+
+static void ath10k_sdio_hif_stop(struct ath10k *ar)
+{
+ struct ath10k_sdio_bus_request *req, *tmp_req;
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+
+ ath10k_sdio_irq_disable(ar);
+
+ cancel_work_sync(&ar_sdio->wr_async_work);
+
+ spin_lock_bh(&ar_sdio->wr_async_lock);
+
+ /* Free all bus requests that have not been handled */
+ list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
+ struct ath10k_htc_ep *ep;
+
+ list_del(&req->list);
+
+ if (req->htc_msg) {
+ ep = &ar->htc.endpoint[req->eid];
+ ath10k_htc_notify_tx_completion(ep, req->skb);
+ } else if (req->skb) {
+ kfree_skb(req->skb);
+ }
+ ath10k_sdio_free_bus_req(ar, req);
+ }
+
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
+}
+
+#ifdef CONFIG_PM
+
+static int ath10k_sdio_hif_suspend(struct ath10k *ar)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ath10k_sdio_hif_resume(struct ath10k *ar)
+{
+ switch (ar->state) {
+ case ATH10K_STATE_OFF:
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio resume configuring sdio\n");
+
+ /* need to set sdio settings after power is cut from sdio */
+ ath10k_sdio_config(ar);
+ break;
+
+ case ATH10K_STATE_ON:
+ default:
+ break;
+ }
+
+ return 0;
+}
+#endif
+
+static int ath10k_sdio_hif_map_service_to_pipe(struct ath10k *ar,
+ u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_htc *htc = &ar->htc;
+ u32 htt_addr, wmi_addr, htt_mbox_size, wmi_mbox_size;
+ enum ath10k_htc_ep_id eid;
+ bool ep_found = false;
+ int i;
+
+ /* For sdio, we are interested in the mapping between eid
+ * and pipeid rather than service_id to pipe_id.
+ * First we find out which eid has been allocated to the
+ * service...
+ */
+ for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
+ if (htc->endpoint[i].service_id == service_id) {
+ eid = htc->endpoint[i].eid;
+ ep_found = true;
+ break;
+ }
+ }
+
+ if (!ep_found)
+ return -EINVAL;
+
+ /* Then we create the simplest mapping possible between pipeid
+ * and eid
+ */
+ *ul_pipe = *dl_pipe = (u8)eid;
+
+ /* Normally, HTT will use the upper part of the extended
+ * mailbox address space (ext_info[1].htc_ext_addr) and WMI ctrl
+ * the lower part (ext_info[0].htc_ext_addr).
+ * If fw wants swapping of mailbox addresses, the opposite is true.
+ */
+ if (ar_sdio->swap_mbox) {
+ htt_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
+ wmi_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
+ htt_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
+ wmi_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
+ } else {
+ htt_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
+ wmi_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
+ htt_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
+ wmi_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
+ }
+
+ switch (service_id) {
+ case ATH10K_HTC_SVC_ID_RSVD_CTRL:
+ /* HTC ctrl ep mbox address has already been setup in
+ * ath10k_sdio_hif_start
+ */
+ break;
+ case ATH10K_HTC_SVC_ID_WMI_CONTROL:
+ ar_sdio->mbox_addr[eid] = wmi_addr;
+ ar_sdio->mbox_size[eid] = wmi_mbox_size;
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n",
+ ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
+ break;
+ case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
+ ar_sdio->mbox_addr[eid] = htt_addr;
+ ar_sdio->mbox_size[eid] = htt_mbox_size;
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio htt data mbox_addr 0x%x mbox_size %d\n",
+ ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
+ break;
+ default:
+ ath10k_warn(ar, "unsupported HTC service id: %d\n",
+ service_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n");
+
+ /* HTC ctrl ep (SVC id 1) always has eid (and pipe_id in our
+ * case) == 0
+ */
+ *ul_pipe = 0;
+ *dl_pipe = 0;
+}
+
+/* This op is currently only used by htc_wait_target if the HTC ready
+ * message times out. It is not applicable for SDIO since there is nothing
+ * we can do if the HTC ready message does not arrive in time.
+ * TODO: Make this op non-mandatory by introducing a NULL check in the
+ * hif op wrapper.
+ */
+static void ath10k_sdio_hif_send_complete_check(struct ath10k *ar,
+ u8 pipe, int force)
+{
+}
+
+static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
+ .tx_sg = ath10k_sdio_hif_tx_sg,
+ .diag_read = ath10k_sdio_hif_diag_read,
+ .diag_write = ath10k_sdio_hif_diag_write_mem,
+ .exchange_bmi_msg = ath10k_sdio_bmi_exchange_msg,
+ .start = ath10k_sdio_hif_start,
+ .stop = ath10k_sdio_hif_stop,
+ .map_service_to_pipe = ath10k_sdio_hif_map_service_to_pipe,
+ .get_default_pipe = ath10k_sdio_hif_get_default_pipe,
+ .send_complete_check = ath10k_sdio_hif_send_complete_check,
+ .power_up = ath10k_sdio_hif_power_up,
+ .power_down = ath10k_sdio_hif_power_down,
+#ifdef CONFIG_PM
+ .suspend = ath10k_sdio_hif_suspend,
+ .resume = ath10k_sdio_hif_resume,
+#endif
+};
+
+#ifdef CONFIG_PM_SLEEP
+
+/* Empty handlers so that mmc subsystem doesn't remove us entirely during
+ * suspend. We instead follow cfg80211 suspend/resume handlers.
+ */
+static int ath10k_sdio_pm_suspend(struct device *device)
+{
+ return 0;
+}
+
+static int ath10k_sdio_pm_resume(struct device *device)
+{
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend,
+ ath10k_sdio_pm_resume);
+
+#define ATH10K_SDIO_PM_OPS (&ath10k_sdio_pm_ops)
+
+#else
+
+#define ATH10K_SDIO_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+static int ath10k_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ struct ath10k_sdio *ar_sdio;
+ struct ath10k *ar;
+ enum ath10k_hw_rev hw_rev;
+ u32 chip_id, dev_id_base;
+ int ret, i;
+
+ /* Assumption: All SDIO based chipsets (so far) are QCA6174 based.
+ * If newer chipsets arrive that do not use the hw reg
+ * setup as defined in qca6174_regs and qca6174_values, this
+ * assumption is no longer valid and hw_rev must be set up differently
+ * depending on chipset.
+ */
+ hw_rev = ATH10K_HW_QCA6174;
+
+ ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO,
+ hw_rev, &ath10k_sdio_hif_ops);
+ if (!ar) {
+ dev_err(&func->dev, "failed to allocate core\n");
+ return -ENOMEM;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
+ func->num, func->vendor, func->device,
+ func->max_blksize, func->cur_blksize);
+
+ ar_sdio = ath10k_sdio_priv(ar);
+
+ ar_sdio->irq_data.irq_proc_reg =
+ kzalloc(sizeof(struct ath10k_sdio_irq_proc_regs),
+ GFP_KERNEL);
+ if (!ar_sdio->irq_data.irq_proc_reg) {
+ ret = -ENOMEM;
+ goto err_core_destroy;
+ }
+
+ ar_sdio->irq_data.irq_en_reg =
+ kzalloc(sizeof(struct ath10k_sdio_irq_enable_regs),
+ GFP_KERNEL);
+ if (!ar_sdio->irq_data.irq_en_reg) {
+ ret = -ENOMEM;
+ goto err_free_proc_reg;
+ }
+
+ ar_sdio->bmi_buf = kzalloc(BMI_MAX_CMDBUF_SIZE, GFP_KERNEL);
+ if (!ar_sdio->bmi_buf) {
+ ret = -ENOMEM;
+ goto err_free_en_reg;
+ }
+
+ ar_sdio->func = func;
+ sdio_set_drvdata(func, ar_sdio);
+
+ ar_sdio->is_disabled = true;
+ ar_sdio->ar = ar;
+
+ spin_lock_init(&ar_sdio->lock);
+ spin_lock_init(&ar_sdio->wr_async_lock);
+ mutex_init(&ar_sdio->irq_data.mtx);
+
+ INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
+ INIT_LIST_HEAD(&ar_sdio->wr_asyncq);
+
+ INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work);
+ ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq");
+ if (!ar_sdio->workqueue) {
+ ret = -ENOMEM;
+ goto err_free_bmi_buf;
+ }
+
+ init_waitqueue_head(&ar_sdio->irq_wq);
+
+ for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
+ ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);
+
+ dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, id->device);
+ switch (dev_id_base) {
+ case QCA_MANUFACTURER_ID_AR6005_BASE:
+ case QCA_MANUFACTURER_ID_QCA9377_BASE:
+ ar->dev_id = QCA9377_1_0_DEVICE_ID;
+ break;
+ default:
+ ret = -ENODEV;
+ ath10k_err(ar, "unsupported device id %u (0x%x)\n",
+ dev_id_base, id->device);
+ goto err_free_bmi_buf;
+ }
+
+ ar->id.vendor = id->vendor;
+ ar->id.device = id->device;
+
+ ath10k_sdio_set_mbox_info(ar);
+
+ ret = ath10k_sdio_config(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to config sdio: %d\n", ret);
+ goto err_free_wq;
+ }
+
+ /* TODO: don't know yet how to get chip_id with SDIO */
+ chip_id = 0;
+ ret = ath10k_core_register(ar, chip_id);
+ if (ret) {
+ ath10k_err(ar, "failed to register driver core: %d\n", ret);
+ goto err_free_wq;
+ }
+
+ /* TODO: remove this once SDIO support is fully implemented */
+ ath10k_warn(ar, "WARNING: ath10k SDIO support is incomplete, don't expect anything to work!\n");
+
+ return 0;
+
+err_free_wq:
+ destroy_workqueue(ar_sdio->workqueue);
+err_free_bmi_buf:
+ kfree(ar_sdio->bmi_buf);
+err_free_en_reg:
+ kfree(ar_sdio->irq_data.irq_en_reg);
+err_free_proc_reg:
+ kfree(ar_sdio->irq_data.irq_proc_reg);
+err_core_destroy:
+ ath10k_core_destroy(ar);
+
+ return ret;
+}
+
+static void ath10k_sdio_remove(struct sdio_func *func)
+{
+ struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
+ struct ath10k *ar = ar_sdio->ar;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "sdio removed func %d vendor 0x%x device 0x%x\n",
+ func->num, func->vendor, func->device);
+
+ (void)ath10k_sdio_hif_disable_intrs(ar);
+ cancel_work_sync(&ar_sdio->wr_async_work);
+ ath10k_core_unregister(ar);
+ ath10k_core_destroy(ar);
+}
+
+static const struct sdio_device_id ath10k_sdio_devices[] = {
+ {SDIO_DEVICE(QCA_MANUFACTURER_CODE,
+ (QCA_SDIO_ID_AR6005_BASE | 0xA))},
+ {SDIO_DEVICE(QCA_MANUFACTURER_CODE,
+ (QCA_SDIO_ID_QCA9377_BASE | 0x1))},
+ {},
+};
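The SDIO device IDs in this table pack the chipset base into bits 11:8 (QCA_MANUFACTURER_ID_BASE) and the revision into the low byte (QCA_MANUFACTURER_ID_REV_MASK), both defined in sdio.h above. A worked decode:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t device = 0x50A; /* QCA_SDIO_ID_AR6005_BASE | 0xA */

	/* base in bits 11:8, revision in bits 7:0 */
	printf("base 0x%x rev 0x%x\n",
	       (device >> 8) & 0xF, device & 0xFF);
	return 0;
}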
+
+MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices);
+
+static struct sdio_driver ath10k_sdio_driver = {
+ .name = "ath10k_sdio",
+ .id_table = ath10k_sdio_devices,
+ .probe = ath10k_sdio_probe,
+ .remove = ath10k_sdio_remove,
+ .drv.pm = ATH10K_SDIO_PM_OPS,
+};
+
+static int __init ath10k_sdio_init(void)
+{
+ int ret;
+
+ ret = sdio_register_driver(&ath10k_sdio_driver);
+ if (ret)
+ pr_err("sdio driver registration failed: %d\n", ret);
+
+ return ret;
+}
+
+static void __exit ath10k_sdio_exit(void)
+{
+ sdio_unregister_driver(&ath10k_sdio_driver);
+}
+
+module_init(ath10k_sdio_init);
+module_exit(ath10k_sdio_exit);
+
+MODULE_AUTHOR("Qualcomm Atheros");
+MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/sdio.h b/drivers/net/wireless/ath/ath10k/sdio.h
new file mode 100644
index 0000000..3f61c67
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/sdio.h
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SDIO_H_
+#define _SDIO_H_
+
+#define ATH10K_HIF_MBOX_BLOCK_SIZE 256
+
+#define QCA_MANUFACTURER_ID_BASE GENMASK(11, 8)
+#define QCA_MANUFACTURER_ID_AR6005_BASE 0x5
+#define QCA_MANUFACTURER_ID_QCA9377_BASE 0x7
+#define QCA_SDIO_ID_AR6005_BASE 0x500
+#define QCA_SDIO_ID_QCA9377_BASE 0x700
+#define QCA_MANUFACTURER_ID_REV_MASK 0x00FF
+#define QCA_MANUFACTURER_CODE 0x271 /* Qualcomm/Atheros */
+
+#define ATH10K_SDIO_MAX_BUFFER_SIZE 4096 /* Unsure of this constant */
+
+/* Mailbox address in SDIO address space */
+#define ATH10K_HIF_MBOX_BASE_ADDR 0x1000
+#define ATH10K_HIF_MBOX_WIDTH 0x800
+
+#define ATH10K_HIF_MBOX_TOT_WIDTH \
+ (ATH10K_HIF_MBOX_NUM_MAX * ATH10K_HIF_MBOX_WIDTH)
+
+#define ATH10K_HIF_MBOX0_EXT_BASE_ADDR 0x5000
+#define ATH10K_HIF_MBOX0_EXT_WIDTH (36 * 1024)
+#define ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0 (56 * 1024)
+#define ATH10K_HIF_MBOX1_EXT_WIDTH (36 * 1024)
+#define ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE (2 * 1024)
+
+#define ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH \
+ (ATH10K_SDIO_MAX_BUFFER_SIZE - sizeof(struct ath10k_htc_hdr))
+
+#define ATH10K_HIF_MBOX_NUM_MAX 4
+#define ATH10K_SDIO_BUS_REQUEST_MAX_NUM 64
+
+#define ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ (100 * HZ)
+
+/* HTC runs over mailbox 0 */
+#define ATH10K_HTC_MAILBOX 0
+#define ATH10K_HTC_MAILBOX_MASK BIT(ATH10K_HTC_MAILBOX)
+
+/* GMBOX addresses */
+#define ATH10K_HIF_GMBOX_BASE_ADDR 0x7000
+#define ATH10K_HIF_GMBOX_WIDTH 0x4000
+
+/* Modified versions of the sdio.h macros.
+ * The macros in sdio.h can't be used easily with the FIELD_{PREP|GET}
+ * macros in bitfield.h, so we define our own macros here.
+ */
+#define ATH10K_SDIO_DRIVE_DTSX_MASK \
+ (SDIO_DRIVE_DTSx_MASK << SDIO_DRIVE_DTSx_SHIFT)
+
+#define ATH10K_SDIO_DRIVE_DTSX_TYPE_B 0
+#define ATH10K_SDIO_DRIVE_DTSX_TYPE_A 1
+#define ATH10K_SDIO_DRIVE_DTSX_TYPE_C 2
+#define ATH10K_SDIO_DRIVE_DTSX_TYPE_D 3
+
+/* SDIO CCCR register definitions */
+#define CCCR_SDIO_IRQ_MODE_REG 0xF0
+#define CCCR_SDIO_IRQ_MODE_REG_SDIO3 0x16
+
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR 0xF2
+
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A 0x02
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C 0x04
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D 0x08
+
+#define CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS 0xF0
+#define CCCR_SDIO_ASYNC_INT_DELAY_MASK 0xC0
+
+/* mode to enable special 4-bit interrupt assertion without clock */
+#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ BIT(0)
+#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3 BIT(1)
+
+#define ATH10K_SDIO_TARGET_DEBUG_INTR_MASK 0x01
+
+/* The theoretical maximum number of RX messages that can be fetched
+ * from the mbox interrupt handler in one loop is derived in the following
+ * way:
+ *
+ * Let's assume that each packet in a bundle of the maximum bundle size
+ * (HTC_HOST_MAX_MSG_PER_BUNDLE) has the HTC header bundle count set
+ * to the maximum value (HTC_HOST_MAX_MSG_PER_BUNDLE).
+ *
+ * In this case the driver must allocate
+ * (HTC_HOST_MAX_MSG_PER_BUNDLE * HTC_HOST_MAX_MSG_PER_BUNDLE) skb's.
+ */
+#define ATH10K_SDIO_MAX_RX_MSGS \
+ (HTC_HOST_MAX_MSG_PER_BUNDLE * HTC_HOST_MAX_MSG_PER_BUNDLE)
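A quick worked instance of the bound derived in the comment above, assuming for illustration that HTC_HOST_MAX_MSG_PER_BUNDLE is 8; its actual value is defined in htc.h, not in this patch:

#include <stdio.h>

#define HTC_HOST_MAX_MSG_PER_BUNDLE 8 /* assumed for illustration */

int main(void)
{
	/* every packet of a full bundle may itself announce a full bundle */
	printf("max rx msgs per irq loop: %d\n",
	       HTC_HOST_MAX_MSG_PER_BUNDLE * HTC_HOST_MAX_MSG_PER_BUNDLE);
	return 0;
}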
+
+#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL 0x00000868u
+#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF
+#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON 0x10000
+
+struct ath10k_sdio_bus_request {
+ struct list_head list;
+
+ /* sdio address */
+ u32 address;
+
+ struct sk_buff *skb;
+ enum ath10k_htc_ep_id eid;
+ int status;
+ /* Specifies if the current request is an HTC message.
+ * If not, the eid is not applicable and the TX completion handler
+ * associated with the endpoint will not be invoked.
+ */
+ bool htc_msg;
+ /* Completion that (if set) will be invoked for non HTC requests
+ * (htc_msg == false) when the request has been processed.
+ */
+ struct completion *comp;
+};
+
+struct ath10k_sdio_rx_data {
+ struct sk_buff *skb;
+ size_t alloc_len;
+ size_t act_len;
+ enum ath10k_htc_ep_id eid;
+ bool part_of_bundle;
+ bool last_in_bundle;
+ bool trailer_only;
+ int status;
+};
+
+struct ath10k_sdio_irq_proc_regs {
+ u8 host_int_status;
+ u8 cpu_int_status;
+ u8 error_int_status;
+ u8 counter_int_status;
+ u8 mbox_frame;
+ u8 rx_lookahead_valid;
+ u8 host_int_status2;
+ u8 gmbox_rx_avail;
+ __le32 rx_lookahead[2];
+ __le32 rx_gmbox_lookahead_alias[2];
+};
+
+struct ath10k_sdio_irq_enable_regs {
+ u8 int_status_en;
+ u8 cpu_int_status_en;
+ u8 err_int_status_en;
+ u8 cntr_int_status_en;
+};
+
+struct ath10k_sdio_irq_data {
+ /* protects irq_proc_reg and irq_en_reg below.
+ * We use a mutex here and not a spinlock since we will have the
+ * mutex locked while calling the sdio_memcpy_ functions.
+ * These functions require non-atomic context, and hence, spinlocks
+ * cannot be held while calling these functions.
+ */
+ struct mutex mtx;
+ struct ath10k_sdio_irq_proc_regs *irq_proc_reg;
+ struct ath10k_sdio_irq_enable_regs *irq_en_reg;
+};
+
+struct ath10k_mbox_ext_info {
+ u32 htc_ext_addr;
+ u32 htc_ext_sz;
+};
+
+struct ath10k_mbox_info {
+ u32 htc_addr;
+ struct ath10k_mbox_ext_info ext_info[2];
+ u32 block_size;
+ u32 block_mask;
+ u32 gmbox_addr;
+ u32 gmbox_sz;
+};
+
+struct ath10k_sdio {
+ struct sdio_func *func;
+
+ struct ath10k_mbox_info mbox_info;
+ bool swap_mbox;
+ u32 mbox_addr[ATH10K_HTC_EP_COUNT];
+ u32 mbox_size[ATH10K_HTC_EP_COUNT];
+
+ /* available bus requests */
+ struct ath10k_sdio_bus_request bus_req[ATH10K_SDIO_BUS_REQUEST_MAX_NUM];
+ /* free list of bus requests */
+ struct list_head bus_req_freeq;
+ /* protects access to bus_req_freeq */
+ spinlock_t lock;
+
+ struct ath10k_sdio_rx_data rx_pkts[ATH10K_SDIO_MAX_RX_MSGS];
+ size_t n_rx_pkts;
+
+ struct ath10k *ar;
+ struct ath10k_sdio_irq_data irq_data;
+
+ /* temporary buffer for BMI requests */
+ u8 *bmi_buf;
+
+ wait_queue_head_t irq_wq;
+
+ bool is_disabled;
+
+ struct workqueue_struct *workqueue;
+ struct work_struct wr_async_work;
+ struct list_head wr_asyncq;
+ /* protects access to wr_asyncq */
+ spinlock_t wr_async_lock;
+};
+
+static inline struct ath10k_sdio *ath10k_sdio_priv(struct ath10k *ar)
+{
+ return (struct ath10k_sdio *)ar->drv_priv;
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index cbac9e42..8bded5d 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -205,6 +205,24 @@
*/
/* Bit 1 - unused */
u32 hi_fw_swap; /* 0x104 */
+
+ /* global arenas pointer address, used by host driver debug */
+ u32 hi_dynamic_mem_arenas_addr; /* 0x108 */
+
+ /* bytes of DRAM currently allocated */
+ u32 hi_dynamic_mem_allocated; /* 0x10C */
+
+ /* remaining bytes of DRAM */
+ u32 hi_dynamic_mem_remaining; /* 0x110 */
+
+ /* memory track count, configured by host */
+ u32 hi_dynamic_mem_track_max; /* 0x114 */
+
+ /* minidump buffer */
+ u32 hi_minidump; /* 0x118 */
+
+ /* bdata's sig and key addr */
+ u32 hi_bd_sig_key; /* 0x11c */
} __packed;
#define HI_ITEM(item) offsetof(struct host_interest, item)
@@ -319,6 +337,12 @@
#define HI_ACS_FLAGS_USE_WWAN (1 << 1)
/* Use test VAP */
#define HI_ACS_FLAGS_TEST_VAP (1 << 2)
+/* SDIO/mailbox ACS flag definitions */
+#define HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET (1 << 0)
+#define HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET (1 << 1)
+#define HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE (1 << 2)
+#define HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK (1 << 16)
+#define HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_FW_ACK (1 << 17)
/*
* CONSOLE FLAGS
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index d856462..9d3eb25 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -137,6 +137,13 @@
return ret;
}
+ ret = nla_put_u32(skb, ATH10K_TM_ATTR_WMI_OP_VERSION,
+ ar->normal_mode_fw.fw_file.wmi_op_version);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
return cfg80211_testmode_reply(skb);
}
diff --git a/drivers/net/wireless/ath/ath10k/testmode_i.h b/drivers/net/wireless/ath/ath10k/testmode_i.h
index ba81bf6..191a8f3 100644
--- a/drivers/net/wireless/ath/ath10k/testmode_i.h
+++ b/drivers/net/wireless/ath/ath10k/testmode_i.h
@@ -33,6 +33,7 @@
ATH10K_TM_ATTR_WMI_CMDID = 3,
ATH10K_TM_ATTR_VERSION_MAJOR = 4,
ATH10K_TM_ATTR_VERSION_MINOR = 5,
+ ATH10K_TM_ATTR_WMI_OP_VERSION = 6,
/* keep last */
__ATH10K_TM_ATTR_AFTER_LAST,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 6afc8d2..a66e248 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -3304,9 +3304,8 @@
if (arvif->u.ap.noa_data)
if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
- memcpy(skb_put(bcn, arvif->u.ap.noa_len),
- arvif->u.ap.noa_data,
- arvif->u.ap.noa_len);
+ skb_put_data(bcn, arvif->u.ap.noa_data,
+ arvif->u.ap.noa_len);
}
static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index d068df5..bd7f6d7 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -938,7 +938,10 @@
}
for (i = 0; i < eesize; ++i) {
- AR5K_EEPROM_READ(i, val);
+ if (!ath5k_hw_nvram_read(ah, i, &val)) {
+ ret = -EIO;
+ goto freebuf;
+ }
buf[i] = val;
}
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index e2b7809..1eea6c2 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -348,7 +348,7 @@
if (!skb)
return;
- slot = (struct ath6kl_fwlog_slot *) skb_put(skb, slot_len);
+ slot = skb_put(skb, slot_len);
slot->timestamp = cpu_to_le32(jiffies);
slot->length = cpu_to_le32(len);
memcpy(slot->payload, buf, len);
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index d127a08..5c0ba83 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -228,8 +228,7 @@
payload_len = packet->act_len;
/* setup HTC frame header */
- htc_hdr = (struct htc_frame_hdr *) skb_push(skb,
- sizeof(*htc_hdr));
+ htc_hdr = skb_push(skb, sizeof(*htc_hdr));
if (!htc_hdr) {
WARN_ON_ONCE(1);
status = -EINVAL;
@@ -1274,8 +1273,7 @@
length = sizeof(struct htc_conn_service_msg);
/* assemble connect service message */
- conn_msg = (struct htc_conn_service_msg *) skb_put(skb,
- length);
+ conn_msg = skb_put(skb, length);
if (conn_msg == NULL) {
WARN_ON_ONCE(1);
status = -EINVAL;
@@ -1504,8 +1502,7 @@
skb = packet->skb;
/* assemble setup complete message */
- setup = (struct htc_setup_comp_ext_msg *) skb_put(skb,
- sizeof(*setup));
+ setup = skb_put(skb, sizeof(*setup));
memset(setup, 0, sizeof(struct htc_setup_comp_ext_msg));
setup->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index a531e0c..e6b2517 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -399,15 +399,10 @@
csum_dest = skb->csum_offset + csum_start;
}
- if (skb_headroom(skb) < dev->needed_headroom) {
- struct sk_buff *tmp_skb = skb;
-
- skb = skb_realloc_headroom(skb, dev->needed_headroom);
- kfree_skb(tmp_skb);
- if (skb == NULL) {
- dev->stats.tx_dropped++;
- return 0;
- }
+ if (skb_cow_head(skb, dev->needed_headroom)) {
+ dev->stats.tx_dropped++;
+ kfree_skb(skb);
+ return 0;
}
if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index b84539d..f0439f2 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -1005,7 +1005,7 @@
info->flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
if (req->ie_len)
- memcpy(skb_put(skb, req->ie_len), req->ie, req->ie_len);
+ skb_put_data(skb, req->ie, req->ie_len);
skb_set_queue_mapping(skb, IEEE80211_AC_VO);
@@ -1521,13 +1521,11 @@
noa_desc = !!avp->offchannel_duration + !!avp->noa_duration;
noa_len = 2 + sizeof(struct ieee80211_p2p_noa_desc) * noa_desc;
- hdr = skb_put(skb, sizeof(noa_ie_hdr));
- memcpy(hdr, noa_ie_hdr, sizeof(noa_ie_hdr));
+ hdr = skb_put_data(skb, noa_ie_hdr, sizeof(noa_ie_hdr));
hdr[1] = sizeof(noa_ie_hdr) + noa_len - 2;
hdr[7] = noa_len;
- noa = (void *) skb_put(skb, noa_len);
- memset(noa, 0, noa_len);
+ noa = skb_put_zero(skb, noa_len);
noa->index = avp->noa_index;
noa->oppps_ctwindow = ath9k_get_ctwin(sc, avp);
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index c67d0e0..099f3d4 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -369,7 +369,7 @@
{
struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
- if (reg->power_limit != new_txpow)
+ if (ah->curchan && reg->power_limit != new_txpow)
ath9k_hw_set_txpowerlimit(ah, new_txpow, false);
/* read back in case value is clamped */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index 6ccf248..6fbd555 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -143,7 +143,7 @@
if (ah->eeprom_blob)
ret = ath9k_hw_nvram_read_firmware(ah->eeprom_blob, off, data);
- else if (pdata && !pdata->use_eeprom && pdata->eeprom_data)
+ else if (pdata && !pdata->use_eeprom)
ret = ath9k_hw_nvram_read_pdata(pdata, off, data);
else
ret = common->bus_ops->eeprom_read(common, off, data);
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 12aa8ab..0d9687a 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -199,7 +199,7 @@
cmd->skb = skb;
cmd->hif_dev = hif_dev;
- hdr = (__le16 *) skb_push(skb, 4);
+ hdr = skb_push(skb, 4);
*hdr++ = cpu_to_le16(skb->len - 4);
*hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 8e6dae2..1bf63a4 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -26,8 +26,7 @@
struct htc_endpoint *endpoint = &target->endpoint[epid];
int status;
- hdr = (struct htc_frame_hdr *)
- skb_push(skb, sizeof(struct htc_frame_hdr));
+ hdr = skb_push(skb, sizeof(struct htc_frame_hdr));
hdr->endpoint_id = epid;
hdr->flags = flags;
hdr->payload_len = cpu_to_be16(len);
@@ -156,8 +155,7 @@
}
skb_reserve(skb, sizeof(struct htc_frame_hdr));
- cp_msg = (struct htc_config_pipe_msg *)
- skb_put(skb, sizeof(struct htc_config_pipe_msg));
+ cp_msg = skb_put(skb, sizeof(struct htc_config_pipe_msg));
cp_msg->message_id = cpu_to_be16(HTC_MSG_CONFIG_PIPE_ID);
cp_msg->pipe_id = USB_WLAN_TX_PIPE;
@@ -195,8 +193,7 @@
}
skb_reserve(skb, sizeof(struct htc_frame_hdr));
- comp_msg = (struct htc_comp_msg *)
- skb_put(skb, sizeof(struct htc_comp_msg));
+ comp_msg = skb_put(skb, sizeof(struct htc_comp_msg));
comp_msg->msg_id = cpu_to_be16(HTC_MSG_SETUP_COMPLETE_ID);
target->htc_flags |= HTC_OP_START_WAIT;
@@ -265,8 +262,7 @@
skb_reserve(skb, sizeof(struct htc_frame_hdr));
- conn_msg = (struct htc_conn_svc_msg *)
- skb_put(skb, sizeof(struct htc_conn_svc_msg));
+ conn_msg = skb_put(skb, sizeof(struct htc_conn_svc_msg));
conn_msg->service_id = cpu_to_be16(service_connreq->service_id);
conn_msg->msg_id = cpu_to_be16(HTC_MSG_CONNECT_SERVICE_ID);
conn_msg->con_flags = cpu_to_be16(service_connreq->con_flags);
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
index 16aca9e..a866cbd 100644
--- a/drivers/net/wireless/ath/ath9k/tx99.c
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -153,7 +153,7 @@
sc->tx99_power,
sc->tx99_power / 2);
- /* We leave the harware awake as it will be chugging on */
+ /* We leave the hardware awake as it will be chugging on */
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index 9c16e2a..64a354f 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -277,7 +277,7 @@
struct wmi_cmd_hdr *hdr;
unsigned long flags;
- hdr = (struct wmi_cmd_hdr *) skb_push(skb, sizeof(struct wmi_cmd_hdr));
+ hdr = skb_push(skb, sizeof(struct wmi_cmd_hdr));
hdr->command_id = cpu_to_be16(cmd);
hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);
@@ -298,7 +298,6 @@
u16 headroom = sizeof(struct htc_frame_hdr) +
sizeof(struct wmi_cmd_hdr);
struct sk_buff *skb;
- u8 *data;
unsigned long time_left;
int ret = 0;
@@ -312,8 +311,7 @@
skb_reserve(skb, headroom);
if (cmd_len != 0 && cmd_buf != NULL) {
- data = (u8 *) skb_put(skb, cmd_len);
- memcpy(data, cmd_buf, cmd_len);
+ skb_put_data(skb, cmd_buf, cmd_len);
}
mutex_lock(&wmi->op_mutex);
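skb_put_data(skb, buf, len), used above, is shorthand for the memcpy(skb_put(skb, len), buf, len) pair it replaces: it appends len bytes to the skb tail and returns a pointer to the copied region. A sketch, assuming the allocation leaves enough tailroom:

	#include <linux/skbuff.h>

	static struct sk_buff *demo_copy(const void *buf, unsigned int len)
	{
		struct sk_buff *skb = dev_alloc_skb(len);

		if (skb)
			skb_put_data(skb, buf, len);	/* memcpy + tail advance */
		return skb;
	}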
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index b216672..7050632 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -481,7 +481,7 @@
skb = dev_alloc_skb(len + reserved);
if (likely(skb)) {
skb_reserve(skb, reserved);
- memcpy(skb_put(skb, len), buf, len);
+ skb_put_data(skb, buf, len);
}
return skb;
@@ -916,7 +916,7 @@
}
}
- memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
+ skb_put_data(ar->rx_failover, tbuf, tlen);
ar->rx_failover_missing -= tlen;
if (ar->rx_failover_missing <= 0) {
@@ -958,7 +958,7 @@
* the rx - descriptor comes round again.
*/
- memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
+ skb_put_data(ar->rx_failover, tbuf, tlen);
ar->rx_failover_missing = clen - tlen;
return;
}
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 2bf04c9..0cb5b58 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -991,7 +991,7 @@
else
cvif = NULL;
- txc = (void *)skb_push(skb, sizeof(*txc));
+ txc = skb_push(skb, sizeof(*txc));
memset(txc, 0, sizeof(*txc));
SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, txc->s.misc, hw_queue);
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 5648ebb..5b0f9fc 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -795,15 +795,11 @@
struct wireless_dev *wdev = wil_to_wdev(wil);
struct cfg80211_mgmt_tx_params params;
int rc;
- void *frame = kmalloc(len, GFP_KERNEL);
+ void *frame;
- if (!frame)
- return -ENOMEM;
-
- if (copy_from_user(frame, buf, len)) {
- kfree(frame);
- return -EIO;
- }
+ frame = memdup_user(buf, len);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
params.buf = frame;
params.len = len;
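memdup_user() collapses the kmalloc()/copy_from_user()/kfree()-on-error sequence into one call and reports failure via ERR_PTR() instead of a NULL/-EIO split (note the copy failure now surfaces as -EFAULT rather than the old -EIO). The pattern, sketched with a hypothetical handler:

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/string.h>	/* memdup_user() */

	static int demo_write(const char __user *buf, size_t len)
	{
		void *frame = memdup_user(buf, len);

		if (IS_ERR(frame))
			return PTR_ERR(frame);	/* -ENOMEM or -EFAULT */

		/* ... consume frame ... */
		kfree(frame);
		return 0;
	}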
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index edab4c0..84d9160 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -363,7 +363,7 @@
return;
}
- rtap_vendor = (void *)skb_push(skb, rtap_len);
+ rtap_vendor = skb_push(skb, rtap_len);
memset(rtap_vendor, 0, rtap_len);
rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 814c356..0a5020f 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -677,11 +677,11 @@
return;
}
- eth = (struct ethhdr *)skb_put(skb, ETH_HLEN);
+ eth = skb_put(skb, ETH_HLEN);
ether_addr_copy(eth->h_dest, ndev->dev_addr);
ether_addr_copy(eth->h_source, evt->src_mac);
eth->h_proto = cpu_to_be16(ETH_P_PAE);
- memcpy(skb_put(skb, eapol_len), evt->eapol, eapol_len);
+ skb_put_data(skb, evt->eapol, eapol_len);
skb->protocol = eth_type_trans(skb, ndev);
if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
ndev->stats.rx_packets++;
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index 27b110d..b68436b 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -1036,9 +1036,8 @@
priv->dev->stats.rx_dropped++;
} else {
skb_reserve(skb, 2);
- memcpy(skb_put(skb, priv->frag_len + 12),
- priv->rx_buf,
- priv->frag_len + 12);
+ skb_put_data(skb, priv->rx_buf,
+ priv->frag_len + 12);
skb->protocol = eth_type_trans(skb, priv->dev);
skb->ip_summed = CHECKSUM_NONE;
netif_rx(skb);
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index d23aac7..b37e739 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -71,8 +71,18 @@
MODULE_FIRMWARE("b43/ucode13.fw");
MODULE_FIRMWARE("b43/ucode14.fw");
MODULE_FIRMWARE("b43/ucode15.fw");
+MODULE_FIRMWARE("b43/ucode16_lp.fw");
MODULE_FIRMWARE("b43/ucode16_mimo.fw");
+MODULE_FIRMWARE("b43/ucode24_lcn.fw");
+MODULE_FIRMWARE("b43/ucode25_lcn.fw");
+MODULE_FIRMWARE("b43/ucode25_mimo.fw");
+MODULE_FIRMWARE("b43/ucode26_mimo.fw");
+MODULE_FIRMWARE("b43/ucode29_mimo.fw");
+MODULE_FIRMWARE("b43/ucode33_lcn40.fw");
+MODULE_FIRMWARE("b43/ucode30_mimo.fw");
MODULE_FIRMWARE("b43/ucode5.fw");
+MODULE_FIRMWARE("b43/ucode40.fw");
+MODULE_FIRMWARE("b43/ucode42.fw");
MODULE_FIRMWARE("b43/ucode9.fw");
static int modparam_bad_frames_preempt;
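The MODULE_FIRMWARE() lines added above only record blob names in the module metadata (visible via modinfo b43) so initramfs tooling knows which files to bundle; loading still happens at runtime through the firmware API, roughly:

	#include <linux/firmware.h>

	static int demo_load_ucode(struct device *dev)
	{
		const struct firmware *fw;
		int err;

		err = request_firmware(&fw, "b43/ucode40.fw", dev);
		if (err)
			return err;	/* blob not found in /lib/firmware */

		/* ... upload fw->data (fw->size bytes) to the device ... */
		release_firmware(fw);
		return 0;
	}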
diff --git a/drivers/net/wireless/broadcom/b43legacy/dma.c b/drivers/net/wireless/broadcom/b43legacy/dma.c
index f9dd892..cfa617d 100644
--- a/drivers/net/wireless/broadcom/b43legacy/dma.c
+++ b/drivers/net/wireless/broadcom/b43legacy/dma.c
@@ -1072,7 +1072,7 @@
goto out_unmap_hdr;
}
- memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
+ skb_put_data(bounce_skb, skb->data, skb->len);
memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
bounce_skb->dev = skb->dev;
skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
index 14a70d4..3559fb5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
@@ -380,9 +380,7 @@
/* Set up timer for BT */
btci->timer_on = false;
btci->timeout = BRCMF_BTCOEX_OPPR_WIN_TIME;
- init_timer(&btci->timer);
- btci->timer.data = (ulong)btci;
- btci->timer.function = brcmf_btcoex_timerfunc;
+ setup_timer(&btci->timer, brcmf_btcoex_timerfunc, (ulong)btci);
btci->cfg = cfg;
btci->saved_regs_part1 = false;
btci->saved_regs_part2 = false;
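setup_timer(timer, fn, data) wraps the three open-coded assignments it replaces above (init_timer() plus the .function and .data stores). A sketch with hypothetical names, keeping the unsigned long callback argument of this era's timer API:

	#include <linux/jiffies.h>
	#include <linux/timer.h>

	struct demo_ctx {
		struct timer_list timer;
	};

	static void demo_timerfunc(unsigned long data)
	{
		struct demo_ctx *ctx = (struct demo_ctx *)data;

		/* ... timer work on ctx ... */
	}

	static void demo_init(struct demo_ctx *ctx)
	{
		setup_timer(&ctx->timer, demo_timerfunc, (unsigned long)ctx);
		mod_timer(&ctx->timer, jiffies + HZ);
	}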
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 617199c..b8f0421 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -4674,9 +4674,6 @@
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
if (err < 0)
brcmf_err("setting AP mode failed %d\n", err);
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0);
- if (err < 0)
- brcmf_err("setting INFRA mode failed %d\n", err);
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS))
brcmf_fil_iovar_int_set(ifp, "mbss", 0);
brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
@@ -6377,16 +6374,6 @@
return -ENOMEM;
}
-static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
-{
- /* scheduled scan settings */
- wiphy->max_sched_scan_reqs = 1;
- wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
- wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
- wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
- wiphy->max_sched_scan_plan_interval = BRCMF_PNO_SCHED_SCAN_MAX_PERIOD;
-}
-
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support brcmf_wowlan_support = {
.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
@@ -6433,6 +6420,7 @@
const struct ieee80211_iface_combination *combo;
struct ieee80211_supported_band *band;
u16 max_interfaces = 0;
+ bool gscan;
__le32 bandlist[3];
u32 n_bands;
int err, i;
@@ -6482,9 +6470,10 @@
wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
wiphy->mgmt_stypes = brcmf_txrx_stypes;
wiphy->max_remain_on_channel_duration = 5000;
- if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO))
- brcmf_wiphy_pno_params(wiphy);
-
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO)) {
+ gscan = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_GSCAN);
+ brcmf_pno_wiphy_params(wiphy, gscan);
+ }
/* vendor commands/events support */
wiphy->vendor_commands = brcmf_vendor_cmds;
wiphy->n_vendor_commands = BRCMF_VNDR_CMDS_LAST - 1;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 8f19d95..a1c2e0a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -24,6 +24,8 @@
#include "fwil_types.h"
#include "p2p.h"
+#define BRCMF_SCAN_IE_LEN_MAX 2048
+
#define WL_NUM_SCAN_MAX 10
#define WL_TLV_INFO_MAX 1024
#define WL_BSS_INFO_MAX 2048
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 62985f2..8c7ef59 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -27,6 +27,7 @@
#include "feature.h"
#include "common.h"
+#define BRCMF_FW_UNSUPPORTED 23
/*
* expand feature list to array of feature strings.
@@ -113,6 +114,22 @@
}
}
+static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,
+ enum brcmf_feat_id id, char *name,
+ const void *data, size_t len)
+{
+ int err;
+
+ err = brcmf_fil_iovar_data_set(ifp, name, data, len);
+ if (err != -BRCMF_FW_UNSUPPORTED) {
+ brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]);
+ ifp->drvr->feat_flags |= BIT(id);
+ } else {
+ brcmf_dbg(TRACE, "%s feature check failed: %d\n",
+ brcmf_feat_names[id], err);
+ }
+}
+
static void brcmf_feat_firmware_capabilities(struct brcmf_if *ifp)
{
char caps[256];
@@ -136,11 +153,14 @@
{
struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0);
struct brcmf_pno_macaddr_le pfn_mac;
+ struct brcmf_gscan_config gscan_cfg;
u32 wowl_cap;
s32 err;
brcmf_feat_firmware_capabilities(ifp);
-
+ memset(&gscan_cfg, 0, sizeof(gscan_cfg));
+ brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN, "pfn_gscan_cfg",
+ &gscan_cfg, sizeof(gscan_cfg));
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn");
if (drvr->bus_if->wowl_supported)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
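brcmf_feat_iovar_data_set() above probes for a feature by attempting to set the corresponding firmware iovar with a zeroed payload: any result other than -BRCMF_FW_UNSUPPORTED (the firmware's "unsupported" error, defined as 23 above) marks the feature as present. Later code then tests the cached flag instead of re-querying the firmware, as in the cfg80211.c hunk earlier:

	bool gscan = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_GSCAN);

	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO))
		brcmf_pno_wiphy_params(wiphy, gscan);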
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index db4733a..c1dbd17 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -31,6 +31,7 @@
* WOWL_GTK: (WOWL) GTK rekeying offload
* WOWL_ARP_ND: ARP and Neighbor Discovery offload support during WOWL.
* MFP: 802.11w Management Frame Protection.
+ * GSCAN: enhanced scan offload feature.
*/
#define BRCMF_FEAT_LIST \
BRCMF_FEAT_DEF(MBSS) \
@@ -44,7 +45,8 @@
BRCMF_FEAT_DEF(WOWL_ND) \
BRCMF_FEAT_DEF(WOWL_GTK) \
BRCMF_FEAT_DEF(WOWL_ARP_ND) \
- BRCMF_FEAT_DEF(MFP)
+ BRCMF_FEAT_DEF(MFP) \
+ BRCMF_FEAT_DEF(GSCAN)
/*
* Quirks:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 9a1eb5a..8c18fad 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -835,4 +835,63 @@
u8 replay_counter[BRCMF_RSN_REPLAY_LEN];
};
+/**
+ * struct brcmf_gscan_bucket_config - configuration data for channel bucket.
+ *
+ * @bucket_end_index: !unknown!
+ * @bucket_freq_multiple: !unknown!
+ * @flag: !unknown!
+ * @reserved: !unknown!
+ * @repeat: !unknown!
+ * @max_freq_multiple: !unknown!
+ */
+struct brcmf_gscan_bucket_config {
+ u8 bucket_end_index;
+ u8 bucket_freq_multiple;
+ u8 flag;
+ u8 reserved;
+ __le16 repeat;
+ __le16 max_freq_multiple;
+};
+
+/* supported version, which must match the firmware */
+#define BRCMF_GSCAN_CFG_VERSION 1
+
+/**
+ * enum brcmf_gscan_cfg_flags - bit values for gscan flags.
+ *
+ * @BRCMF_GSCAN_CFG_FLAGS_ALL_RESULTS: send probe responses/beacons to host.
+ * @BRCMF_GSCAN_CFG_FLAGS_CHANGE_ONLY: indicates only the flags member is changed.
+ */
+enum brcmf_gscan_cfg_flags {
+ BRCMF_GSCAN_CFG_FLAGS_ALL_RESULTS = BIT(0),
+ BRCMF_GSCAN_CFG_FLAGS_CHANGE_ONLY = BIT(7),
+};
+
+/**
+ * struct brcmf_gscan_config - configuration data for gscan.
+ *
+ * @version: version of the api to match firmware.
+ * @flags: flags according to %enum brcmf_gscan_cfg_flags.
+ * @buffer_threshold: percentage threshold of buffer to generate an event.
+ * @swc_nbssid_threshold: number of BSSIDs with significant change that
+ * will generate an event.
+ * @swc_rssi_window_size: size of rssi cache buffer (max=8).
+ * @count_of_channel_buckets: number of array members in @bucket.
+ * @retry_threshold: !unknown!
+ * @lost_ap_window: !unknown!
+ * @bucket: array of channel buckets.
+ */
+struct brcmf_gscan_config {
+ __le16 version;
+ u8 flags;
+ u8 buffer_threshold;
+ u8 swc_nbssid_threshold;
+ u8 swc_rssi_window_size;
+ u8 count_of_channel_buckets;
+ u8 retry_threshold;
+ __le16 lost_ap_window;
+ struct brcmf_gscan_bucket_config bucket[1];
+};
+
#endif /* FWIL_TYPES_H_ */
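struct brcmf_gscan_config ends in a one-element bucket[] array, the pre-flexible-array idiom for a variable-length tail. A configuration carrying n buckets would therefore be sized along these lines (sketch; helper name hypothetical):

	#include <linux/slab.h>
	#include <linux/stddef.h>

	static struct brcmf_gscan_config *demo_gscan_alloc(unsigned int n)
	{
		size_t sz = offsetof(struct brcmf_gscan_config, bucket) +
			    n * sizeof(struct brcmf_gscan_bucket_config);
		struct brcmf_gscan_config *cfg = kzalloc(sz, GFP_KERNEL);

		if (cfg) {
			cfg->version = cpu_to_le16(BRCMF_GSCAN_CFG_VERSION);
			cfg->count_of_channel_buckets = n;
		}
		return cfg;
	}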
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
index 6c3bde8..a473445 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
@@ -239,3 +239,13 @@
return ret;
}
+void brcmf_pno_wiphy_params(struct wiphy *wiphy, bool gscan)
+{
+ /* scheduled scan settings */
+ wiphy->max_sched_scan_reqs = gscan ? 2 : 1;
+ wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
+ wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
+ wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
+ wiphy->max_sched_scan_plan_interval = BRCMF_PNO_SCHED_SCAN_MAX_PERIOD;
+}
+
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h
index bae55b2..07ec51f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h
@@ -37,4 +37,12 @@
int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
struct cfg80211_sched_scan_request *req);
+/**
+ * brcmf_pno_wiphy_params - fill scheduled scan parameters in wiphy instance.
+ *
+ * @wiphy: wiphy instance to be used.
+ * @gscan: indicates whether the device supports the g-scan feature.
+ */
+void brcmf_pno_wiphy_params(struct wiphy *wiphy, bool gscan);
+
#endif /* _BRCMF_PNO_H */
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 1b7e125..4623155 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -3330,7 +3330,7 @@
}
skb_reserve(skb, 2); /* This way the IP header is aligned */
- buffer = (__le16 *) skb_put(skb, len + hdrlen);
+ buffer = skb_put(skb, len + hdrlen);
if (test_bit(FLAG_802_11, &ai->flags)) {
buffer[0] = fc;
bap_read(ai, buffer + 1, hdrlen - 2, BAP0);
@@ -3734,7 +3734,7 @@
ai->dev->stats.rx_dropped++;
goto badrx;
}
- buffer = (u16*)skb_put (skb, len + hdrlen);
+ buffer = skb_put(skb, len + hdrlen);
memcpy ((char *)buffer, ptr, hdrlen);
ptr += hdrlen;
if (hdrlen == 24)
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index bbc579b..5e4ce4a 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -10274,8 +10274,9 @@
printk(KERN_INFO "Adding frag %d %d...\n",
j, size);
- memcpy(skb_put(skb, size),
- txb->fragments[j]->data + hdr_len, size);
+ skb_put_data(skb,
+ txb->fragments[j]->data + hdr_len,
+ size);
}
dev_kfree_skb_any(txb->fragments[i]);
txb->fragments[i] = skb;
@@ -10370,7 +10371,7 @@
if (!dst)
continue;
- rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
+ rt_hdr = skb_put(dst, sizeof(*rt_hdr));
rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
rt_hdr->it_pad = 0;
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
index 048f1e3..84205aa 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
@@ -359,7 +359,7 @@
goto failed;
skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len);
- memcpy(skb_put(skb_new, hdr_len), &header, hdr_len);
+ skb_put_data(skb_new, &header, hdr_len);
snapped = 1;
libipw_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)),
ether_type);
@@ -439,8 +439,7 @@
if (rts_required) {
skb_frag = txb->fragments[0];
- frag_hdr =
- (struct libipw_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
+ frag_hdr = skb_put(skb_frag, hdr_len);
/*
* Set header frame_ctl to the RTS.
@@ -470,9 +469,7 @@
skb_reserve(skb_frag,
crypt->ops->extra_mpdu_prefix_len);
- frag_hdr =
- (struct libipw_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
- memcpy(frag_hdr, &header, hdr_len);
+ frag_hdr = skb_put_data(skb_frag, &header, hdr_len);
/* If this is not the last fragment, then add the MOREFRAGS
* bit to the frame control */
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index 080ea81..dbf164d 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -520,7 +520,7 @@
* and do not consume a full page
*/
if (len <= SMALL_PACKET_SIZE) {
- memcpy(skb_put(skb, len), rx_hdr->payload, len);
+ skb_put_data(skb, rx_hdr->payload, len);
} else {
skb_add_rx_frag(skb, 0, rxb->page,
(void *)rx_hdr->payload - (void *)pkt, len,
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 49a2ff1..5b51fba 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -606,7 +606,7 @@
}
if (len <= SMALL_PACKET_SIZE) {
- memcpy(skb_put(skb, len), hdr, len);
+ skb_put_data(skb, hdr, len);
} else {
skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb),
len, PAGE_SIZE << il->hw_params.rx_page_order);
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 140b6ea..8d5acda 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -5147,6 +5147,8 @@
if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
il->power_data.ps_disabled = !(conf->flags & IEEE80211_CONF_PS);
+ if (!il->power_data.ps_disabled)
+ IL_WARN_ONCE("Enabling power save might cause firmware crashes\n");
ret = il_power_update_mode(il, false);
if (ret)
D_MAC80211("Error setting sleep level\n");
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index 3bba521..18c60c9 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -45,6 +45,7 @@
#define IL_ERR(f, a...) dev_err(&il->pci_dev->dev, f, ## a)
#define IL_WARN(f, a...) dev_warn(&il->pci_dev->dev, f, ## a)
+#define IL_WARN_ONCE(f, a...) dev_warn_once(&il->pci_dev->dev, f, ## a)
#define IL_INFO(f, a...) dev_info(&il->pci_dev->dev, f, ## a)
#define RX_QUEUE_SIZE 256
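IL_WARN_ONCE maps onto dev_warn_once(), which latches an internal flag after the first print; the power-save warning added to common.c above therefore fires at most once per driver lifetime, no matter how often mac80211 toggles PS. The *_once printk family all behave this way (sketch):

	#include <linux/device.h>

	static void demo_ps_toggle(struct device *dev, bool ps)
	{
		if (ps)
			dev_warn_once(dev, "power save may be unstable\n");
	}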
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
index adfd630..eaad738 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -657,7 +657,7 @@
*/
hdrlen = (len <= skb_tailroom(skb)) ? len : sizeof(*hdr);
- memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
+ skb_put_data(skb, hdr, hdrlen);
fraglen = len - hdrlen;
if (fraglen) {
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index 4b97371..adaa2f0 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -319,8 +319,7 @@
if (noa_data &&
pskb_expand_head(skb, 0, noa_data->length,
GFP_ATOMIC) == 0) {
- memcpy(skb_put(skb, noa_data->length),
- noa_data->data, noa_data->length);
+ skb_put_data(skb, noa_data->data, noa_data->length);
hdr = (struct ieee80211_hdr *)skb->data;
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 119a3bd..7a56a0a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1431,7 +1431,7 @@
if (!pkt)
goto report;
- memcpy(skb_put(pkt, hdrlen), pktdata, hdrlen);
+ skb_put_data(pkt, pktdata, hdrlen);
pktdata += hdrlen;
pktsize -= hdrlen;
@@ -1463,7 +1463,7 @@
pktsize -= ivlen + icvlen;
pktdata += ivlen;
- memcpy(skb_put(pkt, pktsize), pktdata, pktsize);
+ skb_put_data(pkt, pktdata, pktsize);
if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
goto report;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index fd2fc46..15d1301 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1596,7 +1596,7 @@
rx_status.band);
/* copy the data */
- memcpy(skb_put(skb, size), sb->data, size);
+ skb_put_data(skb, sb->data, size);
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
/* pass it as regular rx to mac80211 */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index fd1dd06..2c07719 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -133,7 +133,7 @@
*/
hdrlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8;
- memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
+ skb_put_data(skb, hdr, hdrlen);
fraglen = len - hdrlen;
if (fraglen) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 966cd75..cf48390 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -183,9 +183,8 @@
* present before copying packet data.
*/
hdrlen += crypt_len;
- memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
- memcpy(skb_put(skb, headlen - hdrlen), (u8 *)hdr + hdrlen + pad_len,
- headlen - hdrlen);
+ skb_put_data(skb, hdr, hdrlen);
+ skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
fraglen = len - headlen;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 386950a..01013d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -2141,8 +2141,7 @@
htons(ETH_P_IPV6),
data_left);
- memcpy(skb_put(csum_skb, tcp_hdrlen(skb)),
- tcph, tcp_hdrlen(skb));
+ skb_put_data(csum_skb, tcph, tcp_hdrlen(skb));
skb_reset_transport_header(csum_skb);
csum_skb->csum_start =
(unsigned char *)tcp_hdr(csum_skb) -
@@ -2176,7 +2175,7 @@
dma_addr_t tb_phys;
if (trans_pcie->sw_csum_tx)
- memcpy(skb_put(csum_skb, size), tso.data, size);
+ skb_put_data(csum_skb, tso.data, size);
tb_phys = dma_map_single(trans->dev, tso.data,
size, DMA_TO_DEVICE);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_80211_rx.c b/drivers/net/wireless/intersil/hostap/hostap_80211_rx.c
index 34dbddb..6d8b64c 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_80211_rx.c
@@ -131,8 +131,7 @@
if (prism_header == 1) {
struct linux_wlan_ng_prism_hdr *hdr;
- hdr = (struct linux_wlan_ng_prism_hdr *)
- skb_push(skb, phdrlen);
+ hdr = skb_push(skb, phdrlen);
memset(hdr, 0, phdrlen);
hdr->msgcode = LWNG_CAP_DID_BASE;
hdr->msglen = sizeof(*hdr);
@@ -153,8 +152,7 @@
#undef LWNG_SETVAL
} else if (prism_header == 2) {
struct linux_wlan_ng_cap_hdr *hdr;
- hdr = (struct linux_wlan_ng_cap_hdr *)
- skb_push(skb, phdrlen);
+ hdr = skb_push(skb, phdrlen);
memset(hdr, 0, phdrlen);
hdr->version = htonl(LWNG_CAPHDR_VERSION);
hdr->length = htonl(phdrlen);
@@ -172,7 +170,7 @@
hdr->encoding = htonl(1); /* cck */
} else if (prism_header == 3) {
struct hostap_radiotap_rx *hdr;
- hdr = (struct hostap_radiotap_rx *)skb_push(skb, phdrlen);
+ hdr = skb_push(skb, phdrlen);
memset(hdr, 0, phdrlen);
hdr->hdr.it_len = cpu_to_le16(phdrlen);
hdr->hdr.it_present =
diff --git a/drivers/net/wireless/intersil/hostap/hostap_80211_tx.c b/drivers/net/wireless/intersil/hostap/hostap_80211_tx.c
index 055e11d..c1b10d5 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_80211_tx.c
@@ -242,7 +242,7 @@
memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
memcpy(skb_push(skb, hdr_len), &hdr, hdr_len);
if (use_wds == WDS_OWN_FRAME) {
- memcpy(skb_put(skb, ETH_ALEN), &hdr.addr4, ETH_ALEN);
+ skb_put_data(skb, &hdr.addr4, ETH_ALEN);
}
iface->stats.tx_packets++;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
index c995ace..eb9cd6f 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c
@@ -998,11 +998,9 @@
fc = type_subtype;
hdrlen = hostap_80211_get_hdrlen(cpu_to_le16(type_subtype));
- hdr = (struct ieee80211_hdr *) skb_put(skb, hdrlen);
+ hdr = skb_put_zero(skb, hdrlen);
if (body)
- memcpy(skb_put(skb, body_len), body, body_len);
-
- memset(hdr, 0, hdrlen);
+ skb_put_data(skb, body, body_len);
/* FIX: ctrl::ack sending used special HFA384X_TX_CTRL_802_11
* tx_control instead of using local->tx_control */
@@ -1325,8 +1323,7 @@
}
skb_reserve(skb, ap->crypt->extra_mpdu_prefix_len);
- memset(skb_put(skb, WLAN_AUTH_CHALLENGE_LEN), 0,
- WLAN_AUTH_CHALLENGE_LEN);
+ skb_put_zero(skb, WLAN_AUTH_CHALLENGE_LEN);
if (ap->crypt->encrypt_mpdu(skb, 0, ap->crypt_priv)) {
dev_kfree_skb(skb);
kfree(tmpbuf);
@@ -2364,7 +2361,7 @@
return;
}
- hdr = (struct ieee80211_hdr *) skb_put(skb, 16);
+ hdr = skb_put(skb, 16);
/* Generate a fake pspoll frame to start packet delivery */
hdr->frame_control = cpu_to_le16(
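skb_put_zero(skb, len), introduced above, performs memset(skb_put(skb, len), 0, len) in one step and returns the zeroed region; the hostap hunk also reorders the code so the header is zeroed before any optional body is appended. Sketch (frame layout hypothetical):

	#include <linux/ieee80211.h>
	#include <linux/skbuff.h>

	static void demo_build(struct sk_buff *skb, const void *body, size_t blen)
	{
		struct ieee80211_hdr *hdr = skb_put_zero(skb, 24); /* zeroed 3-addr hdr */

		hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT);
		if (body)
			skb_put_data(skb, body, blen);
	}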
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index 04dfd040..72b46ea 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -190,7 +190,7 @@
}
}
- if (atomic_dec_and_test(&entry->usecnt) && entry->del_req)
+ if (refcount_dec_and_test(&entry->usecnt) && entry->del_req)
kfree(entry);
}
@@ -228,7 +228,7 @@
spin_lock_irqsave(&local->cmdlock, flags);
list_for_each_safe(ptr, n, &local->cmd_queue) {
entry = list_entry(ptr, struct hostap_cmd_queue, list);
- atomic_inc(&entry->usecnt);
+ refcount_inc(&entry->usecnt);
printk(KERN_DEBUG "%s: removed pending cmd_queue entry "
"(type=%d, cmd=0x%04x, param0=0x%04x)\n",
local->dev->name, entry->type, entry->cmd,
@@ -350,7 +350,7 @@
if (entry == NULL)
return -ENOMEM;
- atomic_set(&entry->usecnt, 1);
+ refcount_set(&entry->usecnt, 1);
entry->type = CMD_SLEEP;
entry->cmd = cmd;
entry->param0 = param0;
@@ -516,7 +516,7 @@
if (entry == NULL)
return -ENOMEM;
- atomic_set(&entry->usecnt, 1);
+ refcount_set(&entry->usecnt, 1);
entry->type = CMD_CALLBACK;
entry->cmd = cmd;
entry->param0 = param0;
@@ -666,7 +666,7 @@
if (!list_empty(&local->cmd_queue)) {
entry = list_entry(local->cmd_queue.next,
struct hostap_cmd_queue, list);
- atomic_inc(&entry->usecnt);
+ refcount_inc(&entry->usecnt);
list_del_init(&entry->list);
local->cmd_queue_len--;
@@ -718,7 +718,7 @@
entry = NULL;
}
if (entry)
- atomic_inc(&entry->usecnt);
+ refcount_inc(&entry->usecnt);
}
spin_unlock(&local->cmdlock);
@@ -2005,7 +2005,7 @@
goto rx_dropped;
}
skb->dev = dev;
- memcpy(skb_put(skb, hdr_len), &rxdesc, hdr_len);
+ skb_put_data(skb, &rxdesc, hdr_len);
if (len > 0)
res = hfa384x_from_bap(dev, BAP0, skb_put(skb, len), len);
@@ -2209,9 +2209,9 @@
return;
}
- memcpy(skb_put(skb, hdrlen), (void *) &txdesc->frame_control, hdrlen);
+ skb_put_data(skb, (void *)&txdesc->frame_control, hdrlen);
if (payload)
- memcpy(skb_put(skb, len), payload, len);
+ skb_put_data(skb, payload, len);
skb->dev = local->dev;
skb_reset_mac_header(skb);
@@ -2362,8 +2362,7 @@
struct sk_buff *skb;
skb = dev_alloc_skb(sizeof(txdesc));
if (skb) {
- memcpy(skb_put(skb, sizeof(txdesc)), &txdesc,
- sizeof(txdesc));
+ skb_put_data(skb, &txdesc, sizeof(txdesc));
skb_queue_tail(&local->sta_tx_exc_list, skb);
tasklet_schedule(&local->sta_tx_exc_tasklet);
}
@@ -2460,7 +2459,7 @@
goto out;
}
- memcpy(skb_put(skb, sizeof(info)), &info, sizeof(info));
+ skb_put_data(skb, &info, sizeof(info));
if (left > 0 && hfa384x_from_bap(dev, BAP0, skb_put(skb, left), left))
{
spin_unlock(&local->baplock);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c
index 1372b20..a3c066f 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_main.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_main.c
@@ -1039,15 +1039,13 @@
if (skb == NULL)
return -ENOMEM;
- mgmt = (struct hostap_ieee80211_mgmt *)
- skb_put(skb, IEEE80211_MGMT_HDR_LEN);
- memset(mgmt, 0, IEEE80211_MGMT_HDR_LEN);
+ mgmt = skb_put_zero(skb, IEEE80211_MGMT_HDR_LEN);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
memcpy(mgmt->da, dst, ETH_ALEN);
memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
memcpy(mgmt->bssid, dst, ETH_ALEN);
if (body)
- memcpy(skb_put(skb, bodylen), body, bodylen);
+ skb_put_data(skb, body, bodylen);
meta = (struct hostap_skb_tx_data *) skb->cb;
memset(meta, 0, sizeof(*meta));
diff --git a/drivers/net/wireless/intersil/hostap/hostap_wlan.h b/drivers/net/wireless/intersil/hostap/hostap_wlan.h
index ca25283..5352adb 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/intersil/hostap/hostap_wlan.h
@@ -6,6 +6,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <net/iw_handler.h>
#include <net/ieee80211_radiotap.h>
#include <net/lib80211.h>
@@ -557,7 +558,7 @@
u16 resp0, res;
volatile int issued, issuing;
- atomic_t usecnt;
+ refcount_t usecnt;
int del_req;
};
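The atomic_t to refcount_t conversion above keeps the inc()/dec_and_test() surface but adds saturation semantics: refcount_t warns and refuses to wrap on overflow or to increment from zero, so reference-count bugs degrade into warnings rather than use-after-frees. The lifetime pattern, sketched generically:

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct demo_obj {
		refcount_t usecnt;
	};

	static void demo_get(struct demo_obj *o)
	{
		refcount_inc(&o->usecnt);	/* WARNs if usecnt was 0 */
	}

	static void demo_put(struct demo_obj *o)
	{
		if (refcount_dec_and_test(&o->usecnt))
			kfree(o);		/* last reference dropped */
	}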
diff --git a/drivers/net/wireless/intersil/orinoco/main.c b/drivers/net/wireless/intersil/orinoco/main.c
index d9128bb..28dac36 100644
--- a/drivers/net/wireless/intersil/orinoco/main.c
+++ b/drivers/net/wireless/intersil/orinoco/main.c
@@ -396,7 +396,7 @@
memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr));
/* Make room for the new header, and copy it in */
- eh = (struct ethhdr *) skb_push(skb, ENCAPS_OVERHEAD);
+ eh = skb_push(skb, ENCAPS_OVERHEAD);
memcpy(eh, &hdr, sizeof(hdr));
}
@@ -792,7 +792,7 @@
}
/* Copy the 802.11 header to the skb */
- memcpy(skb_put(skb, hdrlen), &(desc->frame_ctl), hdrlen);
+ skb_put_data(skb, &(desc->frame_ctl), hdrlen);
skb_reset_mac_header(skb);
/* If any, copy the data from the card to the skb */
@@ -1029,11 +1029,10 @@
/* These indicate a SNAP within 802.2 LLC within
802.11 frame which we'll need to de-encapsulate to
the original EthernetII frame. */
- hdr = (struct ethhdr *)skb_push(skb,
- ETH_HLEN - ENCAPS_OVERHEAD);
+ hdr = skb_push(skb, ETH_HLEN - ENCAPS_OVERHEAD);
} else {
/* 802.3 frame - prepend 802.3 header as is */
- hdr = (struct ethhdr *)skb_push(skb, ETH_HLEN);
+ hdr = skb_push(skb, ETH_HLEN);
hdr->h_proto = htons(length);
}
memcpy(hdr->h_dest, desc->addr1, ETH_ALEN);
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index 132f5fb..c84fd84 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -64,6 +64,7 @@
#include <linux/etherdevice.h>
#include <linux/wireless.h>
#include <linux/firmware.h>
+#include <linux/refcount.h>
#include "mic.h"
#include "orinoco.h"
@@ -268,7 +269,7 @@
struct request_context {
struct list_head list;
- atomic_t refcount;
+ refcount_t refcount;
struct completion done; /* Signals that CTX is dead */
int killed;
struct urb *outurb; /* OUT for req pkt */
@@ -298,7 +299,7 @@
static void ezusb_request_context_put(struct request_context *ctx)
{
- if (!atomic_dec_and_test(&ctx->refcount))
+ if (!refcount_dec_and_test(&ctx->refcount))
return;
WARN_ON(!ctx->done.done);
@@ -328,7 +329,7 @@
} else {
ctx->state = EZUSB_CTX_RESP_TIMEOUT;
dev_dbg(&ctx->outurb->dev->dev, "couldn't unlink\n");
- atomic_inc(&ctx->refcount);
+ refcount_inc(&ctx->refcount);
ctx->killed = 1;
ezusb_ctx_complete(ctx);
ezusb_request_context_put(ctx);
@@ -361,7 +362,7 @@
ctx->out_rid = out_rid;
ctx->in_rid = in_rid;
- atomic_set(&ctx->refcount, 1);
+ refcount_set(&ctx->refcount, 1);
init_completion(&ctx->done);
setup_timer(&ctx->timer, ezusb_request_timerfn, (u_long)ctx);
@@ -469,7 +470,7 @@
list_move_tail(&ctx->list, &upriv->req_active);
if (ctx->state == EZUSB_CTX_QUEUED) {
- atomic_inc(&ctx->refcount);
+ refcount_inc(&ctx->refcount);
result = usb_submit_urb(ctx->outurb, GFP_ATOMIC);
if (result) {
ctx->state = EZUSB_CTX_REQSUBMIT_FAIL;
@@ -507,7 +508,7 @@
spin_unlock_irqrestore(&upriv->req_lock, flags);
goto done;
}
- atomic_inc(&ctx->refcount);
+ refcount_inc(&ctx->refcount);
list_add_tail(&ctx->list, &upriv->req_pending);
spin_unlock_irqrestore(&upriv->req_lock, flags);
@@ -1477,7 +1478,7 @@
int err;
ctx = list_entry(item, struct request_context, list);
- atomic_inc(&ctx->refcount);
+ refcount_inc(&ctx->refcount);
ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
err = usb_unlink_urb(ctx->outurb);
diff --git a/drivers/net/wireless/intersil/p54/fwio.c b/drivers/net/wireless/intersil/p54/fwio.c
index 4ac6764..52c095c 100644
--- a/drivers/net/wireless/intersil/p54/fwio.c
+++ b/drivers/net/wireless/intersil/p54/fwio.c
@@ -176,8 +176,9 @@
* keeping a extra list for uploaded keys.
*/
- priv->used_rxkeys = kzalloc(BITS_TO_LONGS(
- priv->rx_keycache_size), GFP_KERNEL);
+ priv->used_rxkeys = kcalloc(BITS_TO_LONGS(priv->rx_keycache_size),
+ sizeof(long),
+ GFP_KERNEL);
if (!priv->used_rxkeys)
return -ENOMEM;
@@ -205,7 +206,7 @@
return NULL;
skb_reserve(skb, priv->tx_hdr_len);
- hdr = (struct p54_hdr *) skb_put(skb, sizeof(*hdr));
+ hdr = skb_put(skb, sizeof(*hdr));
hdr->flags = cpu_to_le16(hdr_flags);
hdr->len = cpu_to_le16(payload_len);
hdr->type = cpu_to_le16(type);
@@ -235,8 +236,7 @@
mutex_lock(&priv->eeprom_mutex);
priv->eeprom = buf;
- eeprom_hdr = (struct p54_eeprom_lm86 *) skb_put(skb,
- eeprom_hdr_size + len);
+ eeprom_hdr = skb_put(skb, eeprom_hdr_size + len);
if (priv->fw_var < 0x509) {
eeprom_hdr->v1.offset = cpu_to_le16(offset);
@@ -272,7 +272,7 @@
if (unlikely(!skb))
return -ENOMEM;
- tim = (struct p54_tim *) skb_put(skb, sizeof(*tim));
+ tim = skb_put(skb, sizeof(*tim));
tim->count = 1;
tim->entry[0] = cpu_to_le16(set ? (aid | 0x8000) : aid);
p54_tx(priv, skb);
@@ -289,7 +289,7 @@
if (unlikely(!skb))
return -ENOMEM;
- sta = (struct p54_sta_unlock *)skb_put(skb, sizeof(*sta));
+ sta = skb_put(skb, sizeof(*sta));
memcpy(sta->addr, addr, ETH_ALEN);
p54_tx(priv, skb);
return 0;
@@ -309,7 +309,7 @@
if (unlikely(!skb))
return -ENOMEM;
- cancel = (struct p54_txcancel *)skb_put(skb, sizeof(*cancel));
+ cancel = skb_put(skb, sizeof(*cancel));
cancel->req_id = req_id;
p54_tx(priv, skb);
return 0;
@@ -326,7 +326,7 @@
if (!skb)
return -ENOMEM;
- setup = (struct p54_setup_mac *) skb_put(skb, sizeof(*setup));
+ setup = skb_put(skb, sizeof(*setup));
if (!(priv->hw->conf.flags & IEEE80211_CONF_IDLE)) {
switch (priv->mode) {
case NL80211_IFTYPE_STATION:
@@ -412,18 +412,18 @@
if (!skb)
return -ENOMEM;
- head = (struct p54_scan_head *) skb_put(skb, sizeof(*head));
+ head = skb_put(skb, sizeof(*head));
memset(head->scan_params, 0, sizeof(head->scan_params));
head->mode = cpu_to_le16(mode);
head->dwell = cpu_to_le16(dwell);
head->freq = freq;
if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
- __le16 *pa_power_points = (__le16 *) skb_put(skb, 2);
+ __le16 *pa_power_points = skb_put(skb, 2);
*pa_power_points = cpu_to_le16(0x0c);
}
- iq_autocal = (void *) skb_put(skb, sizeof(*iq_autocal));
+ iq_autocal = skb_put(skb, sizeof(*iq_autocal));
for (i = 0; i < priv->iq_autocal_len; i++) {
if (priv->iq_autocal[i].freq != freq)
continue;
@@ -436,9 +436,9 @@
goto err;
if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW)
- body = (void *) skb_put(skb, sizeof(body->longbow));
+ body = skb_put(skb, sizeof(body->longbow));
else
- body = (void *) skb_put(skb, sizeof(body->normal));
+ body = skb_put(skb, sizeof(body->normal));
for (i = 0; i < priv->output_limit->entries; i++) {
__le16 *entry_freq = (void *) (priv->output_limit->data +
@@ -499,25 +499,25 @@
goto err;
if ((priv->fw_var >= 0x500) && (priv->fw_var < 0x509)) {
- rate = (void *) skb_put(skb, sizeof(*rate));
+ rate = skb_put(skb, sizeof(*rate));
rate->basic_rate_mask = cpu_to_le32(priv->basic_rate_mask);
for (i = 0; i < sizeof(rate->rts_rates); i++)
rate->rts_rates[i] = i;
}
- rssi = (struct pda_rssi_cal_entry *) skb_put(skb, sizeof(*rssi));
+ rssi = skb_put(skb, sizeof(*rssi));
rssi_data = p54_rssi_find(priv, le16_to_cpu(freq));
rssi->mul = cpu_to_le16(rssi_data->mul);
rssi->add = cpu_to_le16(rssi_data->add);
if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
/* Longbow frontend needs ever more */
- rssi = (void *) skb_put(skb, sizeof(*rssi));
+ rssi = skb_put(skb, sizeof(*rssi));
rssi->mul = cpu_to_le16(rssi_data->longbow_unkn);
rssi->add = cpu_to_le16(rssi_data->longbow_unk2);
}
if (priv->fw_var >= 0x509) {
- rate = (void *) skb_put(skb, sizeof(*rate));
+ rate = skb_put(skb, sizeof(*rate));
rate->basic_rate_mask = cpu_to_le32(priv->basic_rate_mask);
for (i = 0; i < sizeof(rate->rts_rates); i++)
rate->rts_rates[i] = i;
@@ -549,7 +549,7 @@
if (unlikely(!skb))
return -ENOMEM;
- led = (struct p54_led *) skb_put(skb, sizeof(*led));
+ led = skb_put(skb, sizeof(*led));
led->flags = cpu_to_le16(0x0003);
led->mask[0] = led->mask[1] = cpu_to_le16(priv->softled_state);
led->delay[0] = cpu_to_le16(1);
@@ -569,7 +569,7 @@
if (unlikely(!skb))
return -ENOMEM;
- edcf = (struct p54_edcf *)skb_put(skb, sizeof(*edcf));
+ edcf = skb_put(skb, sizeof(*edcf));
if (priv->use_short_slot) {
edcf->slottime = 9;
edcf->sifs = 0x10;
@@ -614,7 +614,7 @@
if (!skb)
return -ENOMEM;
- psm = (struct p54_psm *)skb_put(skb, sizeof(*psm));
+ psm = skb_put(skb, sizeof(*psm));
psm->mode = cpu_to_le16(mode);
psm->aid = cpu_to_le16(priv->aid);
for (i = 0; i < ARRAY_SIZE(psm->intervals); i++) {
@@ -643,7 +643,7 @@
if (unlikely(!skb))
return -ENOMEM;
- xbow = (struct p54_xbow_synth *)skb_put(skb, sizeof(*xbow));
+ xbow = skb_put(skb, sizeof(*xbow));
xbow->magic1 = cpu_to_le16(0x1);
xbow->magic2 = cpu_to_le16(0x2);
xbow->freq = cpu_to_le16(5390);
@@ -663,7 +663,7 @@
if (unlikely(!skb))
return -ENOMEM;
- rxkey = (struct p54_keycache *)skb_put(skb, sizeof(*rxkey));
+ rxkey = skb_put(skb, sizeof(*rxkey));
rxkey->entry = slot;
rxkey->key_id = idx;
rxkey->key_type = algo;
@@ -743,7 +743,7 @@
if (!skb)
return -ENOMEM;
- grp = (struct p54_group_address_table *)skb_put(skb, sizeof(*grp));
+ grp = skb_put(skb, sizeof(*grp));
on = !(priv->filter_flags & FIF_ALLMULTI) &&
(priv->mc_maclist_num > 0 &&
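Besides switching to kcalloc()'s overflow-checked multiply, the used_rxkeys hunk near the top of this file fixes a real sizing bug: the old kzalloc() allocated BITS_TO_LONGS(n) bytes where the bitmap needs BITS_TO_LONGS(n) longs. The idiomatic form (sketch):

	#include <linux/bitops.h>
	#include <linux/slab.h>

	static unsigned long *demo_alloc_bitmap(unsigned int nbits)
	{
		/* kcalloc() rejects a count * size product that overflows */
		return kcalloc(BITS_TO_LONGS(nbits), sizeof(long), GFP_KERNEL);
	}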
diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c
index 7ab2f43..e41bf04 100644
--- a/drivers/net/wireless/intersil/p54/p54spi.c
+++ b/drivers/net/wireless/intersil/p54/p54spi.c
@@ -372,9 +372,9 @@
}
if (len <= READAHEAD_SZ) {
- memcpy(skb_put(skb, len), rx_head + 1, len);
+ skb_put_data(skb, rx_head + 1, len);
} else {
- memcpy(skb_put(skb, READAHEAD_SZ), rx_head + 1, READAHEAD_SZ);
+ skb_put_data(skb, rx_head + 1, READAHEAD_SZ);
p54spi_spi_read(priv, SPI_ADRS_DMA_DATA,
skb_put(skb, len - READAHEAD_SZ),
len - READAHEAD_SZ);
diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c
index 5e1c91a..3a4214d 100644
--- a/drivers/net/wireless/intersil/p54/txrx.c
+++ b/drivers/net/wireless/intersil/p54/txrx.c
@@ -815,8 +815,8 @@
}
}
- txhdr = (struct p54_tx_data *) skb_push(skb, sizeof(*txhdr) + padding);
- hdr = (struct p54_hdr *) skb_push(skb, sizeof(*hdr));
+ txhdr = skb_push(skb, sizeof(*txhdr) + padding);
+ hdr = skb_push(skb, sizeof(*hdr));
if (padding)
hdr_flags |= P54_HDR_FLAG_DATA_ALIGN;
@@ -905,13 +905,13 @@
if (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
/* reserve space for the MIC key */
len += 8;
- memcpy(skb_put(skb, 8), &(info->control.hw_key->key
- [NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]), 8);
+ skb_put_data(skb,
+ &(info->control.hw_key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]),
+ 8);
}
/* reserve some space for ICV */
len += info->control.hw_key->icv_len;
- memset(skb_put(skb, info->control.hw_key->icv_len), 0,
- info->control.hw_key->icv_len);
+ skb_put_zero(skb, info->control.hw_key->icv_len);
} else {
txhdr->key_type = 0;
txhdr->key_len = 0;
diff --git a/drivers/net/wireless/intersil/prism54/islpci_eth.c b/drivers/net/wireless/intersil/prism54/islpci_eth.c
index d83f633..9b0ded7 100644
--- a/drivers/net/wireless/intersil/prism54/islpci_eth.c
+++ b/drivers/net/wireless/intersil/prism54/islpci_eth.c
@@ -276,10 +276,7 @@
}
/* make room for the new header and fill it. */
- avs =
- (struct avs_80211_1_header *) skb_push(*skb,
- sizeof (struct
- avs_80211_1_header));
+ avs = skb_push(*skb, sizeof(struct avs_80211_1_header));
avs->version = cpu_to_be32(P80211CAPTURE_VERSION);
avs->length = cpu_to_be32(sizeof (struct avs_80211_1_header));
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index c854a55..c8852ac 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -650,7 +650,7 @@
skb = dev_alloc_skb(sizeof(*pspoll));
if (!skb)
return;
- pspoll = (void *) skb_put(skb, sizeof(*pspoll));
+ pspoll = skb_put(skb, sizeof(*pspoll));
pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
IEEE80211_STYPE_PSPOLL |
IEEE80211_FCTL_PM);
@@ -681,7 +681,7 @@
skb = dev_alloc_skb(sizeof(*hdr));
if (!skb)
return;
- hdr = (void *) skb_put(skb, sizeof(*hdr) - ETH_ALEN);
+ hdr = skb_put(skb, sizeof(*hdr) - ETH_ALEN);
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_NULLFUNC |
(ps ? IEEE80211_FCTL_PM : 0));
@@ -848,7 +848,7 @@
if (skb == NULL)
return;
- hdr = (struct hwsim_radiotap_hdr *) skb_push(skb, sizeof(*hdr));
+ hdr = skb_push(skb, sizeof(*hdr));
hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION;
hdr->hdr.it_pad = 0;
hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr));
@@ -892,7 +892,7 @@
if (skb == NULL)
return;
- hdr = (struct hwsim_radiotap_ack_hdr *) skb_put(skb, sizeof(*hdr));
+ hdr = skb_put(skb, sizeof(*hdr));
hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION;
hdr->hdr.it_pad = 0;
hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr));
@@ -904,7 +904,7 @@
flags = IEEE80211_CHAN_2GHZ;
hdr->rt_chbitmask = cpu_to_le16(flags);
- hdr11 = (struct ieee80211_hdr *) skb_put(skb, 10);
+ hdr11 = skb_put(skb, 10);
hdr11->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
IEEE80211_STYPE_ACK);
hdr11->duration_id = cpu_to_le16(0);
@@ -1146,7 +1146,7 @@
* Note that this code requires the headroom in the SKB
* that was allocated earlier.
*/
- rtap = (void *)skb_push(skb, sizeof(*rtap) + 8 + 4);
+ rtap = skb_push(skb, sizeof(*rtap) + 8 + 4);
rtap->oui[0] = HWSIM_RADIOTAP_OUI[0];
rtap->oui[1] = HWSIM_RADIOTAP_OUI[1];
rtap->oui[2] = HWSIM_RADIOTAP_OUI[2];
@@ -2020,8 +2020,7 @@
memcpy(mgmt->bssid, req->bssid, ETH_ALEN);
if (req->ie_len)
- memcpy(skb_put(probe, req->ie_len), req->ie,
- req->ie_len);
+ skb_put_data(probe, req->ie, req->ie_len);
local_bh_disable();
mac80211_hwsim_tx_frame(hwsim->hw, probe,
@@ -3021,7 +3020,7 @@
goto err;
/* Copy the data */
- memcpy(skb_put(skb, frame_data_len), frame_data, frame_data_len);
+ skb_put_data(skb, frame_data, frame_data_len);
data2 = get_hwsim_data_ref_from_addr(dst);
if (!data2)
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index a0463fe..71ba2c8 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -443,17 +443,12 @@
struct lbs_private *priv = wiphy_priv(wiphy);
int ret = -ENOTSUPP;
- lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d",
- chandef->chan->center_freq,
- cfg80211_get_chandef_type(chandef));
-
if (cfg80211_get_chandef_type(chandef) != NL80211_CHAN_NO_HT)
goto out;
ret = lbs_set_channel(priv, chandef->chan->hw_value);
out:
- lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ret;
}
@@ -464,16 +459,12 @@
struct lbs_private *priv = wiphy_priv(wiphy);
int ret = -ENOTSUPP;
- lbs_deb_enter_args(LBS_DEB_CFG80211, "iface %s freq %d",
- netdev_name(netdev), channel->center_freq);
-
if (netdev != priv->mesh_dev)
goto out;
ret = lbs_mesh_set_channel(priv, channel->hw_value);
out:
- lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ret;
}
@@ -512,8 +503,6 @@
int i;
int ret = -EILSEQ;
- lbs_deb_enter(LBS_DEB_CFG80211);
-
bsssize = get_unaligned_le16(&scanresp->bssdescriptsize);
lbs_deb_scan("scan response: %d BSSs (%d bytes); resp size %d bytes\n",
@@ -665,7 +654,6 @@
ret = 0;
done:
- lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
return ret;
}
@@ -693,11 +681,9 @@
int last_channel;
int running, carrier;
- lbs_deb_enter(LBS_DEB_SCAN);
-
scan_cmd = kzalloc(LBS_SCAN_MAX_CMD_SIZE, GFP_KERNEL);
if (scan_cmd == NULL)
- goto out_no_scan_cmd;
+ return;
/* prepare fixed part of scan command */
scan_cmd->bsstype = CMD_BSS_TYPE_ANY;
@@ -766,16 +752,11 @@
lbs_deb_scan("scan: waking up waiters\n");
wake_up_all(&priv->scan_q);
}
-
- out_no_scan_cmd:
- lbs_deb_leave(LBS_DEB_SCAN);
}
static void _internal_start_scan(struct lbs_private *priv, bool internal,
struct cfg80211_scan_request *request)
{
- lbs_deb_enter(LBS_DEB_CFG80211);
-
lbs_deb_scan("scan: ssids %d, channels %d, ie_len %zd\n",
request->n_ssids, request->n_channels, request->ie_len);
@@ -785,8 +766,6 @@
queue_delayed_work(priv->work_thread, &priv->scan_work,
msecs_to_jiffies(50));
-
- lbs_deb_leave(LBS_DEB_CFG80211);
}
/*
@@ -815,8 +794,6 @@
struct lbs_private *priv = wiphy_priv(wiphy);
int ret = 0;
- lbs_deb_enter(LBS_DEB_CFG80211);
-
if (priv->scan_req || delayed_work_pending(&priv->scan_work)) {
/* old scan request not yet processed */
ret = -EAGAIN;
@@ -829,7 +806,6 @@
ret = -EIO;
out:
- lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ret;
}
@@ -843,18 +819,12 @@
void lbs_send_disconnect_notification(struct lbs_private *priv,
bool locally_generated)
{
- lbs_deb_enter(LBS_DEB_CFG80211);
-
cfg80211_disconnected(priv->dev, 0, NULL, 0, locally_generated,
GFP_KERNEL);
-
- lbs_deb_leave(LBS_DEB_CFG80211);
}
void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event)
{
- lbs_deb_enter(LBS_DEB_CFG80211);
-
cfg80211_michael_mic_failure(priv->dev,
priv->assoc_bss,
event == MACREG_INT_CODE_MIC_ERR_MULTICAST ?
@@ -863,8 +833,6 @@
-1,
NULL,
GFP_KERNEL);
-
- lbs_deb_leave(LBS_DEB_CFG80211);
}
@@ -883,8 +851,6 @@
struct cmd_ds_802_11_set_wep cmd;
int ret;
- lbs_deb_enter(LBS_DEB_CFG80211);
-
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.keyindex = cpu_to_le16(priv->wep_tx_key);
@@ -892,7 +858,6 @@
ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd);
- lbs_deb_leave(LBS_DEB_CFG80211);
return ret;
}
@@ -905,8 +870,6 @@
int i;
int ret;
- lbs_deb_enter(LBS_DEB_CFG80211);
-
/*
* command 13 00
* size 50 00
@@ -956,7 +919,6 @@
ret = lbs_remove_wep_keys(priv);
}
- lbs_deb_leave(LBS_DEB_CFG80211);
return ret;
}
@@ -969,8 +931,6 @@
struct cmd_ds_802_11_enable_rsn cmd;
int ret;
- lbs_deb_enter_args(LBS_DEB_CFG80211, "%d", enable);
-
/*
* cmd 2f 00
* size 0c 00
@@ -986,7 +946,6 @@
ret = lbs_cmd_with_response(priv, CMD_802_11_ENABLE_RSN, &cmd);
- lbs_deb_leave(LBS_DEB_CFG80211);
return ret;
}
@@ -1014,8 +973,6 @@
struct cmd_key_material cmd;
int ret;
- lbs_deb_enter(LBS_DEB_CFG80211);
-
/*
* Example for WPA (TKIP):
*
@@ -1044,7 +1001,6 @@
ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd);
- lbs_deb_leave(LBS_DEB_CFG80211);
return ret;
}
@@ -1061,8 +1017,6 @@
struct cmd_ds_802_11_authenticate cmd;
int ret;
- lbs_deb_enter_args(LBS_DEB_CFG80211, "%d", sme->auth_type);
-
/*
* cmd 11 00
* size 19 00
@@ -1085,7 +1039,6 @@
ret = lbs_cmd_with_response(priv, CMD_802_11_AUTHENTICATE, &cmd);
done:
- lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ret;
}
@@ -1116,8 +1069,6 @@
u8 *pos;
u8 *tmp;
- lbs_deb_enter(LBS_DEB_CFG80211);
-
if (!cmd) {
ret = -ENOMEM;
goto done;
@@ -1262,7 +1213,6 @@
kfree(cmd);
done:
- lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ret;
}
@@ -1329,8 +1279,6 @@
if (dev == priv->mesh_dev)
return -EOPNOTSUPP;
- lbs_deb_enter(LBS_DEB_CFG80211);
-
if (!sme->bssid) {
struct cfg80211_scan_request *creq;
@@ -1442,7 +1390,6 @@
done:
if (bss)
cfg80211_put_bss(wiphy, bss);
- lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ret;
}
@@ -1478,8 +1425,6 @@
if (dev == priv->mesh_dev)
return -EOPNOTSUPP;
- lbs_deb_enter_args(LBS_DEB_CFG80211, "reason_code %d", reason_code);
-
/* store for lbs_cfg_ret_disconnect() */
priv->disassoc_reason = reason_code;
@@ -1496,8 +1441,6 @@
if (netdev == priv->mesh_dev)
return -EOPNOTSUPP;
- lbs_deb_enter(LBS_DEB_CFG80211);
-
if (key_index != priv->wep_tx_key) {
lbs_deb_assoc("set_default_key: to %d\n", key_index);
priv->wep_tx_key = key_index;
@@ -1520,8 +1463,6 @@
if (netdev == priv->mesh_dev)
return -EOPNOTSUPP;
- lbs_deb_enter(LBS_DEB_CFG80211);
-
lbs_deb_assoc("add_key: cipher 0x%x, mac_addr %pM\n",
params->cipher, mac_addr);
lbs_deb_assoc("add_key: key index %d, key len %d\n",
@@ -1575,8 +1516,6 @@
u8 key_index, bool pairwise, const u8 *mac_addr)
{
- lbs_deb_enter(LBS_DEB_CFG80211);
-
lbs_deb_assoc("del_key: key_idx %d, mac_addr %pM\n",
key_index, mac_addr);
@@ -1619,8 +1558,6 @@
int ret;
size_t i;
- lbs_deb_enter(LBS_DEB_CFG80211);
-
sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES) |
BIT(NL80211_STA_INFO_TX_PACKETS) |
BIT(NL80211_STA_INFO_RX_BYTES) |
@@ -1675,15 +1612,12 @@
return -EOPNOTSUPP;
}
- lbs_deb_enter(LBS_DEB_CFG80211);
-
if (priv->iface_running)
ret = lbs_set_iface_type(priv, type);
if (!ret)
priv->wdev->iftype = type;
- lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ret;
}
@@ -1713,8 +1647,6 @@
u8 *fake = fake_ie;
struct cfg80211_bss *bss;
- lbs_deb_enter(LBS_DEB_CFG80211);
-
/*
* For cfg80211_inform_bss, we'll need a fake IE, as we can't get
* the real IE from the firmware. So we fabricate a fake IE based on
@@ -1777,8 +1709,6 @@
netif_carrier_on(priv->dev);
if (!priv->tx_pending_len)
netif_wake_queue(priv->dev);
-
- lbs_deb_leave(LBS_DEB_CFG80211);
}
static int lbs_ibss_join_existing(struct lbs_private *priv,
@@ -1790,8 +1720,6 @@
u8 preamble = RADIO_PREAMBLE_SHORT;
int ret = 0;
- lbs_deb_enter(LBS_DEB_CFG80211);
-
/* TODO: set preamble based on scan result */
ret = lbs_set_radio(priv, preamble, 1);
if (ret)
@@ -1888,7 +1816,6 @@
lbs_join_post(priv, params, bss->bssid, bss->capability);
out:
- lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ret;
}
@@ -1904,8 +1831,6 @@
int ret = 0;
u16 capability;
- lbs_deb_enter(LBS_DEB_CFG80211);
-
ret = lbs_set_radio(priv, preamble, 1);
if (ret)
goto out;
@@ -1975,7 +1900,6 @@
lbs_join_post(priv, params, resp->bssid, capability);
out:
- lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ret;
}
@@ -1990,8 +1914,6 @@
if (dev == priv->mesh_dev)
return -EOPNOTSUPP;
- lbs_deb_enter(LBS_DEB_CFG80211);
-
if (!params->chandef.chan) {
ret = -ENOTSUPP;
goto out;
@@ -2015,7 +1937,6 @@
out:
- lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ret;
}
@@ -2029,8 +1950,6 @@
if (dev == priv->mesh_dev)
return -EOPNOTSUPP;
- lbs_deb_enter(LBS_DEB_CFG80211);
-
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd);
@@ -2038,7 +1957,6 @@
/* TODO: consider doing this at MACREG_INT_CODE_ADHOC_BCN_LOST time */
lbs_mac_event_disconnected(priv, true);
- lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ret;
}
@@ -2114,8 +2032,6 @@
int ret = 0;
struct wireless_dev *wdev;
- lbs_deb_enter(LBS_DEB_CFG80211);
-
wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
if (!wdev)
return ERR_PTR(-ENOMEM);
@@ -2127,12 +2043,10 @@
goto err_wiphy_new;
}
- lbs_deb_leave(LBS_DEB_CFG80211);
return wdev;
err_wiphy_new:
kfree(wdev);
- lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ERR_PTR(ret);
}
@@ -2155,15 +2069,11 @@
};
size_t i;
- lbs_deb_enter(LBS_DEB_CFG80211);
-
for (i = 0; i < ARRAY_SIZE(regmap); i++)
if (regmap[i].code == priv->regioncode) {
regulatory_hint(priv->wdev->wiphy, regmap[i].cn);
break;
}
-
- lbs_deb_leave(LBS_DEB_CFG80211);
}
static void lbs_reg_notifier(struct wiphy *wiphy,
@@ -2171,15 +2081,9 @@
{
struct lbs_private *priv = wiphy_priv(wiphy);
- lbs_deb_enter_args(LBS_DEB_CFG80211, "cfg80211 regulatory domain "
- "callback for domain %c%c\n", request->alpha2[0],
- request->alpha2[1]);
-
memcpy(priv->country_code, request->alpha2, sizeof(request->alpha2));
if (lbs_iface_active(priv))
lbs_set_11d_domain_info(priv);
-
- lbs_deb_leave(LBS_DEB_CFG80211);
}
/*
@@ -2192,8 +2096,6 @@
struct wireless_dev *wdev = priv->wdev;
int ret;
- lbs_deb_enter(LBS_DEB_CFG80211);
-
wdev->wiphy->max_scan_ssids = 1;
wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
@@ -2229,13 +2131,11 @@
lbs_cfg_set_regulatory_hint(priv);
- lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ret;
}
void lbs_scan_deinit(struct lbs_private *priv)
{
- lbs_deb_enter(LBS_DEB_CFG80211);
cancel_delayed_work_sync(&priv->scan_work);
}
@@ -2244,8 +2144,6 @@
{
struct wireless_dev *wdev = priv->wdev;
- lbs_deb_enter(LBS_DEB_CFG80211);
-
if (!wdev)
return;
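With the hand-rolled lbs_deb_enter()/lbs_deb_leave() tracing removed throughout this file (ftrace's function tracer can presumably supply the same enter/exit visibility without per-driver code), several functions also lose goto labels that existed only to reach the leave trace and can return directly. The shape of the change, with hypothetical names:

	struct demo_priv;
	int demo_send_command(struct demo_priv *priv);

	static int demo_cmd(struct demo_priv *priv)
	{
		if (!priv)
			return -EINVAL;	/* was: goto done, only to hit lbs_deb_leave() */

		return demo_send_command(priv);
	}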
diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
index 033ff88..c1f4229 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.c
+++ b/drivers/net/wireless/marvell/libertas/cmd.c
@@ -91,8 +91,6 @@
int ret = -1;
u32 i;
- lbs_deb_enter(LBS_DEB_CMD);
-
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
memcpy(cmd.permanentaddr, priv->current_addr, ETH_ALEN);
@@ -159,14 +157,12 @@
}
out:
- lbs_deb_leave(LBS_DEB_CMD);
return ret;
}
static int lbs_ret_host_sleep_cfg(struct lbs_private *priv, unsigned long dummy,
struct cmd_header *resp)
{
- lbs_deb_enter(LBS_DEB_CMD);
if (priv->is_host_sleep_activated) {
priv->is_host_sleep_configured = 0;
if (priv->psstate == PS_STATE_FULL_POWER) {
@@ -176,7 +172,7 @@
} else {
priv->is_host_sleep_configured = 1;
}
- lbs_deb_leave(LBS_DEB_CMD);
+
return 0;
}
@@ -236,8 +232,6 @@
struct cmd_ds_802_11_ps_mode cmd;
int ret = 0;
- lbs_deb_enter(LBS_DEB_CMD);
-
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(cmd_action);
@@ -262,7 +256,6 @@
lbs_cmd_async(priv, CMD_802_11_PS_MODE, &cmd.hdr, sizeof (cmd));
out:
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
return ret;
}
@@ -272,8 +265,6 @@
struct cmd_ds_802_11_sleep_params cmd;
int ret;
- lbs_deb_enter(LBS_DEB_CMD);
-
if (cmd_action == CMD_ACT_GET) {
memset(&cmd, 0, sizeof(cmd));
} else {
@@ -304,7 +295,6 @@
sp->sp_reserved = le16_to_cpu(cmd.reserved);
}
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
return ret;
}
@@ -312,8 +302,6 @@
{
int ret = 0;
- lbs_deb_enter(LBS_DEB_CMD);
-
if (priv->is_deep_sleep) {
if (!wait_event_interruptible_timeout(priv->ds_awake_q,
!priv->is_deep_sleep, (10 * HZ))) {
@@ -322,7 +310,6 @@
}
}
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
return ret;
}
@@ -330,8 +317,6 @@
{
int ret = 0;
- lbs_deb_enter(LBS_DEB_CMD);
-
if (deep_sleep) {
if (priv->is_deep_sleep != 1) {
lbs_deb_cmd("deep sleep: sleep\n");
@@ -358,7 +343,6 @@
}
}
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
return ret;
}
@@ -366,10 +350,9 @@
unsigned long dummy,
struct cmd_header *cmd)
{
- lbs_deb_enter(LBS_DEB_FW);
priv->is_host_sleep_activated = 1;
wake_up_interruptible(&priv->host_sleep_q);
- lbs_deb_leave(LBS_DEB_FW);
+
return 0;
}
@@ -379,8 +362,6 @@
int ret = 0;
uint32_t criteria = EHS_REMOVE_WAKEUP;
- lbs_deb_enter(LBS_DEB_CMD);
-
if (host_sleep) {
if (priv->is_host_sleep_activated != 1) {
memset(&cmd, 0, sizeof(cmd));
@@ -438,8 +419,6 @@
struct cmd_ds_802_11_snmp_mib cmd;
int ret;
- lbs_deb_enter(LBS_DEB_CMD);
-
memset(&cmd, 0, sizeof (cmd));
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_SET);
@@ -470,7 +449,6 @@
ret = lbs_cmd_with_response(priv, CMD_802_11_SNMP_MIB, &cmd);
out:
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
return ret;
}
@@ -488,8 +466,6 @@
struct cmd_ds_802_11_snmp_mib cmd;
int ret;
- lbs_deb_enter(LBS_DEB_CMD);
-
memset(&cmd, 0, sizeof (cmd));
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_GET);
@@ -513,7 +489,6 @@
}
out:
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
return ret;
}
@@ -533,8 +508,6 @@
struct cmd_ds_802_11_rf_tx_power cmd;
int ret;
- lbs_deb_enter(LBS_DEB_CMD);
-
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_GET);
@@ -548,7 +521,6 @@
*maxlevel = cmd.maxlevel;
}
- lbs_deb_leave(LBS_DEB_CMD);
return ret;
}
@@ -565,8 +537,6 @@
struct cmd_ds_802_11_rf_tx_power cmd;
int ret;
- lbs_deb_enter(LBS_DEB_CMD);
-
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_SET);
@@ -576,7 +546,6 @@
ret = lbs_cmd_with_response(priv, CMD_802_11_RF_TX_POWER, &cmd);
- lbs_deb_leave(LBS_DEB_CMD);
return ret;
}
@@ -608,7 +577,6 @@
ARPHRD_ETHER;
}
- lbs_deb_leave(LBS_DEB_CMD);
return ret;
}
@@ -624,8 +592,6 @@
struct cmd_ds_802_11_rf_channel cmd;
int ret = 0;
- lbs_deb_enter(LBS_DEB_CMD);
-
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_OPT_802_11_RF_CHANNEL_GET);
@@ -638,7 +604,6 @@
lbs_deb_cmd("current radio channel is %d\n", ret);
out:
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
return ret;
}
@@ -647,14 +612,12 @@
int ret;
/* the channel in f/w could be out of sync; get the current channel */
- lbs_deb_enter(LBS_DEB_ASSOC);
-
ret = lbs_get_channel(priv);
if (ret > 0) {
priv->channel = ret;
ret = 0;
}
- lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
+
return ret;
}
@@ -674,8 +637,6 @@
#endif
int ret = 0;
- lbs_deb_enter(LBS_DEB_CMD);
-
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_OPT_802_11_RF_CHANNEL_SET);
@@ -690,7 +651,6 @@
priv->channel);
out:
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
return ret;
}
@@ -708,8 +668,6 @@
struct cmd_ds_802_11_rssi cmd;
int ret = 0;
- lbs_deb_enter(LBS_DEB_CMD);
-
BUG_ON(rssi == NULL);
BUG_ON(nf == NULL);
@@ -724,7 +682,6 @@
*rssi = CAL_RSSI(le16_to_cpu(cmd.n_or_snr), le16_to_cpu(cmd.nf));
}
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
return ret;
}
@@ -752,7 +709,6 @@
size_t triplet_size;
int ret = 0;
- lbs_deb_enter(LBS_DEB_11D);
if (!priv->country_code[0])
goto out;
@@ -849,7 +805,6 @@
ret = lbs_cmd_with_response(priv, CMD_802_11D_DOMAIN_INFO, &cmd);
out:
- lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
return ret;
}
@@ -869,8 +824,6 @@
struct cmd_ds_reg_access cmd;
int ret = 0;
- lbs_deb_enter(LBS_DEB_CMD);
-
BUG_ON(value == NULL);
memset(&cmd, 0, sizeof(cmd));
@@ -894,7 +847,6 @@
}
out:
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
return ret;
}
@@ -914,8 +866,6 @@
struct cmd_ds_reg_access cmd;
int ret = 0;
- lbs_deb_enter(LBS_DEB_CMD);
-
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_SET);
@@ -933,7 +883,6 @@
ret = lbs_cmd_with_response(priv, reg, &cmd);
out:
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
return ret;
}
@@ -943,15 +892,13 @@
unsigned long flags;
int addtail = 1;
- lbs_deb_enter(LBS_DEB_HOST);
-
if (!cmdnode) {
lbs_deb_host("QUEUE_CMD: cmdnode is NULL\n");
- goto done;
+ return;
}
if (!cmdnode->cmdbuf->size) {
lbs_deb_host("DNLD_CMD: cmd size is zero\n");
- goto done;
+ return;
}
cmdnode->result = 0;
@@ -979,9 +926,6 @@
lbs_deb_host("QUEUE_CMD: inserted command 0x%04x into cmdpendingq\n",
le16_to_cpu(cmdnode->cmdbuf->command));
-
-done:
- lbs_deb_leave(LBS_DEB_HOST);
}
static void lbs_submit_command(struct lbs_private *priv,
@@ -994,8 +938,6 @@
int timeo = 3 * HZ;
int ret;
- lbs_deb_enter(LBS_DEB_HOST);
-
cmd = cmdnode->cmdbuf;
spin_lock_irqsave(&priv->driver_lock, flags);
@@ -1036,8 +978,6 @@
/* Setup the timer after transmit command */
mod_timer(&priv->command_timer, jiffies + timeo);
}
-
- lbs_deb_leave(LBS_DEB_HOST);
}
/*
@@ -1047,10 +987,8 @@
static void __lbs_cleanup_and_insert_cmd(struct lbs_private *priv,
struct cmd_ctrl_node *cmdnode)
{
- lbs_deb_enter(LBS_DEB_HOST);
-
if (!cmdnode)
- goto out;
+ return;
cmdnode->callback = NULL;
cmdnode->callback_arg = 0;
@@ -1058,8 +996,6 @@
memset(cmdnode->cmdbuf, 0, LBS_CMD_BUFFER_SIZE);
list_add_tail(&cmdnode->list, &priv->cmdfreeq);
- out:
- lbs_deb_leave(LBS_DEB_HOST);
}
static void lbs_cleanup_and_insert_cmd(struct lbs_private *priv,
@@ -1107,8 +1043,6 @@
struct cmd_ds_802_11_radio_control cmd;
int ret = -EINVAL;
- lbs_deb_enter(LBS_DEB_CMD);
-
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_SET);
cmd.control = 0;
@@ -1141,7 +1075,6 @@
ret = lbs_cmd_with_response(priv, CMD_802_11_RADIO_CONTROL, &cmd);
out:
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
return ret;
}
@@ -1149,15 +1082,11 @@
{
struct cmd_ds_mac_control cmd;
- lbs_deb_enter(LBS_DEB_CMD);
-
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(priv->mac_control);
cmd.reserved = 0;
lbs_cmd_async(priv, CMD_MAC_CONTROL, &cmd.hdr, sizeof(cmd));
-
- lbs_deb_leave(LBS_DEB_CMD);
}
int lbs_set_mac_control_sync(struct lbs_private *priv)
@@ -1165,14 +1094,11 @@
struct cmd_ds_mac_control cmd;
int ret = 0;
- lbs_deb_enter(LBS_DEB_CMD);
-
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(priv->mac_control);
cmd.reserved = 0;
ret = lbs_cmd_with_response(priv, CMD_MAC_CONTROL, &cmd);
- lbs_deb_leave(LBS_DEB_CMD);
return ret;
}
@@ -1191,8 +1117,6 @@
u32 i;
struct cmd_ctrl_node *cmdarray;
- lbs_deb_enter(LBS_DEB_HOST);
-
/* Allocate and initialize the command array */
bufsize = sizeof(struct cmd_ctrl_node) * LBS_NUM_CMD_BUFFERS;
if (!(cmdarray = kzalloc(bufsize, GFP_KERNEL))) {
@@ -1219,7 +1143,6 @@
ret = 0;
done:
- lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret);
return ret;
}
@@ -1235,8 +1158,6 @@
struct cmd_ctrl_node *cmdarray;
unsigned int i;
- lbs_deb_enter(LBS_DEB_HOST);
-
/* need to check if cmd array is allocated or not */
if (priv->cmd_array == NULL) {
lbs_deb_host("FREE_CMD_BUF: cmd_array is NULL\n");
@@ -1260,7 +1181,6 @@
}
done:
- lbs_deb_leave(LBS_DEB_HOST);
return 0;
}
@@ -1278,8 +1198,6 @@
struct cmd_ctrl_node *tempnode;
unsigned long flags;
- lbs_deb_enter(LBS_DEB_HOST);
-
if (!priv)
return NULL;
@@ -1296,7 +1214,6 @@
spin_unlock_irqrestore(&priv->driver_lock, flags);
- lbs_deb_leave(LBS_DEB_HOST);
return tempnode;
}
@@ -1318,8 +1235,6 @@
/* Debug group is LBS_DEB_THREAD and not LBS_DEB_HOST, because the
* only caller to us is lbs_thread() and we get called even when a
* data packet is received */
- lbs_deb_enter(LBS_DEB_THREAD);
-
spin_lock_irqsave(&priv->driver_lock, flags);
if (priv->cur_cmd) {
@@ -1440,7 +1355,6 @@
ret = 0;
done:
- lbs_deb_leave(LBS_DEB_THREAD);
return ret;
}
@@ -1449,7 +1363,6 @@
unsigned long flags;
int ret;
- lbs_deb_enter(LBS_DEB_HOST);
lbs_deb_hex(LBS_DEB_HOST, "sleep confirm", (u8 *) &confirm_sleep,
sizeof(confirm_sleep));
@@ -1457,7 +1370,7 @@
sizeof(confirm_sleep));
if (ret) {
netdev_alert(priv->dev, "confirm_sleep failed\n");
- goto out;
+ return;
}
spin_lock_irqsave(&priv->driver_lock, flags);
@@ -1475,9 +1388,6 @@
priv->psstate = PS_STATE_SLEEP;
spin_unlock_irqrestore(&priv->driver_lock, flags);
-
-out:
- lbs_deb_leave(LBS_DEB_HOST);
}
/**
@@ -1493,8 +1403,6 @@
unsigned long flags =0;
int allowed = 1;
- lbs_deb_enter(LBS_DEB_HOST);
-
spin_lock_irqsave(&priv->driver_lock, flags);
if (priv->dnld_sent) {
allowed = 0;
@@ -1520,8 +1428,6 @@
} else {
lbs_deb_host("sleep confirm has been delayed\n");
}
-
- lbs_deb_leave(LBS_DEB_HOST);
}
@@ -1596,8 +1502,6 @@
{
struct cmd_ctrl_node *cmdnode;
- lbs_deb_enter(LBS_DEB_HOST);
-
if (priv->surpriseremoved) {
lbs_deb_host("PREP_CMD: card removed\n");
cmdnode = ERR_PTR(-ENOENT);
@@ -1643,17 +1547,14 @@
wake_up(&priv->waitq);
done:
- lbs_deb_leave_args(LBS_DEB_HOST, "ret %p", cmdnode);
return cmdnode;
}
void lbs_cmd_async(struct lbs_private *priv, uint16_t command,
struct cmd_header *in_cmd, int in_cmd_size)
{
- lbs_deb_enter(LBS_DEB_CMD);
__lbs_cmd_async(priv, command, in_cmd, in_cmd_size,
lbs_cmd_async_callback, 0);
- lbs_deb_leave(LBS_DEB_CMD);
}
int __lbs_cmd(struct lbs_private *priv, uint16_t command,
@@ -1665,8 +1566,6 @@
unsigned long flags;
int ret = 0;
- lbs_deb_enter(LBS_DEB_HOST);
-
cmdnode = __lbs_cmd_async(priv, command, in_cmd, in_cmd_size,
callback, callback_arg);
if (IS_ERR(cmdnode)) {
@@ -1693,7 +1592,6 @@
spin_unlock_irqrestore(&priv->driver_lock, flags);
done:
- lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret);
return ret;
}
EXPORT_SYMBOL_GPL(__lbs_cmd);
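Note on the hunks above: once the leave-trace at a cleanup label is removed, labels whose only purpose was to reach that trace become dead, so several sites convert `goto done`/`goto out` into a direct return. A minimal before/after sketch (function bodies illustrative, not from the driver):

	static void queue_cmd_before(struct cmd_ctrl_node *cmdnode)
	{
		if (!cmdnode)
			goto done;		/* label existed only for the trace */
		/* ... enqueue ... */
	done:
		lbs_deb_leave(LBS_DEB_HOST);
	}

	static void queue_cmd_after(struct cmd_ctrl_node *cmdnode)
	{
		if (!cmdnode)
			return;			/* trace gone, label gone */
		/* ... enqueue ... */
	}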
diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c
index c753e36..aaf0161 100644
--- a/drivers/net/wireless/marvell/libertas/cmdresp.c
+++ b/drivers/net/wireless/marvell/libertas/cmdresp.c
@@ -32,8 +32,6 @@
if (priv->connect_status != LBS_CONNECTED)
return;
- lbs_deb_enter(LBS_DEB_ASSOC);
-
/*
* Cisco AP sends EAP failure and de-auth in less than 0.5 ms.
* It causes problem in the Supplicant
@@ -61,7 +59,6 @@
lbs_deb_cmd("disconnected, so exit PS mode\n");
lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, false);
}
- lbs_deb_leave(LBS_DEB_ASSOC);
}
int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len)
@@ -72,8 +69,6 @@
unsigned long flags;
uint16_t result;
- lbs_deb_enter(LBS_DEB_HOST);
-
mutex_lock(&priv->lock);
spin_lock_irqsave(&priv->driver_lock, flags);
@@ -221,7 +216,6 @@
done:
mutex_unlock(&priv->lock);
- lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret);
return ret;
}
@@ -230,8 +224,6 @@
int ret = 0;
struct cmd_header cmd;
- lbs_deb_enter(LBS_DEB_CMD);
-
switch (event) {
case MACREG_INT_CODE_LINK_SENSED:
lbs_deb_cmd("EVENT: link sensed\n");
@@ -359,6 +351,5 @@
break;
}
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
return ret;
}
diff --git a/drivers/net/wireless/marvell/libertas/defs.h b/drivers/net/wireless/marvell/libertas/defs.h
index 407784a..d322144 100644
--- a/drivers/net/wireless/marvell/libertas/defs.h
+++ b/drivers/net/wireless/marvell/libertas/defs.h
@@ -55,15 +55,6 @@
#define LBS_DEB_LL(grp, grpnam, fmt, args...) do {} while (0)
#endif
-#define lbs_deb_enter(grp) \
- LBS_DEB_LL(grp | LBS_DEB_ENTER, " enter", "%s()\n", __func__);
-#define lbs_deb_enter_args(grp, fmt, args...) \
- LBS_DEB_LL(grp | LBS_DEB_ENTER, " enter", "%s(" fmt ")\n", __func__, ## args);
-#define lbs_deb_leave(grp) \
- LBS_DEB_LL(grp | LBS_DEB_LEAVE, " leave", "%s()\n", __func__);
-#define lbs_deb_leave_args(grp, fmt, args...) \
- LBS_DEB_LL(grp | LBS_DEB_LEAVE, " leave", "%s(), " fmt "\n", \
- __func__, ##args);
#define lbs_deb_main(fmt, args...) LBS_DEB_LL(LBS_DEB_MAIN, " main", fmt, ##args)
#define lbs_deb_net(fmt, args...) LBS_DEB_LL(LBS_DEB_NET, " net", fmt, ##args)
#define lbs_deb_mesh(fmt, args...) LBS_DEB_LL(LBS_DEB_MESH, " mesh", fmt, ##args)
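The four macros deleted here were the only users of the LBS_DEB_ENTER and LBS_DEB_LEAVE flags; every lbs_deb_enter()/lbs_deb_leave*() removal in this series is one of their call sites. The same entry/exit information is available generically from ftrace, which is the usual argument for dropping hand-rolled tracing. A hedged sketch of the replacement workflow, assuming debugfs is mounted in the usual place:

	/*
	 * Shell steps shown as comments:
	 *
	 *   # echo 'lbs_*' > /sys/kernel/debug/tracing/set_ftrace_filter
	 *   # echo function_graph > /sys/kernel/debug/tracing/current_tracer
	 *   # cat /sys/kernel/debug/tracing/trace
	 *
	 * function_graph reports entry, exit and duration for every lbs_*
	 * function with no per-driver macros at all.
	 */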
diff --git a/drivers/net/wireless/marvell/libertas/ethtool.c b/drivers/net/wireless/marvell/libertas/ethtool.c
index f955b2d..693868f 100644
--- a/drivers/net/wireless/marvell/libertas/ethtool.c
+++ b/drivers/net/wireless/marvell/libertas/ethtool.c
@@ -41,8 +41,6 @@
struct cmd_ds_802_11_eeprom_access cmd;
int ret;
- lbs_deb_enter(LBS_DEB_ETHTOOL);
-
if (eeprom->offset + eeprom->len > LBS_EEPROM_LEN ||
eeprom->len > LBS_EEPROM_READ_LEN) {
ret = -EINVAL;
@@ -59,7 +57,6 @@
memcpy(bytes, cmd.value, eeprom->len);
out:
- lbs_deb_leave_args(LBS_DEB_ETHTOOL, "ret %d", ret);
return ret;
}
diff --git a/drivers/net/wireless/marvell/libertas/if_cs.c b/drivers/net/wireless/marvell/libertas/if_cs.c
index f499efc..7d88223 100644
--- a/drivers/net/wireless/marvell/libertas/if_cs.c
+++ b/drivers/net/wireless/marvell/libertas/if_cs.c
@@ -336,13 +336,11 @@
static inline void if_cs_enable_ints(struct if_cs_card *card)
{
- lbs_deb_enter(LBS_DEB_CS);
if_cs_write16(card, IF_CS_HOST_INT_MASK, 0);
}
static inline void if_cs_disable_ints(struct if_cs_card *card)
{
- lbs_deb_enter(LBS_DEB_CS);
if_cs_write16(card, IF_CS_HOST_INT_MASK, IF_CS_BIT_MASK);
}
@@ -355,7 +353,6 @@
int ret = -1;
int loops = 0;
- lbs_deb_enter(LBS_DEB_CS);
if_cs_disable_ints(card);
/* Is hardware ready? */
@@ -388,7 +385,6 @@
done:
if_cs_enable_ints(card);
- lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
return ret;
}
@@ -400,7 +396,6 @@
struct if_cs_card *card = (struct if_cs_card *)priv->card;
u16 status;
- lbs_deb_enter(LBS_DEB_CS);
if_cs_disable_ints(card);
status = if_cs_read16(card, IF_CS_CARD_STATUS);
@@ -416,8 +411,6 @@
if_cs_write16(card, IF_CS_HOST_STATUS, IF_CS_BIT_TX);
if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_TX);
if_cs_enable_ints(card);
-
- lbs_deb_leave(LBS_DEB_CS);
}
/*
@@ -429,8 +422,6 @@
int ret = -1;
u16 status;
- lbs_deb_enter(LBS_DEB_CS);
-
/* is hardware ready? */
status = if_cs_read16(priv->card, IF_CS_CARD_STATUS);
if ((status & IF_CS_BIT_RESP) == 0) {
@@ -463,7 +454,6 @@
spin_unlock_irqrestore(&priv->driver_lock, flags);
out:
- lbs_deb_leave_args(LBS_DEB_CS, "ret %d, len %d", ret, *len);
return ret;
}
@@ -473,8 +463,6 @@
u16 len;
u8 *data;
- lbs_deb_enter(LBS_DEB_CS);
-
len = if_cs_read16(priv->card, IF_CS_READ_LEN);
if (len == 0 || len > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) {
netdev_err(priv->dev,
@@ -501,7 +489,6 @@
if_cs_write16(priv->card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_RX);
out:
- lbs_deb_leave_args(LBS_DEB_CS, "ret %p", skb);
return skb;
}
@@ -511,8 +498,6 @@
struct lbs_private *priv = card->priv;
u16 cause;
- lbs_deb_enter(LBS_DEB_CS);
-
/* Ask card interrupt cause register if there is something for us */
cause = if_cs_read16(card, IF_CS_CARD_INT_CAUSE);
lbs_deb_cs("cause 0x%04x\n", cause);
@@ -569,7 +554,6 @@
/* Clear interrupt cause */
if_cs_write16(card, IF_CS_CARD_INT_CAUSE, cause & IF_CS_BIT_MASK);
- lbs_deb_leave(LBS_DEB_CS);
return IRQ_HANDLED;
}
@@ -591,8 +575,6 @@
int sent = 0;
u8 scratch;
- lbs_deb_enter(LBS_DEB_CS);
-
/*
* This is the only place where an unaligned register access happens on
* the CF8305 card, therefore for the sake of speed of the driver, we do
@@ -671,7 +653,6 @@
}
done:
- lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
return ret;
}
@@ -683,8 +664,6 @@
int len = 0;
int sent;
- lbs_deb_enter(LBS_DEB_CS);
-
lbs_deb_cs("fw size %td\n", fw->size);
ret = if_cs_poll_while_fw_download(card, IF_CS_SQ_READ_LOW,
@@ -734,7 +713,6 @@
pr_err("firmware download failed\n");
done:
- lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
return ret;
}
@@ -792,8 +770,6 @@
{
int ret = -1;
- lbs_deb_enter_args(LBS_DEB_CS, "type %d, bytes %d", type, nb);
-
switch (type) {
case MVMS_DAT:
priv->dnld_sent = DNLD_DATA_SENT;
@@ -809,7 +785,6 @@
__func__, type);
}
- lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
return ret;
}
@@ -818,14 +793,10 @@
{
struct if_cs_card *card = p_dev->priv;
- lbs_deb_enter(LBS_DEB_CS);
-
free_irq(p_dev->irq, card);
pcmcia_disable_device(p_dev);
if (card->iobase)
ioport_unmap(card->iobase);
-
- lbs_deb_leave(LBS_DEB_CS);
}
@@ -850,8 +821,6 @@
struct lbs_private *priv;
struct if_cs_card *card;
- lbs_deb_enter(LBS_DEB_CS);
-
card = kzalloc(sizeof(struct if_cs_card), GFP_KERNEL);
if (!card)
goto out;
@@ -961,7 +930,6 @@
out1:
pcmcia_disable_device(p_dev);
out:
- lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
return ret;
}
@@ -970,15 +938,11 @@
{
struct if_cs_card *card = p_dev->priv;
- lbs_deb_enter(LBS_DEB_CS);
-
lbs_stop_card(card->priv);
lbs_remove_card(card->priv);
if_cs_disable_ints(card);
if_cs_release(p_dev);
kfree(card);
-
- lbs_deb_leave(LBS_DEB_CS);
}
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 47f4a14..2300e79 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -211,8 +211,6 @@
unsigned long flags;
u8 i;
- lbs_deb_enter(LBS_DEB_SDIO);
-
if (size > LBS_CMD_BUFFER_SIZE) {
lbs_deb_sdio("response packet too large (%d bytes)\n",
(int)size);
@@ -233,7 +231,6 @@
ret = 0;
out:
- lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
return ret;
}
@@ -242,9 +239,6 @@
{
int ret;
struct sk_buff *skb;
- char *data;
-
- lbs_deb_enter(LBS_DEB_SDIO);
if (size > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) {
lbs_deb_sdio("response packet too large (%d bytes)\n",
@@ -261,17 +255,13 @@
skb_reserve(skb, NET_IP_ALIGN);
- data = skb_put(skb, size);
-
- memcpy(data, buffer, size);
+ skb_put_data(skb, buffer, size);
lbs_process_rxed_packet(card->priv, skb);
ret = 0;
out:
- lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
-
return ret;
}
@@ -281,8 +271,6 @@
int ret;
u32 event;
- lbs_deb_enter(LBS_DEB_SDIO);
-
if (card->model == MODEL_8385) {
event = sdio_readb(card->func, IF_SDIO_EVENT, &ret);
if (ret)
@@ -307,8 +295,6 @@
ret = 0;
out:
- lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
-
return ret;
}
@@ -337,8 +323,6 @@
int ret;
u16 size, type, chunk;
- lbs_deb_enter(LBS_DEB_SDIO);
-
size = if_sdio_read_rx_len(card, &ret);
if (ret)
goto out;
@@ -410,8 +394,6 @@
if (ret)
pr_err("problem fetching packet from firmware\n");
- lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
-
return ret;
}
@@ -422,8 +404,6 @@
int ret;
unsigned long flags;
- lbs_deb_enter(LBS_DEB_SDIO);
-
card = container_of(work, struct if_sdio_card, packet_worker);
while (1) {
@@ -451,8 +431,6 @@
kfree(packet);
}
-
- lbs_deb_leave(LBS_DEB_SDIO);
}
/********************************************************************/
@@ -471,8 +449,6 @@
const u8 *firmware;
size_t size;
- lbs_deb_enter(LBS_DEB_SDIO);
-
chunk_buffer = kzalloc(64, GFP_KERNEL);
if (!chunk_buffer) {
ret = -ENOMEM;
@@ -556,7 +532,6 @@
if (ret)
pr_err("failed to load helper firmware\n");
- lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
return ret;
}
@@ -570,8 +545,6 @@
const u8 *firmware;
size_t size, req_size;
- lbs_deb_enter(LBS_DEB_SDIO);
-
chunk_buffer = kzalloc(512, GFP_KERNEL);
if (!chunk_buffer) {
ret = -ENOMEM;
@@ -691,7 +664,6 @@
if (ret)
pr_err("failed to load firmware\n");
- lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
return ret;
}
@@ -725,8 +697,6 @@
int ret;
u16 scratch;
- lbs_deb_enter(LBS_DEB_SDIO);
-
/*
* Disable interrupts
*/
@@ -769,7 +739,6 @@
fw_table, if_sdio_do_prog_firmware);
out:
- lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
return ret;
}
@@ -948,8 +917,6 @@
u16 size;
unsigned long flags;
- lbs_deb_enter_args(LBS_DEB_SDIO, "type %d, bytes %d", type, nb);
-
card = priv->card;
if (nb > (65536 - sizeof(struct if_sdio_packet) - 4)) {
@@ -1013,8 +980,6 @@
ret = 0;
out:
- lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
-
return ret;
}
@@ -1040,7 +1005,6 @@
struct if_sdio_card *card = priv->card;
int ret = -1;
- lbs_deb_enter(LBS_DEB_SDIO);
sdio_claim_host(card->func);
sdio_writeb(card->func, HOST_POWER_UP, CONFIGURATION_REG, &ret);
@@ -1048,7 +1012,7 @@
netdev_err(priv->dev, "sdio_writeb failed!\n");
sdio_release_host(card->func);
- lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
+
return ret;
}
@@ -1057,7 +1021,6 @@
struct if_sdio_card *card = priv->card;
int ret = -1;
- lbs_deb_enter(LBS_DEB_SDIO);
sdio_claim_host(card->func);
sdio_writeb(card->func, 0, CONFIGURATION_REG, &ret);
@@ -1065,7 +1028,7 @@
netdev_err(priv->dev, "sdio_writeb failed!\n");
sdio_release_host(card->func);
- lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
+
return ret;
}
@@ -1143,19 +1106,17 @@
struct if_sdio_card *card;
u8 cause;
- lbs_deb_enter(LBS_DEB_SDIO);
-
card = sdio_get_drvdata(func);
cause = sdio_readb(card->func, IF_SDIO_H_INT_STATUS, &ret);
if (ret || !cause)
- goto out;
+ return;
lbs_deb_sdio("interrupt: 0x%X\n", (unsigned)cause);
sdio_writeb(card->func, ~cause, IF_SDIO_H_INT_STATUS, &ret);
if (ret)
- goto out;
+ return;
/*
* Ignore the define name, this really means the card has
@@ -1169,13 +1130,8 @@
if (cause & IF_SDIO_H_INT_UPLD) {
ret = if_sdio_card_to_host(card);
if (ret)
- goto out;
+ return;
}
-
- ret = 0;
-
-out:
- lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
}
static int if_sdio_probe(struct sdio_func *func,
@@ -1187,8 +1143,6 @@
unsigned int model;
struct if_sdio_packet *packet;
- lbs_deb_enter(LBS_DEB_SDIO);
-
for (i = 0;i < func->card->num_info;i++) {
if (sscanf(func->card->info[i],
"802.11 SDIO ID: %x", &model) == 1)
@@ -1273,8 +1227,6 @@
goto err_activate_card;
out:
- lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
-
return ret;
err_activate_card:
@@ -1298,8 +1250,6 @@
struct if_sdio_card *card;
struct if_sdio_packet *packet;
- lbs_deb_enter(LBS_DEB_SDIO);
-
card = sdio_get_drvdata(func);
/* Undo decrement done above in if_sdio_probe */
@@ -1335,7 +1285,6 @@
}
kfree(card);
- lbs_deb_leave(LBS_DEB_SDIO);
}
static int if_sdio_suspend(struct device *dev)
@@ -1415,8 +1364,6 @@
{
int ret = 0;
- lbs_deb_enter(LBS_DEB_SDIO);
-
printk(KERN_INFO "libertas_sdio: Libertas SDIO driver\n");
printk(KERN_INFO "libertas_sdio: Copyright Pierre Ossman\n");
@@ -1425,23 +1372,17 @@
/* Clear the flag in case user removes the card. */
user_rmmod = 0;
- lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
-
return ret;
}
static void __exit if_sdio_exit_module(void)
{
- lbs_deb_enter(LBS_DEB_SDIO);
-
/* Set the flag as user is removing this module. */
user_rmmod = 1;
cancel_work_sync(&card_reset_work);
sdio_unregister_driver(&if_sdio_driver);
-
- lbs_deb_leave(LBS_DEB_SDIO);
}
module_init(if_sdio_init_module);
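Besides the trace removal, the if_sdio.c hunk above replaces an open-coded skb_put() + memcpy() pair with skb_put_data(), a helper introduced in this same cycle. The two forms are equivalent; a self-contained sketch:

	#include <linux/skbuff.h>

	static void copy_into_skb(struct sk_buff *skb, const void *buf,
				  unsigned int len)
	{
		/* Old two-step form: grow the skb, then copy into the tail:
		 *	memcpy(skb_put(skb, len), buf, len);
		 * The new helper does the same in one call and returns a
		 * pointer to the copied area should the caller need it.
		 */
		skb_put_data(skb, buf, len);
	}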
diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c
index 7b4955c..e9aec6c 100644
--- a/drivers/net/wireless/marvell/libertas/if_spi.c
+++ b/drivers/net/wireless/marvell/libertas/if_spi.c
@@ -466,8 +466,6 @@
const u8 *fw;
u8 temp[HELPER_FW_LOAD_CHUNK_SZ];
- lbs_deb_enter(LBS_DEB_SPI);
-
err = spu_set_interrupt_mode(card, 1, 0);
if (err)
goto out;
@@ -533,7 +531,7 @@
out:
if (err)
pr_err("failed to load helper firmware (err=%d)\n", err);
- lbs_deb_leave_args(LBS_DEB_SPI, "err %d", err);
+
return err;
}
@@ -588,8 +586,6 @@
const u8 *fw;
u16 num_crc_errs;
- lbs_deb_enter(LBS_DEB_SPI);
-
err = spu_set_interrupt_mode(card, 1, 0);
if (err)
goto out;
@@ -666,7 +662,7 @@
out:
if (err)
pr_err("failed to load firmware (err=%d)\n", err);
- lbs_deb_leave_args(LBS_DEB_SPI, "err %d", err);
+
return err;
}
@@ -699,8 +695,6 @@
*/
BUILD_BUG_ON(IF_SPI_CMD_BUF_SIZE % 4 != 0);
- lbs_deb_enter(LBS_DEB_SPI);
-
/* How many bytes are there to read? */
err = spu_read_u16(card, IF_SPI_SCRATCH_2_REG, &len);
if (err)
@@ -735,7 +729,7 @@
out:
if (err)
netdev_err(priv->dev, "%s: err=%d\n", __func__, err);
- lbs_deb_leave(LBS_DEB_SPI);
+
return err;
}
@@ -748,8 +742,6 @@
u16 len;
int err = 0;
- lbs_deb_enter(LBS_DEB_SPI);
-
/* How many bytes are there to read? */
err = spu_read_u16(card, IF_SPI_SCRATCH_1_REG, &len);
if (err)
@@ -794,7 +786,7 @@
out:
if (err)
netdev_err(priv->dev, "%s: err=%d\n", __func__, err);
- lbs_deb_leave(LBS_DEB_SPI);
+
return err;
}
@@ -870,8 +862,6 @@
card = container_of(work, struct if_spi_card, packet_work);
priv = card->priv;
- lbs_deb_enter(LBS_DEB_SPI);
-
/*
* Read the host interrupt status register to see what we
* can do.
@@ -943,8 +933,6 @@
err:
if (err)
netdev_err(priv->dev, "%s: got error %d\n", __func__, err);
-
- lbs_deb_leave(LBS_DEB_SPI);
}
/*
@@ -962,8 +950,6 @@
struct if_spi_packet *packet;
u16 blen;
- lbs_deb_enter_args(LBS_DEB_SPI, "type %d, bytes %d", type, nb);
-
if (nb == 0) {
netdev_err(priv->dev, "%s: invalid size requested: %d\n",
__func__, nb);
@@ -1004,7 +990,6 @@
/* Queue spi xfer work */
queue_work(card->workqueue, &card->packet_work);
out:
- lbs_deb_leave_args(LBS_DEB_SPI, "err=%d", err);
return err;
}
@@ -1035,8 +1020,6 @@
const struct firmware *helper = NULL;
const struct firmware *mainfw = NULL;
- lbs_deb_enter(LBS_DEB_SPI);
-
err = spu_init(card, card->pdata->use_dummy_writes);
if (err)
goto out;
@@ -1093,7 +1076,6 @@
goto out;
out:
- lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
return err;
}
@@ -1126,8 +1108,6 @@
struct libertas_spi_platform_data *pdata = dev_get_platdata(&spi->dev);
int err = 0;
- lbs_deb_enter(LBS_DEB_SPI);
-
if (!pdata) {
err = -EINVAL;
goto out;
@@ -1221,7 +1201,6 @@
if (pdata->teardown)
pdata->teardown(spi);
out:
- lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
return err;
}
@@ -1231,7 +1210,6 @@
struct lbs_private *priv = card->priv;
lbs_deb_spi("libertas_spi_remove\n");
- lbs_deb_enter(LBS_DEB_SPI);
cancel_work_sync(&card->resume_work);
@@ -1243,7 +1221,7 @@
if (card->pdata->teardown)
card->pdata->teardown(spi);
free_if_spi_card(card);
- lbs_deb_leave(LBS_DEB_SPI);
+
return 0;
}
@@ -1297,18 +1275,16 @@
static int __init if_spi_init_module(void)
{
int ret = 0;
- lbs_deb_enter(LBS_DEB_SPI);
+
printk(KERN_INFO "libertas_spi: Libertas SPI driver\n");
ret = spi_register_driver(&libertas_spi_driver);
- lbs_deb_leave(LBS_DEB_SPI);
+
return ret;
}
static void __exit if_spi_exit_module(void)
{
- lbs_deb_enter(LBS_DEB_SPI);
spi_unregister_driver(&libertas_spi_driver);
- lbs_deb_leave(LBS_DEB_SPI);
}
module_init(if_spi_init_module);
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index aba0c99..e53025e 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -111,8 +111,6 @@
*/
static void if_usb_free(struct if_usb_card *cardp)
{
- lbs_deb_enter(LBS_DEB_USB);
-
/* Unlink tx & rx urb */
usb_kill_urb(cardp->tx_urb);
usb_kill_urb(cardp->rx_urb);
@@ -125,8 +123,6 @@
kfree(cardp->ep_out_buf);
cardp->ep_out_buf = NULL;
-
- lbs_deb_leave(LBS_DEB_USB);
}
static void if_usb_setup_firmware(struct lbs_private *priv)
@@ -306,8 +302,6 @@
struct if_usb_card *cardp = usb_get_intfdata(intf);
struct lbs_private *priv = cardp->priv;
- lbs_deb_enter(LBS_DEB_MAIN);
-
cardp->surprise_removed = 1;
if (priv) {
@@ -320,8 +314,6 @@
usb_set_intfdata(intf, NULL);
usb_put_dev(interface_to_usbdev(intf));
-
- lbs_deb_leave(LBS_DEB_MAIN);
}
/**
@@ -388,8 +380,6 @@
struct cmd_header *cmd = cardp->ep_out_buf + 4;
int ret;
- lbs_deb_enter(LBS_DEB_USB);
-
*(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);
cmd->command = cpu_to_le16(CMD_802_11_RESET);
@@ -407,8 +397,6 @@
if_usb_reset_olpc_card(NULL);
#endif
- lbs_deb_leave_args(LBS_DEB_USB, "ret %d", ret);
-
return ret;
}
@@ -671,8 +659,6 @@
__le32 *pkt = (__le32 *)(skb->data + IPFIELD_ALIGN_OFFSET);
uint32_t event;
- lbs_deb_enter(LBS_DEB_USB);
-
if (recvlength) {
if (urb->status) {
lbs_deb_usbd(&cardp->udev->dev, "RX URB failed: %d\n",
@@ -688,7 +674,7 @@
recvlength, recvtype);
} else if (urb->status) {
kfree_skb(skb);
- goto rx_exit;
+ return;
}
switch (recvtype) {
@@ -724,8 +710,6 @@
setup_for_next:
if_usb_submit_rx_urb(cardp);
-rx_exit:
- lbs_deb_leave(LBS_DEB_USB);
}
/**
@@ -835,8 +819,6 @@
int i = 0;
static int reset_count = 10;
- lbs_deb_enter(LBS_DEB_USB);
-
if (ret) {
pr_err("failed to find firmware (%d)\n", ret);
goto done;
@@ -942,7 +924,6 @@
done:
cardp->fw = NULL;
- lbs_deb_leave(LBS_DEB_USB);
}
@@ -953,8 +934,6 @@
struct lbs_private *priv = cardp->priv;
int ret;
- lbs_deb_enter(LBS_DEB_USB);
-
if (priv->psstate != PS_STATE_FULL_POWER) {
ret = -1;
goto out;
@@ -978,7 +957,6 @@
usb_kill_urb(cardp->rx_urb);
out:
- lbs_deb_leave(LBS_DEB_USB);
return ret;
}
@@ -987,13 +965,10 @@
struct if_usb_card *cardp = usb_get_intfdata(intf);
struct lbs_private *priv = cardp->priv;
- lbs_deb_enter(LBS_DEB_USB);
-
if_usb_submit_rx_urb(cardp);
lbs_resume(priv);
- lbs_deb_leave(LBS_DEB_USB);
return 0;
}
#else
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index e350020..55f8c99 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -180,7 +180,6 @@
struct lbs_private *priv = dev->ml_priv;
int ret = 0;
- lbs_deb_enter(LBS_DEB_NET);
if (!priv->iface_running) {
ret = lbs_start_iface(priv);
if (ret)
@@ -197,7 +196,6 @@
spin_unlock_irq(&priv->driver_lock);
out:
- lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret);
return ret;
}
@@ -216,8 +214,6 @@
unsigned long flags;
int ret = 0;
- lbs_deb_enter(LBS_DEB_MAIN);
-
spin_lock_irqsave(&priv->driver_lock, flags);
priv->iface_running = false;
kfree_skb(priv->currenttxskb);
@@ -236,7 +232,6 @@
if (priv->power_save)
ret = priv->power_save(priv);
- lbs_deb_leave(LBS_DEB_MAIN);
return ret;
}
@@ -250,8 +245,6 @@
{
struct lbs_private *priv = dev->ml_priv;
- lbs_deb_enter(LBS_DEB_NET);
-
if (priv->connect_status == LBS_CONNECTED)
lbs_disconnect(priv, WLAN_REASON_DEAUTH_LEAVING);
@@ -269,7 +262,6 @@
if (!lbs_iface_active(priv))
lbs_stop_iface(priv);
- lbs_deb_leave(LBS_DEB_NET);
return 0;
}
@@ -277,8 +269,6 @@
{
unsigned long flags;
- lbs_deb_enter(LBS_DEB_THREAD);
-
spin_lock_irqsave(&priv->driver_lock, flags);
del_timer(&priv->tx_lockup_timer);
@@ -291,7 +281,6 @@
}
spin_unlock_irqrestore(&priv->driver_lock, flags);
- lbs_deb_leave(LBS_DEB_THREAD);
}
EXPORT_SYMBOL_GPL(lbs_host_to_card_done);
@@ -301,8 +290,6 @@
struct lbs_private *priv = dev->ml_priv;
struct sockaddr *phwaddr = addr;
- lbs_deb_enter(LBS_DEB_NET);
-
/*
* Can only set MAC address when all interfaces are down, to be written
* to the hardware when one of them is brought up.
@@ -318,7 +305,6 @@
if (priv->mesh_dev)
memcpy(priv->mesh_dev->dev_addr, phwaddr->sa_data, ETH_ALEN);
- lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret);
return ret;
}
@@ -378,8 +364,6 @@
int nr_addrs;
int old_mac_control = priv->mac_control;
- lbs_deb_enter(LBS_DEB_NET);
-
if (netif_running(priv->dev))
dev_flags |= priv->dev->flags;
if (priv->mesh_dev && netif_running(priv->mesh_dev))
@@ -424,8 +408,6 @@
out_set_mac_control:
if (priv->mac_control != old_mac_control)
lbs_set_mac_control(priv);
-
- lbs_deb_leave(LBS_DEB_NET);
}
static void lbs_set_mcast_worker(struct work_struct *work)
@@ -455,8 +437,6 @@
struct lbs_private *priv = dev->ml_priv;
wait_queue_t wait;
- lbs_deb_enter(LBS_DEB_THREAD);
-
init_waitqueue_entry(&wait, current);
for (;;) {
@@ -648,7 +628,6 @@
del_timer(&priv->tx_lockup_timer);
del_timer(&priv->auto_deepsleep_timer);
- lbs_deb_leave(LBS_DEB_THREAD);
return 0;
}
@@ -664,8 +643,6 @@
int ret = -1;
s16 curlevel = 0, minlevel = 0, maxlevel = 0;
- lbs_deb_enter(LBS_DEB_FW);
-
/* Read MAC address from firmware */
eth_broadcast_addr(priv->current_addr);
ret = lbs_update_hw_spec(priv);
@@ -687,7 +664,6 @@
ret = lbs_set_mac_control_sync(priv);
done:
- lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
return ret;
}
@@ -695,8 +671,6 @@
{
int ret;
- lbs_deb_enter(LBS_DEB_FW);
-
if (priv->is_deep_sleep) {
ret = lbs_set_deep_sleep(priv, 0);
if (ret) {
@@ -713,7 +687,6 @@
if (priv->mesh_dev)
netif_device_detach(priv->mesh_dev);
- lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
return ret;
}
EXPORT_SYMBOL_GPL(lbs_suspend);
@@ -722,8 +695,6 @@
{
int ret;
- lbs_deb_enter(LBS_DEB_FW);
-
ret = lbs_set_host_sleep(priv, 0);
netif_device_attach(priv->dev);
@@ -741,7 +712,6 @@
if (priv->setup_fw_on_resume)
ret = lbs_setup_firmware(priv);
- lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
return ret;
}
EXPORT_SYMBOL_GPL(lbs_resume);
@@ -757,7 +727,6 @@
struct lbs_private *priv = (struct lbs_private *)data;
unsigned long flags;
- lbs_deb_enter(LBS_DEB_CMD);
spin_lock_irqsave(&priv->driver_lock, flags);
if (!priv->cur_cmd)
@@ -778,7 +747,6 @@
wake_up(&priv->waitq);
out:
spin_unlock_irqrestore(&priv->driver_lock, flags);
- lbs_deb_leave(LBS_DEB_CMD);
}
/**
@@ -793,7 +761,6 @@
struct lbs_private *priv = (struct lbs_private *)data;
unsigned long flags;
- lbs_deb_enter(LBS_DEB_TX);
spin_lock_irqsave(&priv->driver_lock, flags);
netdev_info(priv->dev, "TX lockup detected\n");
@@ -804,7 +771,6 @@
wake_up_interruptible(&priv->waitq);
spin_unlock_irqrestore(&priv->driver_lock, flags);
- lbs_deb_leave(LBS_DEB_TX);
}
/**
@@ -817,8 +783,6 @@
{
struct lbs_private *priv = (struct lbs_private *)data;
- lbs_deb_enter(LBS_DEB_CMD);
-
if (priv->is_activity_detected) {
priv->is_activity_detected = 0;
} else {
@@ -836,32 +800,25 @@
}
mod_timer(&priv->auto_deepsleep_timer , jiffies +
(priv->auto_deep_sleep_timeout * HZ)/1000);
- lbs_deb_leave(LBS_DEB_CMD);
}
int lbs_enter_auto_deep_sleep(struct lbs_private *priv)
{
- lbs_deb_enter(LBS_DEB_SDIO);
-
priv->is_auto_deep_sleep_enabled = 1;
if (priv->is_deep_sleep)
priv->wakeup_dev_required = 1;
mod_timer(&priv->auto_deepsleep_timer ,
jiffies + (priv->auto_deep_sleep_timeout * HZ)/1000);
- lbs_deb_leave(LBS_DEB_SDIO);
return 0;
}
int lbs_exit_auto_deep_sleep(struct lbs_private *priv)
{
- lbs_deb_enter(LBS_DEB_SDIO);
-
priv->is_auto_deep_sleep_enabled = 0;
priv->auto_deep_sleep_timeout = 0;
del_timer(&priv->auto_deepsleep_timer);
- lbs_deb_leave(LBS_DEB_SDIO);
return 0;
}
@@ -869,8 +826,6 @@
{
int ret;
- lbs_deb_enter(LBS_DEB_MAIN);
-
eth_broadcast_addr(priv->current_addr);
priv->connect_status = LBS_DISCONNECTED;
@@ -921,22 +876,16 @@
}
out:
- lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
-
return ret;
}
static void lbs_free_adapter(struct lbs_private *priv)
{
- lbs_deb_enter(LBS_DEB_MAIN);
-
lbs_free_cmd_buffer(priv);
kfifo_free(&priv->event_fifo);
del_timer(&priv->command_timer);
del_timer(&priv->tx_lockup_timer);
del_timer(&priv->auto_deepsleep_timer);
-
- lbs_deb_leave(LBS_DEB_MAIN);
}
static const struct net_device_ops lbs_netdev_ops = {
@@ -962,8 +911,6 @@
struct wireless_dev *wdev;
struct lbs_private *priv = NULL;
- lbs_deb_enter(LBS_DEB_MAIN);
-
/* Allocate an Ethernet device and register it */
wdev = lbs_cfg_alloc(dmdev);
if (IS_ERR(wdev)) {
@@ -1031,7 +978,6 @@
priv = NULL;
done:
- lbs_deb_leave_args(LBS_DEB_MAIN, "priv %p", priv);
return priv;
}
EXPORT_SYMBOL_GPL(lbs_add_card);
@@ -1041,8 +987,6 @@
{
struct net_device *dev = priv->dev;
- lbs_deb_enter(LBS_DEB_MAIN);
-
lbs_remove_mesh(priv);
if (priv->wiphy_registered)
@@ -1083,8 +1027,6 @@
lbs_free_adapter(priv);
lbs_cfg_free(priv);
free_netdev(dev);
-
- lbs_deb_leave(LBS_DEB_MAIN);
}
EXPORT_SYMBOL_GPL(lbs_remove_card);
@@ -1105,8 +1047,6 @@
struct net_device *dev = priv->dev;
int ret = -1;
- lbs_deb_enter(LBS_DEB_MAIN);
-
/* poke the firmware */
ret = lbs_setup_firmware(priv);
if (ret)
@@ -1133,7 +1073,6 @@
ret = 0;
done:
- lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
return ret;
}
EXPORT_SYMBOL_GPL(lbs_start_card);
@@ -1143,16 +1082,14 @@
{
struct net_device *dev;
- lbs_deb_enter(LBS_DEB_MAIN);
-
if (!priv)
- goto out;
+ return;
dev = priv->dev;
/* If the netdev isn't registered, it means that lbs_start_card() was
* never called so we have nothing to do here. */
if (dev->reg_state != NETREG_REGISTERED)
- goto out;
+ return;
netif_stop_queue(dev);
netif_carrier_off(dev);
@@ -1160,9 +1097,6 @@
lbs_debugfs_remove_one(priv);
lbs_deinit_mesh(priv);
unregister_netdev(dev);
-
-out:
- lbs_deb_leave(LBS_DEB_MAIN);
}
EXPORT_SYMBOL_GPL(lbs_stop_card);
@@ -1171,7 +1105,6 @@
{
unsigned long flags;
- lbs_deb_enter(LBS_DEB_THREAD);
spin_lock_irqsave(&priv->driver_lock, flags);
if (priv->psstate == PS_STATE_SLEEP)
@@ -1182,14 +1115,11 @@
wake_up(&priv->waitq);
spin_unlock_irqrestore(&priv->driver_lock, flags);
- lbs_deb_leave(LBS_DEB_THREAD);
}
EXPORT_SYMBOL_GPL(lbs_queue_event);
void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx)
{
- lbs_deb_enter(LBS_DEB_THREAD);
-
if (priv->psstate == PS_STATE_SLEEP)
priv->psstate = PS_STATE_AWAKE;
@@ -1198,28 +1128,23 @@
priv->resp_idx = resp_idx;
wake_up(&priv->waitq);
-
- lbs_deb_leave(LBS_DEB_THREAD);
}
EXPORT_SYMBOL_GPL(lbs_notify_command_response);
static int __init lbs_init_module(void)
{
- lbs_deb_enter(LBS_DEB_MAIN);
memset(&confirm_sleep, 0, sizeof(confirm_sleep));
confirm_sleep.hdr.command = cpu_to_le16(CMD_802_11_PS_MODE);
confirm_sleep.hdr.size = cpu_to_le16(sizeof(confirm_sleep));
confirm_sleep.action = cpu_to_le16(PS_MODE_ACTION_SLEEP_CONFIRMED);
lbs_debugfs_init();
- lbs_deb_leave(LBS_DEB_MAIN);
+
return 0;
}
static void __exit lbs_exit_module(void)
{
- lbs_deb_enter(LBS_DEB_MAIN);
lbs_debugfs_remove();
- lbs_deb_leave(LBS_DEB_MAIN);
}
module_init(lbs_init_module);
diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c
index d0c881d..eeeb892 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.c
+++ b/drivers/net/wireless/marvell/libertas/mesh.c
@@ -26,8 +26,6 @@
{
int ret;
- lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action);
-
cmd->hdr.command = cpu_to_le16(CMD_MESH_ACCESS);
cmd->hdr.size = cpu_to_le16(sizeof(*cmd));
cmd->hdr.result = 0;
@@ -36,7 +34,6 @@
ret = lbs_cmd_with_response(priv, CMD_MESH_ACCESS, cmd);
- lbs_deb_leave(LBS_DEB_CMD);
return ret;
}
@@ -47,8 +44,6 @@
int ret;
u16 command = CMD_MESH_CONFIG_OLD;
- lbs_deb_enter(LBS_DEB_CMD);
-
/*
* Command id is 0xac for v10 FW along with mesh interface
* id in bits 14-13-12.
@@ -66,7 +61,6 @@
ret = lbs_cmd_with_response(priv, command, cmd);
- lbs_deb_leave(LBS_DEB_CMD);
return ret;
}
@@ -823,8 +817,6 @@
{
int ret = 0;
- lbs_deb_enter(LBS_DEB_MESH);
-
/* Determine mesh_fw_ver from fwrelease and fwcapinfo */
/* 5.0.16p0 9.0.0.p0 is known to NOT support any mesh */
/* 5.110.22 have mesh command with 0xa3 command id */
@@ -870,7 +862,6 @@
ret = 1;
}
- lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret);
return ret;
}
@@ -887,14 +878,11 @@
struct net_device *dev = priv->dev;
int ret = 0;
- lbs_deb_enter(LBS_DEB_MESH);
-
if (priv->mesh_tlv) {
device_remove_file(&dev->dev, &dev_attr_lbs_mesh);
ret = 1;
}
- lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret);
return ret;
}
@@ -909,7 +897,6 @@
{
struct lbs_private *priv = dev->ml_priv;
- lbs_deb_enter(LBS_DEB_MESH);
lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP,
lbs_mesh_get_channel(priv));
@@ -924,7 +911,6 @@
if (!lbs_iface_active(priv))
lbs_stop_iface(priv);
- lbs_deb_leave(LBS_DEB_MESH);
return 0;
}
@@ -939,7 +925,6 @@
struct lbs_private *priv = dev->ml_priv;
int ret = 0;
- lbs_deb_enter(LBS_DEB_NET);
if (!priv->iface_running) {
ret = lbs_start_iface(priv);
if (ret)
@@ -965,7 +950,6 @@
lbs_mesh_get_channel(priv));
out:
- lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret);
return ret;
}
@@ -989,8 +973,6 @@
struct wireless_dev *mesh_wdev;
int ret = 0;
- lbs_deb_enter(LBS_DEB_MESH);
-
/* Allocate a virtual mesh device */
mesh_wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
if (!mesh_wdev) {
@@ -1048,7 +1030,6 @@
kfree(mesh_wdev);
done:
- lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret);
return ret;
}
@@ -1060,7 +1041,6 @@
if (!mesh_dev)
return;
- lbs_deb_enter(LBS_DEB_MESH);
netif_stop_queue(mesh_dev);
netif_carrier_off(mesh_dev);
sysfs_remove_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
@@ -1069,7 +1049,6 @@
priv->mesh_dev = NULL;
kfree(mesh_dev->ieee80211_ptr);
free_netdev(mesh_dev);
- lbs_deb_leave(LBS_DEB_MESH);
}
@@ -1108,15 +1087,15 @@
* Ethtool related
*/
-static const char * const mesh_stat_strings[] = {
- "drop_duplicate_bcast",
- "drop_ttl_zero",
- "drop_no_fwd_route",
- "drop_no_buffers",
- "fwded_unicast_cnt",
- "fwded_bcast_cnt",
- "drop_blind_table",
- "tx_failed_cnt"
+static const char mesh_stat_strings[MESH_STATS_NUM][ETH_GSTRING_LEN] = {
+ "drop_duplicate_bcast",
+ "drop_ttl_zero",
+ "drop_no_fwd_route",
+ "drop_no_buffers",
+ "fwded_unicast_cnt",
+ "fwded_bcast_cnt",
+ "drop_blind_table",
+ "tx_failed_cnt"
};
void lbs_mesh_ethtool_get_stats(struct net_device *dev,
@@ -1126,8 +1105,6 @@
struct cmd_ds_mesh_access mesh_access;
int ret;
- lbs_deb_enter(LBS_DEB_ETHTOOL);
-
/* Get Mesh Statistics */
ret = lbs_mesh_access(priv, CMD_ACT_MESH_GET_STATS, &mesh_access);
@@ -1153,8 +1130,6 @@
data[5] = priv->mstats.fwd_bcast_cnt;
data[6] = priv->mstats.drop_blind;
data[7] = priv->mstats.tx_failed_cnt;
-
- lbs_deb_enter(LBS_DEB_ETHTOOL);
}
int lbs_mesh_ethtool_get_sset_count(struct net_device *dev, int sset)
@@ -1170,18 +1145,9 @@
void lbs_mesh_ethtool_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *s)
{
- int i;
-
- lbs_deb_enter(LBS_DEB_ETHTOOL);
-
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < MESH_STATS_NUM; i++) {
- memcpy(s + i * ETH_GSTRING_LEN,
- mesh_stat_strings[i],
- ETH_GSTRING_LEN);
- }
+ memcpy(s, mesh_stat_strings, sizeof(mesh_stat_strings));
break;
}
- lbs_deb_enter(LBS_DEB_ETHTOOL);
}
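Two things happen in the mesh.c ethtool hunks. First, the removals fix a long-standing copy/paste wart: both functions ended with lbs_deb_enter() where a leave-trace was presumably meant. Second, turning the string table into a fixed-width char [][ETH_GSTRING_LEN] array makes it one contiguous block, which is what lets the per-entry copy loop collapse into a single memcpy(). A sketch with illustrative stat names:

	#include <linux/ethtool.h>
	#include <linux/string.h>

	static const char stat_strings[2][ETH_GSTRING_LEN] = {
		"example_stat_a",	/* each row padded to ETH_GSTRING_LEN */
		"example_stat_b",
	};

	static void get_strings_sketch(u8 *buf)
	{
		/* One copy of sizeof(stat_strings) bytes replaces the loop. */
		memcpy(buf, stat_strings, sizeof(stat_strings));
	}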
diff --git a/drivers/net/wireless/marvell/libertas/rx.c b/drivers/net/wireless/marvell/libertas/rx.c
index e446fed..7586ff6 100644
--- a/drivers/net/wireless/marvell/libertas/rx.c
+++ b/drivers/net/wireless/marvell/libertas/rx.c
@@ -65,8 +65,6 @@
0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
};
- lbs_deb_enter(LBS_DEB_RX);
-
BUG_ON(!skb);
skb->ip_summed = CHECKSUM_NONE;
@@ -158,7 +156,6 @@
ret = 0;
done:
- lbs_deb_leave_args(LBS_DEB_RX, "ret %d", ret);
return ret;
}
EXPORT_SYMBOL_GPL(lbs_process_rxed_packet);
@@ -221,8 +218,6 @@
struct rx_radiotap_hdr radiotap_hdr;
struct rx_radiotap_hdr *pradiotap_hdr;
- lbs_deb_enter(LBS_DEB_RX);
-
p_rx_pkt = (struct rx80211packethdr *) skb->data;
prxpd = &p_rx_pkt->rx_pd;
@@ -262,7 +257,7 @@
goto done;
}
- pradiotap_hdr = (void *)skb_push(skb, sizeof(struct rx_radiotap_hdr));
+ pradiotap_hdr = skb_push(skb, sizeof(struct rx_radiotap_hdr));
memcpy(pradiotap_hdr, &radiotap_hdr, sizeof(struct rx_radiotap_hdr));
priv->cur_rate = lbs_fw_index_to_data_rate(prxpd->rx_rate);
@@ -281,6 +276,5 @@
ret = 0;
done:
- lbs_deb_leave_args(LBS_DEB_RX, "ret %d", ret);
return ret;
}
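The cast dropped from the skb_push() call above reflects another tree-wide change in this cycle: skb_push()/skb_put() now return void *, so the result can be assigned to any object pointer without casting. Sketch with a stand-in header type:

	#include <linux/skbuff.h>

	struct hdr_sketch {			/* stand-in for rx_radiotap_hdr */
		u8 version;
	};

	static void push_hdr(struct sk_buff *skb)
	{
		/* No (struct hdr_sketch *) cast needed on a void * result. */
		struct hdr_sketch *hdr = skb_push(skb, sizeof(*hdr));

		hdr->version = 0;
	}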
diff --git a/drivers/net/wireless/marvell/libertas/tx.c b/drivers/net/wireless/marvell/libertas/tx.c
index c025f9c..723ba5f 100644
--- a/drivers/net/wireless/marvell/libertas/tx.c
+++ b/drivers/net/wireless/marvell/libertas/tx.c
@@ -70,8 +70,6 @@
uint16_t pkt_len;
netdev_tx_t ret = NETDEV_TX_OK;
- lbs_deb_enter(LBS_DEB_TX);
-
/* We need to protect against the queues being restarted before
we get round to stopping them */
spin_lock_irqsave(&priv->driver_lock, flags);
@@ -166,7 +164,6 @@
spin_unlock_irqrestore(&priv->driver_lock, flags);
wake_up(&priv->waitq);
- lbs_deb_leave_args(LBS_DEB_TX, "ret %d", ret);
return ret;
}
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index d803331..81228bf 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -260,7 +260,7 @@
len = skb->len;
info = IEEE80211_SKB_CB(skb);
- txpd = (struct txpd *) skb_push(skb, sizeof(struct txpd));
+ txpd = skb_push(skb, sizeof(struct txpd));
if (priv->surpriseremoved) {
dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index 366eb49..238accf 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -128,9 +128,6 @@
container_of(delayed_work, struct mwifiex_private,
dfs_cac_work);
- if (WARN_ON(!priv))
- return;
-
chandef = priv->dfs_chandef;
if (priv->wdev.cac_started) {
mwifiex_dbg(priv->adapter, MSG,
@@ -289,9 +286,6 @@
container_of(delayed_work, struct mwifiex_private,
dfs_chan_sw_work);
- if (WARN_ON(!priv))
- return;
-
bss_cfg = &priv->bss_cfg;
if (!bss_cfg->beacon_period) {
mwifiex_dbg(priv->adapter, ERROR,
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index c174e79..16c77c2 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -653,11 +653,13 @@
void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
{
struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
+ unsigned long flags;
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
if (list_empty(&priv->rx_reorder_tbl_ptr)) {
dev_dbg(priv->adapter->dev,
"mwifiex_11n_delba: rx_reorder_tbl_ptr empty\n");
- return;
+ goto exit;
}
list_for_each_entry(rx_reor_tbl_ptr, &priv->rx_reorder_tbl_ptr, list) {
@@ -666,9 +668,11 @@
"Send delba to tid=%d, %pM\n",
tid, rx_reor_tbl_ptr->ta);
mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0);
- return;
+ goto exit;
}
}
+exit:
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
}
/*
@@ -764,14 +768,9 @@
return;
spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
- list_for_each_entry_safe(tbl, tmp, &priv->tx_ba_stream_tbl_ptr, list) {
- if (!memcmp(tbl->ra, ra, ETH_ALEN)) {
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
- flags);
+ list_for_each_entry_safe(tbl, tmp, &priv->tx_ba_stream_tbl_ptr, list)
+ if (!memcmp(tbl->ra, ra, ETH_ALEN))
mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, tbl);
- spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
- }
- }
spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
return;
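Both 11n.c hunks tighten the same locking pattern: the BA/reorder tables are now walked and pruned entirely under their spinlock instead of dropping and retaking it around each deletion, which left a window where a concurrent writer could free the iterator's cached next entry. A generic sketch of the safe shape, with illustrative types:

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct entry {
		struct list_head list;
		int key;
	};

	static void purge_matching(struct list_head *head, spinlock_t *lock,
				   int key)
	{
		struct entry *e, *tmp;
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		/* _safe caches the next node, so list_del() of 'e' is fine;
		 * holding the lock across the walk keeps 'tmp' from being
		 * freed underneath us.
		 */
		list_for_each_entry_safe(e, tmp, head, list) {
			if (e->key == key) {
				list_del(&e->list);
				kfree(e);	/* kfree() is fine under a spinlock */
			}
		}
		spin_unlock_irqrestore(lock, flags);
	}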
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
index a75013a..042a1d0 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
@@ -62,7 +62,7 @@
};
struct tx_packet_hdr *tx_header;
- tx_header = (void *)skb_put(skb_aggr, sizeof(*tx_header));
+ tx_header = skb_put(skb_aggr, sizeof(*tx_header));
/* Copy DA and SA */
dt_offset = 2 * ETH_ALEN;
@@ -81,7 +81,7 @@
tx_header->eth803_hdr.h_proto = htons(skb_src->len + LLC_SNAP_LEN);
/* Add payload */
- memcpy(skb_put(skb_aggr, skb_src->len), skb_src->data, skb_src->len);
+ skb_put_data(skb_aggr, skb_src->data, skb_src->len);
/* Add padding for new MSDU to start from 4 byte boundary */
*pad = (4 - ((unsigned long)skb_aggr->tail & 0x3)) % 4;
@@ -164,7 +164,7 @@
int pad = 0, aggr_num = 0, ret;
struct mwifiex_tx_param tx_param;
struct txpd *ptx_pd = NULL;
- int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN;
+ int headroom = adapter->intf_hdr_len;
skb_src = skb_peek(&pra_list->skb_head);
if (!skb_src) {
@@ -250,15 +250,15 @@
return 0;
}
+ if (skb_src)
+ tx_param.next_pkt_len = skb_src->len + sizeof(struct txpd);
+ else
+ tx_param.next_pkt_len = 0;
+
if (adapter->iface_type == MWIFIEX_USB) {
ret = adapter->if_ops.host_to_card(adapter, priv->usb_port,
- skb_aggr, NULL);
+ skb_aggr, &tx_param);
} else {
- if (skb_src)
- tx_param.next_pkt_len =
- skb_src->len + sizeof(struct txpd);
- else
- tx_param.next_pkt_len = 0;
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
skb_aggr, &tx_param);
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 7ec06bf..c20e494 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -176,12 +176,10 @@
memcpy(skb_push(skb, sizeof(pkt_type)), &pkt_type, sizeof(pkt_type));
/* Add packet data and address4 */
- memcpy(skb_put(skb, sizeof(struct ieee80211_hdr_3addr)), buf,
- sizeof(struct ieee80211_hdr_3addr));
- memcpy(skb_put(skb, ETH_ALEN), addr, ETH_ALEN);
- memcpy(skb_put(skb, len - sizeof(struct ieee80211_hdr_3addr)),
- buf + sizeof(struct ieee80211_hdr_3addr),
- len - sizeof(struct ieee80211_hdr_3addr));
+ skb_put_data(skb, buf, sizeof(struct ieee80211_hdr_3addr));
+ skb_put_data(skb, addr, ETH_ALEN);
+ skb_put_data(skb, buf + sizeof(struct ieee80211_hdr_3addr),
+ len - sizeof(struct ieee80211_hdr_3addr));
skb->priority = LOW_PRIO_TID;
__net_timestamp(skb);
@@ -2964,10 +2962,8 @@
if (!dev) {
mwifiex_dbg(adapter, ERROR,
"no memory available for netdevice\n");
- memset(&priv->wdev, 0, sizeof(priv->wdev));
- priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
- priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto err_alloc_netdev;
}
mwifiex_init_priv_params(priv, dev);
@@ -2976,11 +2972,11 @@
ret = mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
HostCmd_ACT_GEN_SET, 0, NULL, true);
if (ret)
- return ERR_PTR(ret);
+ goto err_set_bss_mode;
ret = mwifiex_sta_init_cmd(priv, false, false);
if (ret)
- return ERR_PTR(ret);
+ goto err_sta_init;
mwifiex_setup_ht_caps(&wiphy->bands[NL80211_BAND_2GHZ]->ht_cap, priv);
if (adapter->is_hw_11ac_capable)
@@ -3011,31 +3007,14 @@
SET_NETDEV_DEV(dev, adapter->dev);
- /* Register network device */
- if (register_netdevice(dev)) {
- mwifiex_dbg(adapter, ERROR,
- "cannot register virtual network device\n");
- free_netdev(dev);
- priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
- priv->netdev = NULL;
- memset(&priv->wdev, 0, sizeof(priv->wdev));
- priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
- return ERR_PTR(-EFAULT);
- }
-
priv->dfs_cac_workqueue = alloc_workqueue("MWIFIEX_DFS_CAC%s",
WQ_HIGHPRI |
WQ_MEM_RECLAIM |
WQ_UNBOUND, 1, name);
if (!priv->dfs_cac_workqueue) {
- mwifiex_dbg(adapter, ERROR,
- "cannot register virtual network device\n");
- free_netdev(dev);
- priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
- priv->netdev = NULL;
- memset(&priv->wdev, 0, sizeof(priv->wdev));
- priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
- return ERR_PTR(-ENOMEM);
+ mwifiex_dbg(adapter, ERROR, "cannot alloc DFS CAC queue\n");
+ ret = -ENOMEM;
+ goto err_alloc_cac;
}
INIT_DELAYED_WORK(&priv->dfs_cac_work, mwifiex_dfs_cac_work_queue);
@@ -3044,16 +3023,9 @@
WQ_HIGHPRI | WQ_UNBOUND |
WQ_MEM_RECLAIM, 1, name);
if (!priv->dfs_chan_sw_workqueue) {
- mwifiex_dbg(adapter, ERROR,
- "cannot register virtual network device\n");
- free_netdev(dev);
- priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
- priv->netdev = NULL;
- memset(&priv->wdev, 0, sizeof(priv->wdev));
- priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
- destroy_workqueue(priv->dfs_cac_workqueue);
- priv->dfs_cac_workqueue = NULL;
- return ERR_PTR(-ENOMEM);
+ mwifiex_dbg(adapter, ERROR, "cannot alloc DFS channel sw queue\n");
+ ret = -ENOMEM;
+ goto err_alloc_chsw;
}
INIT_DELAYED_WORK(&priv->dfs_chan_sw_work,
@@ -3061,6 +3033,13 @@
sema_init(&priv->async_sem, 1);
+ /* Register network device */
+ if (register_netdevice(dev)) {
+ mwifiex_dbg(adapter, ERROR, "cannot register network device\n");
+ ret = -EFAULT;
+ goto err_reg_netdev;
+ }
+
mwifiex_dbg(adapter, INFO,
"info: %s: Marvell 802.11 Adapter\n", dev->name);
@@ -3081,11 +3060,29 @@
adapter->curr_iface_comb.p2p_intf++;
break;
default:
+ /* This should be dead code; checked above */
mwifiex_dbg(adapter, ERROR, "type not supported\n");
return ERR_PTR(-EINVAL);
}
return &priv->wdev;
+
+err_reg_netdev:
+ destroy_workqueue(priv->dfs_chan_sw_workqueue);
+ priv->dfs_chan_sw_workqueue = NULL;
+err_alloc_chsw:
+ destroy_workqueue(priv->dfs_cac_workqueue);
+ priv->dfs_cac_workqueue = NULL;
+err_alloc_cac:
+ free_netdev(dev);
+ priv->netdev = NULL;
+err_sta_init:
+err_set_bss_mode:
+err_alloc_netdev:
+ memset(&priv->wdev, 0, sizeof(priv->wdev));
+ priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
+ priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf);
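The mwifiex_add_virtual_intf() rework above replaces four duplicated cleanup blocks with one unwind ladder and moves register_netdevice() to the very end, so no error path ever has to unregister a live netdev. The shape, as a minimal sketch with hypothetical steps:

	static int step_a(void) { return 0; }	/* hypothetical setup steps */
	static int step_b(void) { return 0; }
	static void undo_a(void) { }

	static int setup_sketch(void)
	{
		int ret;

		ret = step_a();
		if (ret)
			goto err_a;

		ret = step_b();
		if (ret)
			goto err_b;

		return 0;			/* success path stays linear */

	err_b:
		undo_a();			/* labels undo in reverse order */
	err_a:
		return ret;
	}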
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 0c3b217..8dad528 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -258,10 +258,10 @@
if (ret == -EBUSY)
cmd_node->cmd_skb = NULL;
} else {
- skb_push(cmd_node->cmd_skb, INTF_HEADER_LEN);
+ skb_push(cmd_node->cmd_skb, adapter->intf_hdr_len);
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD,
cmd_node->cmd_skb, NULL);
- skb_pull(cmd_node->cmd_skb, INTF_HEADER_LEN);
+ skb_pull(cmd_node->cmd_skb, adapter->intf_hdr_len);
}
if (ret == -1) {
@@ -351,10 +351,10 @@
if (ret != -EBUSY)
dev_kfree_skb_any(sleep_cfm_tmp);
} else {
- skb_push(adapter->sleep_cfm, INTF_HEADER_LEN);
+ skb_push(adapter->sleep_cfm, adapter->intf_hdr_len);
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD,
adapter->sleep_cfm, NULL);
- skb_pull(adapter->sleep_cfm, INTF_HEADER_LEN);
+ skb_pull(adapter->sleep_cfm, adapter->intf_hdr_len);
}
if (ret == -1) {
@@ -622,8 +622,7 @@
return -1;
}
- memset(skb_put(cmd_node->cmd_skb, sizeof(struct host_cmd_ds_command)),
- 0, sizeof(struct host_cmd_ds_command));
+ skb_put_zero(cmd_node->cmd_skb, sizeof(struct host_cmd_ds_command));
cmd_ptr = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
cmd_ptr->command = cpu_to_le16(cmd_no);
@@ -761,8 +760,6 @@
}
cmd_node = list_first_entry(&adapter->cmd_pending_q,
struct cmd_ctrl_node, list);
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
- cmd_pending_q_flags);
host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
priv = cmd_node->priv;
@@ -771,11 +768,12 @@
mwifiex_dbg(adapter, ERROR,
"%s: cannot send cmd in sleep state,\t"
"this should not happen\n", __func__);
+ spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
+ cmd_pending_q_flags);
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
return ret;
}
- spin_lock_irqsave(&adapter->cmd_pending_q_lock, cmd_pending_q_flags);
list_del(&cmd_node->list);
spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
cmd_pending_q_flags);
@@ -1056,12 +1054,10 @@
list_for_each_entry_safe(cmd_node, tmp_node,
&adapter->cmd_pending_q, list) {
list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
if (cmd_node->wait_q_enabled)
adapter->cmd_wait_q.status = -1;
mwifiex_recycle_cmd_node(adapter, cmd_node);
- spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
}
spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
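Same locking theme as the 11n.c fix: the node picked with list_first_entry() now stays under cmd_pending_q_lock until list_del() has run, and the cancel loop no longer drops the lock inside list_for_each_entry_safe(), closing the window in which another context could requeue or free the node mid-walk.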
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 6cf9ab9..b4d915b 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -405,6 +405,7 @@
#define HostCmd_CMD_TDLS_OPER 0x0122
#define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG 0x0223
#define HostCmd_CMD_CHAN_REGION_CFG 0x0242
+#define HostCmd_CMD_PACKET_AGGR_CTRL 0x0251
#define PROTOCOL_NO_SECURITY 0x01
#define PROTOCOL_STATIC_WEP 0x02
@@ -2268,6 +2269,14 @@
__le16 action;
} __packed;
+struct host_cmd_ds_pkt_aggr_ctrl {
+ __le16 action;
+ __le16 enable;
+ __le16 tx_aggr_max_size;
+ __le16 tx_aggr_max_num;
+ __le16 tx_aggr_align;
+} __packed;
+
struct host_cmd_ds_command {
__le16 command;
__le16 size;
@@ -2343,6 +2352,7 @@
struct host_cmd_ds_wakeup_reason hs_wakeup_reason;
struct host_cmd_ds_gtk_rekey_params rekey;
struct host_cmd_ds_chan_region_cfg reg_cfg;
+ struct host_cmd_ds_pkt_aggr_ctrl pkt_aggr_ctrl;
} params;
} __packed;
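Host commands go to the firmware in little-endian byte order, hence the __le16 fields and __packed layout of the new struct. A hedged sketch of filling it; the struct, HostCmd_ACT_GEN_SET and the MWIFIEX_BUS_AGGR_* limits come from this series, while the alignment value is an assumption:

	struct host_cmd_ds_pkt_aggr_ctrl ctrl = {
		.action           = cpu_to_le16(HostCmd_ACT_GEN_SET),
		.enable           = cpu_to_le16(1),
		.tx_aggr_max_size = cpu_to_le16(MWIFIEX_BUS_AGGR_MAX_LEN),
		.tx_aggr_max_num  = cpu_to_le16(MWIFIEX_BUS_AGGR_MAX_NUM),
		.tx_aggr_align    = cpu_to_le16(4),	/* assumed value */
	};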
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 7569483..3ecb59f 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -217,6 +217,11 @@
else
adapter->data_sent = false;
+ if (adapter->iface_type == MWIFIEX_USB)
+ adapter->intf_hdr_len = 0;
+ else
+ adapter->intf_hdr_len = INTF_HEADER_LEN;
+
adapter->cmd_resp_received = false;
adapter->event_received = false;
adapter->data_received = false;
@@ -409,11 +414,6 @@
static void
mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
{
- if (!adapter) {
- pr_err("%s: adapter is NULL\n", __func__);
- return;
- }
-
del_timer(&adapter->wakeup_timer);
mwifiex_cancel_all_pending_cmd(adapter);
wake_up_interruptible(&adapter->cmd_wait_q.wait);
@@ -439,7 +439,6 @@
struct mwifiex_private *priv;
s32 i, j;
- spin_lock_init(&adapter->mwifiex_lock);
spin_lock_init(&adapter->int_lock);
spin_lock_init(&adapter->main_proc_lock);
spin_lock_init(&adapter->mwifiex_cmd_lock);
@@ -670,8 +669,7 @@
mwifiex_clean_auto_tdls(priv);
mwifiex_abort_cac(priv);
- mwifiex_clean_txrx(priv);
- mwifiex_delete_bss_prio_tbl(priv);
+ mwifiex_free_priv(priv);
}
}
@@ -694,11 +692,8 @@
spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
- spin_lock(&adapter->mwifiex_lock);
-
mwifiex_adapter_cleanup(adapter);
- spin_unlock(&adapter->mwifiex_lock);
adapter->hw_status = MWIFIEX_HW_STATUS_NOT_READY;
}
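Two themes in the init.c hunks: impossible NULL checks are dropped (mwifiex_adapter_cleanup() is never called without a valid adapter, just as pcie.c below loses its dead `!pdev` test), and the interface header length becomes per-adapter state chosen once here, so the bus code can do, e.g.:

	skb_pull(skb, adapter->intf_hdr_len);	/* was: skb_pull(skb, INTF_HEADER_LEN) */

instead of branching on iface_type at every call site.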
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 39b6b5e..f2600b8 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -44,6 +44,10 @@
module_param(mfg_mode, bool, 0);
MODULE_PARM_DESC(mfg_mode, "manufacturing mode enable:1, disable:0");
+bool aggr_ctrl;
+module_param(aggr_ctrl, bool, 0000);
+MODULE_PARM_DESC(aggr_ctrl, "usb tx aggregation enable:1, disable:0");
+
/*
* This function registers the device and performs all the necessary
* initializations.
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index bb2a467..c37fb26 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -60,6 +60,7 @@
extern const char driver_version[];
extern bool mfg_mode;
+extern bool aggr_ctrl;
struct mwifiex_adapter;
struct mwifiex_private;
@@ -798,6 +799,18 @@
u8 do_setup;
};
+#define MWIFIEX_TYPE_AGGR_DATA_V2 11
+#define MWIFIEX_BUS_AGGR_MODE_LEN_V2 (2)
+#define MWIFIEX_BUS_AGGR_MAX_LEN 16000
+#define MWIFIEX_BUS_AGGR_MAX_NUM 10
+struct bus_aggr_params {
+ u16 enable;
+ u16 mode;
+ u16 tx_aggr_max_size;
+ u16 tx_aggr_max_num;
+ u16 tx_aggr_align;
+};
+
struct mwifiex_if_ops {
int (*init_if) (struct mwifiex_adapter *);
void (*cleanup_if) (struct mwifiex_adapter *);
@@ -849,6 +862,7 @@
u8 perm_addr[ETH_ALEN];
bool surprise_removed;
u32 fw_release_number;
+ u8 intf_hdr_len;
u16 init_wait_q_woken;
wait_queue_head_t init_wait_q;
void *card;
@@ -870,8 +884,6 @@
bool rx_locked;
bool main_locked;
struct mwifiex_bss_prio_tbl bss_prio_tbl[MWIFIEX_MAX_BSS_NUM];
- /* spin lock for init/shutdown */
- spinlock_t mwifiex_lock;
/* spin lock for main process */
spinlock_t main_proc_lock;
u32 mwifiex_processing;
@@ -1017,6 +1029,8 @@
/* Wake-on-WLAN (WoWLAN) */
int irq_wakeup;
bool wake_by_wifi;
+ /* Aggregation parameters */
+ struct bus_aggr_params bus_aggr;
};
void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@@ -1235,7 +1249,8 @@
* Currently we assume if we are in Infra, then DA=RA. This might not be
* true in the future
*/
- if ((priv->bss_mode == NL80211_IFTYPE_STATION) &&
+ if ((priv->bss_mode == NL80211_IFTYPE_STATION ||
+ priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) &&
(GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA))
return false;
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index ac62bce..b53ecf1 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -370,7 +370,6 @@
* PCIe and HW.
*/
mwifiex_shutdown_sw(adapter);
- adapter->surprise_removed = true;
clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
} else {
@@ -378,7 +377,6 @@
* after performing FLR respectively. Reconfigure the software
* and firmware including firmware redownload
*/
- adapter->surprise_removed = false;
ret = mwifiex_reinit_sw(adapter);
if (ret) {
dev_err(&pdev->dev, "reinit failed: %d\n", ret);
@@ -1391,7 +1389,7 @@
* first 2 bytes for len, next 2 bytes is for type
*/
rx_len = get_unaligned_le16(skb_data->data);
- if (WARN_ON(rx_len <= INTF_HEADER_LEN ||
+ if (WARN_ON(rx_len <= adapter->intf_hdr_len ||
rx_len > MWIFIEX_RX_DATA_BUF_SIZE)) {
mwifiex_dbg(adapter, ERROR,
"Invalid RX len %d, Rd=%#x, Wr=%#x\n",
@@ -1402,7 +1400,7 @@
mwifiex_dbg(adapter, DATA,
"info: RECV DATA: Rd=%#x, Wr=%#x, Len=%d\n",
card->rxbd_rdptr, wrptr, rx_len);
- skb_pull(skb_data, INTF_HEADER_LEN);
+ skb_pull(skb_data, adapter->intf_hdr_len);
if (adapter->rx_work_enabled) {
skb_queue_tail(&adapter->rx_data_q, skb_data);
adapter->data_received = true;
@@ -1736,7 +1734,7 @@
MWIFIEX_MAX_DELAY_COUNT);
mwifiex_unmap_pci_memory(adapter, skb,
PCI_DMA_FROMDEVICE);
- skb_pull(skb, INTF_HEADER_LEN);
+ skb_pull(skb, adapter->intf_hdr_len);
while (reg->sleep_cookie && (count++ < 10) &&
mwifiex_pcie_ok_to_access_hw(adapter))
usleep_range(50, 60);
@@ -1749,12 +1747,12 @@
}
memcpy(adapter->upld_buf, skb->data,
min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
- skb_push(skb, INTF_HEADER_LEN);
+ skb_push(skb, adapter->intf_hdr_len);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
PCI_DMA_FROMDEVICE))
return -1;
} else if (mwifiex_pcie_ok_to_access_hw(adapter)) {
- skb_pull(skb, INTF_HEADER_LEN);
+ skb_pull(skb, adapter->intf_hdr_len);
adapter->curr_cmd->resp_skb = skb;
adapter->cmd_resp_received = true;
/* Take the pointer and set it to CMD node and will
@@ -1791,7 +1789,7 @@
if (skb) {
card->cmdrsp_buf = skb;
- skb_push(card->cmdrsp_buf, INTF_HEADER_LEN);
+ skb_push(card->cmdrsp_buf, adapter->intf_hdr_len);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
PCI_DMA_FROMDEVICE))
return -1;
@@ -1856,14 +1854,15 @@
desc = card->evtbd_ring[rdptr];
memset(desc, 0, sizeof(*desc));
- event = get_unaligned_le32(&skb_cmd->data[INTF_HEADER_LEN]);
+ event = get_unaligned_le32(
+ &skb_cmd->data[adapter->intf_hdr_len]);
adapter->event_cause = event;
/* The first 4bytes will be the event transfer header
len is 2 bytes followed by type which is 2 bytes */
memcpy(&data_len, skb_cmd->data, sizeof(__le16));
evt_len = le16_to_cpu(data_len);
skb_trim(skb_cmd, evt_len);
- skb_pull(skb_cmd, INTF_HEADER_LEN);
+ skb_pull(skb_cmd, adapter->intf_hdr_len);
mwifiex_dbg(adapter, EVENT,
"info: Event length: %d\n", evt_len);
@@ -1922,7 +1921,7 @@
}
if (!card->evt_buf_list[rdptr]) {
- skb_push(skb, INTF_HEADER_LEN);
+ skb_push(skb, adapter->intf_hdr_len);
skb_put(skb, MAX_EVENT_SIZE - skb->len);
if (mwifiex_map_pci_memory(adapter, skb,
MAX_EVENT_SIZE,
@@ -2380,11 +2379,6 @@
struct pcie_service_card *card;
struct mwifiex_adapter *adapter;
- if (!pdev) {
- pr_err("info: %s: pdev is NULL\n", __func__);
- goto exit;
- }
-
card = pci_get_drvdata(pdev);
if (!card->adapter) {
@@ -2822,6 +2816,13 @@
mwifiex_upload_device_dump(adapter, drv_info, drv_info_size);
}
+static void mwifiex_pcie_card_reset_work(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+
+ pci_reset_function(card->dev);
+}
+
static void mwifiex_pcie_work(struct work_struct *work)
{
struct pcie_service_card *card =
@@ -2830,6 +2831,9 @@
if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP,
&card->work_flags))
mwifiex_pcie_device_dump_work(card->adapter);
+ if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET,
+ &card->work_flags))
+ mwifiex_pcie_card_reset_work(card->adapter);
}
/* This function dumps FW information */
@@ -2837,12 +2841,72 @@
{
struct pcie_service_card *card = adapter->card;
- if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags))
- return;
+ if (!test_and_set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP,
+ &card->work_flags))
+ schedule_work(&card->work);
+}
- set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
+static void mwifiex_pcie_card_reset(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
- schedule_work(&card->work);
+ if (!test_and_set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags))
+ schedule_work(&card->work);
+}
+
+static int mwifiex_pcie_alloc_buffers(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ int ret;
+
+ card->cmdrsp_buf = NULL;
+ ret = mwifiex_pcie_create_txbd_ring(adapter);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR, "Failed to create txbd ring\n");
+ goto err_cre_txbd;
+ }
+
+ ret = mwifiex_pcie_create_rxbd_ring(adapter);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR, "Failed to create rxbd ring\n");
+ goto err_cre_rxbd;
+ }
+
+ ret = mwifiex_pcie_create_evtbd_ring(adapter);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR, "Failed to create evtbd ring\n");
+ goto err_cre_evtbd;
+ }
+
+ ret = mwifiex_pcie_alloc_cmdrsp_buf(adapter);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR, "Failed to allocate cmdbuf buffer\n");
+ goto err_alloc_cmdbuf;
+ }
+
+ if (reg->sleep_cookie) {
+ ret = mwifiex_pcie_alloc_sleep_cookie_buf(adapter);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR, "Failed to allocate sleep_cookie buffer\n");
+ goto err_alloc_cookie;
+ }
+ } else {
+ card->sleep_cookie_vbase = NULL;
+ }
+
+ return 0;
+
+err_alloc_cookie:
+ mwifiex_pcie_delete_cmdrsp_buf(adapter);
+err_alloc_cmdbuf:
+ mwifiex_pcie_delete_evtbd_ring(adapter);
+err_cre_evtbd:
+ mwifiex_pcie_delete_rxbd_ring(adapter);
+err_cre_rxbd:
+ mwifiex_pcie_delete_txbd_ring(adapter);
+err_cre_txbd:
+ return ret;
}
static void mwifiex_pcie_free_buffers(struct mwifiex_adapter *adapter)
@@ -2862,20 +2926,12 @@
/*
* This function initializes the PCI-E host memory space, WCB rings, etc.
- *
- * The following initializations steps are followed -
- * - Allocate TXBD ring buffers
- * - Allocate RXBD ring buffers
- * - Allocate event BD ring buffers
- * - Allocate command response ring buffer
- * - Allocate sleep cookie buffer
*/
static int mwifiex_init_pcie(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
int ret;
struct pci_dev *pdev = card->dev;
- const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
pci_set_drvdata(pdev, card);
@@ -2924,37 +2980,13 @@
pr_notice("PCI memory map Virt0: %p PCI memory map Virt2: %p\n",
card->pci_mmap, card->pci_mmap1);
- card->cmdrsp_buf = NULL;
- ret = mwifiex_pcie_create_txbd_ring(adapter);
+ ret = mwifiex_pcie_alloc_buffers(adapter);
if (ret)
- goto err_cre_txbd;
- ret = mwifiex_pcie_create_rxbd_ring(adapter);
- if (ret)
- goto err_cre_rxbd;
- ret = mwifiex_pcie_create_evtbd_ring(adapter);
- if (ret)
- goto err_cre_evtbd;
- ret = mwifiex_pcie_alloc_cmdrsp_buf(adapter);
- if (ret)
- goto err_alloc_cmdbuf;
- if (reg->sleep_cookie) {
- ret = mwifiex_pcie_alloc_sleep_cookie_buf(adapter);
- if (ret)
- goto err_alloc_cookie;
- } else {
- card->sleep_cookie_vbase = NULL;
- }
- return ret;
+ goto err_alloc_buffers;
-err_alloc_cookie:
- mwifiex_pcie_delete_cmdrsp_buf(adapter);
-err_alloc_cmdbuf:
- mwifiex_pcie_delete_evtbd_ring(adapter);
-err_cre_evtbd:
- mwifiex_pcie_delete_rxbd_ring(adapter);
-err_cre_rxbd:
- mwifiex_pcie_delete_txbd_ring(adapter);
-err_cre_txbd:
+ return 0;
+
+err_alloc_buffers:
pci_iounmap(pdev, card->pci_mmap1);
err_iomap2:
pci_release_region(pdev, 2);
@@ -3168,73 +3200,25 @@
card->adapter = NULL;
}
-/* This function initializes the PCI-E host memory space, WCB rings, etc.
- *
- * The following initializations steps are followed -
- * - Allocate TXBD ring buffers
- * - Allocate RXBD ring buffers
- * - Allocate event BD ring buffers
- * - Allocate command response ring buffer
- * - Allocate sleep cookie buffer
- * Part of mwifiex_init_pcie(), not reset the PCIE registers
+/*
+ * This function initializes the PCI-E host memory space, WCB rings, etc.,
+ * similar to mwifiex_init_pcie(), but without resetting PCI-E state.
*/
static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
int ret;
struct pci_dev *pdev = card->dev;
- const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
/* tx_buf_size might be changed to 3584 by firmware during
* data transfer, we should reset it to default size.
*/
adapter->tx_buf_size = card->pcie.tx_buf_size;
- card->cmdrsp_buf = NULL;
- ret = mwifiex_pcie_create_txbd_ring(adapter);
- if (ret) {
- mwifiex_dbg(adapter, ERROR, "Failed to create txbd ring\n");
- goto err_cre_txbd;
- }
+ ret = mwifiex_pcie_alloc_buffers(adapter);
+ if (!ret)
+ return;
- ret = mwifiex_pcie_create_rxbd_ring(adapter);
- if (ret) {
- mwifiex_dbg(adapter, ERROR, "Failed to create rxbd ring\n");
- goto err_cre_rxbd;
- }
-
- ret = mwifiex_pcie_create_evtbd_ring(adapter);
- if (ret) {
- mwifiex_dbg(adapter, ERROR, "Failed to create evtbd ring\n");
- goto err_cre_evtbd;
- }
-
- ret = mwifiex_pcie_alloc_cmdrsp_buf(adapter);
- if (ret) {
- mwifiex_dbg(adapter, ERROR, "Failed to allocate cmdbuf buffer\n");
- goto err_alloc_cmdbuf;
- }
-
- if (reg->sleep_cookie) {
- ret = mwifiex_pcie_alloc_sleep_cookie_buf(adapter);
- if (ret) {
- mwifiex_dbg(adapter, ERROR, "Failed to allocate sleep_cookie buffer\n");
- goto err_alloc_cookie;
- }
- } else {
- card->sleep_cookie_vbase = NULL;
- }
- return;
-
-err_alloc_cookie:
- mwifiex_pcie_delete_cmdrsp_buf(adapter);
-err_alloc_cmdbuf:
- mwifiex_pcie_delete_evtbd_ring(adapter);
-err_cre_evtbd:
- mwifiex_pcie_delete_rxbd_ring(adapter);
-err_cre_rxbd:
- mwifiex_pcie_delete_txbd_ring(adapter);
-err_cre_txbd:
pci_iounmap(pdev, card->pci_mmap1);
}
@@ -3274,6 +3258,7 @@
.cleanup_mpa_buf = NULL,
.init_fw_port = mwifiex_pcie_init_fw_port,
.clean_pcie_ring = mwifiex_clean_pcie_ring_buf,
+ .card_reset = mwifiex_pcie_card_reset,
.reg_dump = mwifiex_pcie_reg_dump,
.device_dump = mwifiex_pcie_device_dump,
.down_dev = mwifiex_pcie_down_dev,
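mwifiex_pcie_alloc_buffers() above folds the ring setup that was duplicated between mwifiex_init_pcie() and mwifiex_pcie_up_dev() into a single goto-unwind ladder. A condensed sketch of the pattern with hypothetical names, where each successful allocation gains a matching teardown label so a failure at step N releases steps N-1..1 in reverse order:

	struct ctx;					/* hypothetical */
	int alloc_a(struct ctx *c); void free_a(struct ctx *c);
	int alloc_b(struct ctx *c); void free_b(struct ctx *c);
	int alloc_c(struct ctx *c);

	static int alloc_all(struct ctx *c)
	{
		int ret;

		ret = alloc_a(c);
		if (ret)
			return ret;		/* nothing to undo yet */
		ret = alloc_b(c);
		if (ret)
			goto err_b;
		ret = alloc_c(c);
		if (ret)
			goto err_c;
		return 0;

	err_c:
		free_b(c);			/* unwind in reverse order */
	err_b:
		free_a(c);
		return ret;
	}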
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 0af1c67..f81a006 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -1125,7 +1125,7 @@
data = skb->data;
total_pkt_len = skb->len;
- while (total_pkt_len >= (SDIO_HEADER_OFFSET + INTF_HEADER_LEN)) {
+ while (total_pkt_len >= (SDIO_HEADER_OFFSET + adapter->intf_hdr_len)) {
if (total_pkt_len < adapter->sdio_rx_block_size)
break;
blk_num = *(data + BLOCK_NUMBER_OFFSET);
@@ -1152,7 +1152,7 @@
break;
skb_put(skb_deaggr, pkt_len);
memcpy(skb_deaggr->data, data + SDIO_HEADER_OFFSET, pkt_len);
- skb_pull(skb_deaggr, INTF_HEADER_LEN);
+ skb_pull(skb_deaggr, adapter->intf_hdr_len);
mwifiex_handle_rx_packet(adapter, skb_deaggr);
data += blk_size;
@@ -1178,7 +1178,7 @@
if (upld_typ != MWIFIEX_TYPE_AGGR_DATA) {
skb_trim(skb, pkt_len);
- skb_pull(skb, INTF_HEADER_LEN);
+ skb_pull(skb, adapter->intf_hdr_len);
}
switch (upld_typ) {
@@ -1537,7 +1537,7 @@
rx_len = card->mp_regs[reg->cmd_rd_len_1] << 8;
rx_len |= (u16)card->mp_regs[reg->cmd_rd_len_0];
rx_blocks = DIV_ROUND_UP(rx_len, MWIFIEX_SDIO_BLOCK_SIZE);
- if (rx_len <= INTF_HEADER_LEN ||
+ if (rx_len <= adapter->intf_hdr_len ||
(rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
MWIFIEX_RX_DATA_BUF_SIZE)
return -1;
@@ -1635,7 +1635,7 @@
rx_blocks =
(rx_len + MWIFIEX_SDIO_BLOCK_SIZE -
1) / MWIFIEX_SDIO_BLOCK_SIZE;
- if (rx_len <= INTF_HEADER_LEN ||
+ if (rx_len <= adapter->intf_hdr_len ||
(card->mpa_rx.enabled &&
((rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
card->mpa_rx.buf_size))) {
@@ -1896,7 +1896,7 @@
adapter->cmd_sent = true;
/* Type must be MWIFIEX_TYPE_CMD */
- if (pkt_len <= INTF_HEADER_LEN ||
+ if (pkt_len <= adapter->intf_hdr_len ||
pkt_len > MWIFIEX_UPLD_SIZE)
mwifiex_dbg(adapter, ERROR,
"%s: payload=%p, nb=%d\n",
@@ -2533,12 +2533,8 @@
{
struct sdio_mmc_card *card = adapter->card;
- if (test_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags))
- return;
-
- set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
-
- schedule_work(&card->work);
+ if (!test_and_set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags))
+ schedule_work(&card->work);
}
/* This function dumps FW information */
@@ -2546,11 +2542,9 @@
{
struct sdio_mmc_card *card = adapter->card;
- if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags))
- return;
-
- set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
- schedule_work(&card->work);
+ if (!test_and_set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP,
+ &card->work_flags))
+ schedule_work(&card->work);
}
/* Function to dump SDIO function registers and SDIO scratch registers in case
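Both sdio.c hunks above (and the matching pcie.c ones earlier) replace an open-coded test_bit()/set_bit() pair with test_and_set_bit(). The difference, as a commented sketch with a hypothetical flag:

	/* Racy: two contexts can both pass the test before either set
	 * lands, so the work can be scheduled twice.
	 *
	 *	if (test_bit(MY_WORK_FLAG, &flags))
	 *		return;
	 *	set_bit(MY_WORK_FLAG, &flags);
	 *	schedule_work(&work);
	 *
	 * Atomic: the test and the set are one read-modify-write, so
	 * exactly one caller sees the bit clear and queues the work.
	 *
	 *	if (!test_and_set_bit(MY_WORK_FLAG, &flags))
	 *		schedule_work(&work);
	 */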
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index 83916c1..534d94a 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -2064,6 +2064,15 @@
case HostCmd_CMD_11AC_CFG:
ret = mwifiex_cmd_11ac_cfg(priv, cmd_ptr, cmd_action, data_buf);
break;
+ case HostCmd_CMD_PACKET_AGGR_CTRL:
+ cmd_ptr->command = cpu_to_le16(cmd_no);
+ cmd_ptr->params.pkt_aggr_ctrl.action = cpu_to_le16(cmd_action);
+ cmd_ptr->params.pkt_aggr_ctrl.enable =
+ cpu_to_le16(*(u16 *)data_buf);
+ cmd_ptr->size =
+ cpu_to_le16(sizeof(struct host_cmd_ds_pkt_aggr_ctrl) +
+ S_DS_GEN);
+ break;
case HostCmd_CMD_P2P_MODE_CFG:
cmd_ptr->command = cpu_to_le16(cmd_no);
cmd_ptr->params.mode_cfg.action = cpu_to_le16(cmd_action);
@@ -2241,6 +2250,7 @@
enum state_11d_t state_11d;
struct mwifiex_ds_11n_tx_cfg tx_cfg;
u8 sdio_sp_rx_aggr_enable;
+ u16 packet_aggr_enable;
int data;
if (first_sta) {
@@ -2387,6 +2397,14 @@
"11D: failed to enable 11D\n");
}
+ /* Packet aggregation handshake with firmware */
+ if (aggr_ctrl) {
+ packet_aggr_enable = true;
+ mwifiex_send_cmd(priv, HostCmd_CMD_PACKET_AGGR_CTRL,
+ HostCmd_ACT_GEN_SET, 0,
+ &packet_aggr_enable, true);
+ }
+
/* Send cmd to FW to configure 11n specific configuration
* (Short GI, Channel BW, Green field support etc.) for transmit
*/
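For reference, the handshake payload built in the HostCmd_CMD_PACKET_AGGR_CTRL case above appears to have the shape below; the fields are inferred from what the response handler in sta_cmdresp.c reads back, and the authoritative definition lives in fw.h (the field order shown here is an assumption):

	struct host_cmd_ds_pkt_aggr_ctrl {
		__le16 action;			/* HostCmd_ACT_GEN_SET/_GET */
		__le16 enable;
		__le16 tx_aggr_max_size;
		__le16 tx_aggr_max_num;
		__le16 tx_aggr_align;
	} __packed;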
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index f1d1f56..3348fb3 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -1154,6 +1154,27 @@
return 0;
}
+int mwifiex_ret_pkt_aggr_ctrl(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+{
+ struct host_cmd_ds_pkt_aggr_ctrl *pkt_aggr_ctrl =
+ &resp->params.pkt_aggr_ctrl;
+ struct mwifiex_adapter *adapter = priv->adapter;
+
+ adapter->bus_aggr.enable = le16_to_cpu(pkt_aggr_ctrl->enable);
+ if (adapter->bus_aggr.enable)
+ adapter->intf_hdr_len = INTF_HEADER_LEN;
+ adapter->bus_aggr.mode = MWIFIEX_BUS_AGGR_MODE_LEN_V2;
+ adapter->bus_aggr.tx_aggr_max_size =
+ le16_to_cpu(pkt_aggr_ctrl->tx_aggr_max_size);
+ adapter->bus_aggr.tx_aggr_max_num =
+ le16_to_cpu(pkt_aggr_ctrl->tx_aggr_max_num);
+ adapter->bus_aggr.tx_aggr_align =
+ le16_to_cpu(pkt_aggr_ctrl->tx_aggr_align);
+
+ return 0;
+}
+
/*
* This function handles the command responses.
*
@@ -1255,6 +1276,9 @@
break;
case HostCmd_CMD_11AC_CFG:
break;
+ case HostCmd_CMD_PACKET_AGGR_CTRL:
+ ret = mwifiex_ret_pkt_aggr_ctrl(priv, resp);
+ break;
case HostCmd_CMD_P2P_MODE_CFG:
ret = mwifiex_ret_p2p_mode_cfg(priv, resp, data_buf);
break;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_tx.c b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
index f6683ea..620f865 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
@@ -49,8 +49,7 @@
struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
unsigned int pad;
u16 pkt_type, pkt_offset;
- int hroom = (priv->adapter->iface_type == MWIFIEX_USB) ? 0 :
- INTF_HEADER_LEN;
+ int hroom = adapter->intf_hdr_len;
if (!skb->len) {
mwifiex_dbg(adapter, ERROR,
@@ -116,7 +115,7 @@
local_tx_pd->tx_pkt_offset = cpu_to_le16(pkt_offset);
- /* make space for INTF_HEADER_LEN */
+ /* make space for adapter->intf_hdr_len */
skb_push(skb, hroom);
if (!local_tx_pd->tx_control)
@@ -165,8 +164,9 @@
memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
- tx_info->pkt_len = data_len - (sizeof(struct txpd) + INTF_HEADER_LEN);
- skb_reserve(skb, sizeof(struct txpd) + INTF_HEADER_LEN);
+ tx_info->pkt_len = data_len -
+ (sizeof(struct txpd) + adapter->intf_hdr_len);
+ skb_reserve(skb, sizeof(struct txpd) + adapter->intf_hdr_len);
skb_push(skb, sizeof(struct txpd));
local_tx_pd = (struct txpd *) skb->data;
@@ -177,11 +177,11 @@
local_tx_pd->bss_num = priv->bss_num;
local_tx_pd->bss_type = priv->bss_type;
+ skb_push(skb, adapter->intf_hdr_len);
if (adapter->iface_type == MWIFIEX_USB) {
ret = adapter->if_ops.host_to_card(adapter, priv->usb_port,
skb, NULL);
} else {
- skb_push(skb, INTF_HEADER_LEN);
tx_param.next_pkt_len = 0;
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
skb, &tx_param);
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index 7d0d3ff..39cd677 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -55,11 +55,8 @@
tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
} else {
tid_list = &priv->wmm.tid_tbl_ptr[tid_down].ra_list;
- if (!list_empty(tid_list))
- ra_list = list_first_entry(tid_list,
- struct mwifiex_ra_list_tbl, list);
- else
- ra_list = NULL;
+ ra_list = list_first_entry_or_null(tid_list,
+ struct mwifiex_ra_list_tbl, list);
tx_info->flags &= ~MWIFIEX_BUF_FLAG_TDLS_PKT;
}
@@ -161,7 +158,7 @@
u8 *pos;
assoc_rsp = (struct ieee_types_assoc_rsp *)&priv->assoc_rsp_buf;
- pos = (void *)skb_put(skb, 4);
+ pos = skb_put(skb, 4);
*pos++ = WLAN_EID_AID;
*pos++ = 2;
memcpy(pos, &assoc_rsp->a_id, sizeof(assoc_rsp->a_id));
@@ -175,7 +172,7 @@
struct ieee80211_vht_cap vht_cap;
u8 *pos;
- pos = (void *)skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
+ pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
*pos++ = WLAN_EID_VHT_CAPABILITY;
*pos++ = sizeof(struct ieee80211_vht_cap);
@@ -210,7 +207,7 @@
return 0;
}
- pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_operation) + 2);
+ pos = skb_put(skb, sizeof(struct ieee80211_ht_operation) + 2);
*pos++ = WLAN_EID_HT_OPERATION;
*pos++ = sizeof(struct ieee80211_ht_operation);
ht_oper = (void *)pos;
@@ -275,7 +272,7 @@
ap_vht_cap = bss_desc->bcn_vht_cap;
}
- pos = (void *)skb_put(skb, sizeof(struct ieee80211_vht_operation) + 2);
+ pos = skb_put(skb, sizeof(struct ieee80211_vht_operation) + 2);
*pos++ = WLAN_EID_VHT_OPERATION;
*pos++ = sizeof(struct ieee80211_vht_operation);
vht_oper = (struct ieee80211_vht_operation *)pos;
@@ -362,7 +359,7 @@
{
struct ieee_types_extcap *extcap;
- extcap = (void *)skb_put(skb, sizeof(struct ieee_types_extcap));
+ extcap = skb_put(skb, sizeof(struct ieee_types_extcap));
extcap->ieee_hdr.element_id = WLAN_EID_EXT_CAPABILITY;
extcap->ieee_hdr.len = 8;
memset(extcap->ext_capab, 0, 8);
@@ -375,7 +372,7 @@
static void mwifiex_tdls_add_qos_capab(struct sk_buff *skb)
{
- u8 *pos = (void *)skb_put(skb, 3);
+ u8 *pos = skb_put(skb, 3);
*pos++ = WLAN_EID_QOS_CAPA;
*pos++ = 1;
@@ -391,8 +388,7 @@
u8 ac_be[] = {0x03, 0xa4, 0x00, 0x00};
u8 ac_bk[] = {0x27, 0xa4, 0x00, 0x00};
- wmm = (void *)skb_put(skb, sizeof(*wmm));
- memset(wmm, 0, sizeof(*wmm));
+ wmm = skb_put_zero(skb, sizeof(*wmm));
wmm->element_id = WLAN_EID_VENDOR_SPECIFIC;
wmm->len = sizeof(*wmm) - 2;
@@ -417,8 +413,8 @@
{
u8 *buf;
- buf = (void *)skb_put(skb, MWIFIEX_TDLS_WMM_INFO_SIZE +
- sizeof(struct ieee_types_header));
+ buf = skb_put(skb,
+ MWIFIEX_TDLS_WMM_INFO_SIZE + sizeof(struct ieee_types_header));
*buf++ = WLAN_EID_VENDOR_SPECIFIC;
*buf++ = 7; /* len */
@@ -435,7 +431,7 @@
{
struct ieee_types_bss_co_2040 *bssco;
- bssco = (void *)skb_put(skb, sizeof(struct ieee_types_bss_co_2040));
+ bssco = skb_put(skb, sizeof(struct ieee_types_bss_co_2040));
bssco->ieee_hdr.element_id = WLAN_EID_BSS_COEX_2040;
bssco->ieee_hdr.len = sizeof(struct ieee_types_bss_co_2040) -
sizeof(struct ieee_types_header);
@@ -447,8 +443,8 @@
struct ieee_types_generic *supp_chan;
u8 chan_supp[] = {1, 11};
- supp_chan = (void *)skb_put(skb, (sizeof(struct ieee_types_header) +
- sizeof(chan_supp)));
+ supp_chan = skb_put(skb,
+ (sizeof(struct ieee_types_header) + sizeof(chan_supp)));
supp_chan->ieee_hdr.element_id = WLAN_EID_SUPPORTED_CHANNELS;
supp_chan->ieee_hdr.len = sizeof(chan_supp);
memcpy(supp_chan->data, chan_supp, sizeof(chan_supp));
@@ -459,8 +455,8 @@
struct ieee_types_generic *reg_class;
u8 rc_list[] = {1,
1, 2, 3, 4, 12, 22, 23, 24, 25, 27, 28, 29, 30, 32, 33};
- reg_class = (void *)skb_put(skb, (sizeof(struct ieee_types_header) +
- sizeof(rc_list)));
+ reg_class = skb_put(skb,
+ (sizeof(struct ieee_types_header) + sizeof(rc_list)));
reg_class->ieee_hdr.element_id = WLAN_EID_SUPPORTED_REGULATORY_CLASSES;
reg_class->ieee_hdr.len = sizeof(rc_list);
memcpy(reg_class->data, rc_list, sizeof(rc_list));
@@ -479,7 +475,7 @@
capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap;
- tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
+ tf = skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
memcpy(tf->da, peer, ETH_ALEN);
memcpy(tf->sa, priv->curr_addr, ETH_ALEN);
tf->ether_type = cpu_to_be16(ETH_P_TDLS);
@@ -498,7 +494,7 @@
return ret;
}
- pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+ pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
*pos++ = WLAN_EID_HT_CAPABILITY;
*pos++ = sizeof(struct ieee80211_ht_cap);
ht_cap = (void *)pos;
@@ -538,7 +534,7 @@
return ret;
}
- pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+ pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
*pos++ = WLAN_EID_HT_CAPABILITY;
*pos++ = sizeof(struct ieee80211_ht_cap);
ht_cap = (void *)pos;
@@ -620,7 +616,7 @@
{
struct ieee80211_tdls_lnkie *lnkid;
- lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
+ lnkid = skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
lnkid->ie_type = WLAN_EID_LINK_ID;
lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) -
sizeof(struct ieee_types_header);
@@ -683,8 +679,7 @@
return ret;
}
if (extra_ies_len)
- memcpy(skb_put(skb, extra_ies_len), extra_ies,
- extra_ies_len);
+ skb_put_data(skb, extra_ies, extra_ies_len);
mwifiex_tdls_add_link_ie(skb, priv->curr_addr, peer,
priv->cfg_bssid);
break;
@@ -697,8 +692,7 @@
return ret;
}
if (extra_ies_len)
- memcpy(skb_put(skb, extra_ies_len), extra_ies,
- extra_ies_len);
+ skb_put_data(skb, extra_ies, extra_ies_len);
mwifiex_tdls_add_link_ie(skb, peer, priv->curr_addr,
priv->cfg_bssid);
break;
@@ -747,7 +741,7 @@
capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap;
- mgmt = (void *)skb_put(skb, offsetof(struct ieee80211_mgmt, u));
+ mgmt = skb_put(skb, offsetof(struct ieee80211_mgmt, u));
memset(mgmt, 0, 24);
memcpy(mgmt->da, peer, ETH_ALEN);
@@ -781,7 +775,7 @@
return ret;
}
- pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+ pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
*pos++ = WLAN_EID_HT_CAPABILITY;
*pos++ = sizeof(struct ieee80211_ht_cap);
ht_cap = (void *)pos;
@@ -856,8 +850,8 @@
pkt_type = PKT_TYPE_MGMT;
tx_control = 0;
- pos = skb_put(skb, MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
- memset(pos, 0, MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
+ pos = skb_put_zero(skb,
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
memcpy(pos, &pkt_type, sizeof(pkt_type));
memcpy(pos + sizeof(pkt_type), &tx_control, sizeof(tx_control));
@@ -869,7 +863,7 @@
}
if (extra_ies_len)
- memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
+ skb_put_data(skb, extra_ies, extra_ies_len);
/* the TDLS link IE is always added last we are the responder */
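The tdls.c conversions above lean on the skb helpers introduced in this same cycle; the equivalences, as a quick sketch (not the tdls code itself):

	/* skb_put() now returns void *, so the (void *) casts go away:
	 *	pos = skb_put(skb, len);
	 *
	 * skb_put_data(skb, src, len) replaces
	 *	memcpy(skb_put(skb, len), src, len);
	 *
	 * skb_put_zero(skb, len) replaces
	 *	memset(skb_put(skb, len), 0, len);
	 */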
diff --git a/drivers/net/wireless/marvell/mwifiex/txrx.c b/drivers/net/wireless/marvell/mwifiex/txrx.c
index fac28bd..d848933 100644
--- a/drivers/net/wireless/marvell/mwifiex/txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/txrx.c
@@ -91,7 +91,7 @@
struct mwifiex_sta_node *dest_node;
struct ethhdr *hdr = (void *)skb->data;
- hroom = (adapter->iface_type == MWIFIEX_USB) ? 0 : INTF_HEADER_LEN;
+ hroom = adapter->intf_hdr_len;
if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) {
dest_node = mwifiex_get_sta_entry(priv, hdr->h_dest);
@@ -117,7 +117,7 @@
if (adapter->iface_type == MWIFIEX_USB) {
ret = adapter->if_ops.host_to_card(adapter,
priv->usb_port,
- skb, NULL);
+ skb, tx_param);
} else {
ret = adapter->if_ops.host_to_card(adapter,
MWIFIEX_TYPE_DATA,
@@ -179,18 +179,13 @@
mwifiex_write_data_complete(adapter, skb, 0, 0);
return ret;
}
- if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
- if (adapter->iface_type == MWIFIEX_USB)
- local_tx_pd = (struct txpd *)head_ptr;
- else
- local_tx_pd = (struct txpd *) (head_ptr +
- INTF_HEADER_LEN);
- }
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
+ local_tx_pd = (struct txpd *)(head_ptr + adapter->intf_hdr_len);
if (adapter->iface_type == MWIFIEX_USB) {
ret = adapter->if_ops.host_to_card(adapter,
priv->usb_port,
- skb, NULL);
+ skb, tx_param);
} else {
ret = adapter->if_ops.host_to_card(adapter,
MWIFIEX_TYPE_DATA,
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c
index e10b2a5..e8c8728 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c
@@ -312,6 +312,17 @@
adapter->event_skb->len -
sizeof(eventcause));
break;
+
+ case EVENT_REMAIN_ON_CHAN_EXPIRED:
+ mwifiex_dbg(adapter, EVENT,
+ "event: uap: Remain on channel expired\n");
+ cfg80211_remain_on_channel_expired(&priv->wdev,
+ priv->roc_cfg.cookie,
+ &priv->roc_cfg.chan,
+ GFP_ATOMIC);
+ memset(&priv->roc_cfg, 0x00, sizeof(struct mwifiex_roc_cfg));
+ break;
+
default:
mwifiex_dbg(adapter, EVENT,
"event: unknown event id: %#x\n", eventcause);
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index bf5660e..1e6a62c 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -468,8 +468,7 @@
struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
int pad;
u16 pkt_type, pkt_offset;
- int hroom = (priv->adapter->iface_type == MWIFIEX_USB) ? 0 :
- INTF_HEADER_LEN;
+ int hroom = adapter->intf_hdr_len;
if (!skb->len) {
mwifiex_dbg(adapter, ERROR,
@@ -521,7 +520,7 @@
txpd->tx_pkt_offset = cpu_to_le16(pkt_offset);
- /* make space for INTF_HEADER_LEN */
+ /* make space for adapter->intf_hdr_len */
skb_push(skb, hroom);
if (!txpd->tx_control)
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 2f7705c..cb1753e 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -363,6 +363,7 @@
for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++) {
port = &card->port[i];
for (j = 0; j < MWIFIEX_TX_DATA_URB; j++) {
+ usb_kill_urb(port->tx_data_list[j].urb);
usb_free_urb(port->tx_data_list[j].urb);
port->tx_data_list[j].urb = NULL;
}
@@ -424,7 +425,8 @@
card->intf = intf;
pr_debug("info: bcdUSB=%#x Device Class=%#x SubClass=%#x Protocol=%#x\n",
- udev->descriptor.bcdUSB, udev->descriptor.bDeviceClass,
+ le16_to_cpu(udev->descriptor.bcdUSB),
+ udev->descriptor.bDeviceClass,
udev->descriptor.bDeviceSubClass,
udev->descriptor.bDeviceProtocol);
@@ -661,75 +663,6 @@
.soft_unbind = 1,
};
-static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
-{
- struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
- struct usb_tx_data_port *port;
- int i, j;
-
- card->tx_cmd.adapter = adapter;
- card->tx_cmd.ep = card->tx_cmd_ep;
-
- card->tx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!card->tx_cmd.urb)
- return -ENOMEM;
-
- for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++) {
- port = &card->port[i];
- if (!port->tx_data_ep)
- continue;
- port->tx_data_ix = 0;
- if (port->tx_data_ep == MWIFIEX_USB_EP_DATA)
- port->block_status = false;
- else
- port->block_status = true;
- for (j = 0; j < MWIFIEX_TX_DATA_URB; j++) {
- port->tx_data_list[j].adapter = adapter;
- port->tx_data_list[j].ep = port->tx_data_ep;
- port->tx_data_list[j].urb =
- usb_alloc_urb(0, GFP_KERNEL);
- if (!port->tx_data_list[j].urb)
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter)
-{
- struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
- int i;
-
- card->rx_cmd.adapter = adapter;
- card->rx_cmd.ep = card->rx_cmd_ep;
-
- card->rx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!card->rx_cmd.urb)
- return -ENOMEM;
-
- card->rx_cmd.skb = dev_alloc_skb(MWIFIEX_RX_CMD_BUF_SIZE);
- if (!card->rx_cmd.skb)
- return -ENOMEM;
-
- if (mwifiex_usb_submit_rx_urb(&card->rx_cmd, MWIFIEX_RX_CMD_BUF_SIZE))
- return -1;
-
- for (i = 0; i < MWIFIEX_RX_DATA_URB; i++) {
- card->rx_data_list[i].adapter = adapter;
- card->rx_data_list[i].ep = card->rx_data_ep;
-
- card->rx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!card->rx_data_list[i].urb)
- return -1;
- if (mwifiex_usb_submit_rx_urb(&card->rx_data_list[i],
- MWIFIEX_RX_DATA_BUF_SIZE))
- return -1;
- }
-
- return 0;
-}
-
static int mwifiex_write_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf,
u32 *len, u8 ep, u32 timeout)
{
@@ -845,73 +778,31 @@
return true;
}
-/* This function write a command/data packet to card. */
-static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
- struct sk_buff *skb,
- struct mwifiex_tx_param *tx_param)
+static int mwifiex_usb_construct_send_urb(struct mwifiex_adapter *adapter,
+ struct usb_tx_data_port *port, u8 ep,
+ struct urb_context *context,
+ struct sk_buff *skb_send)
{
struct usb_card_rec *card = adapter->card;
- struct urb_context *context = NULL;
- struct usb_tx_data_port *port = NULL;
- u8 *data = (u8 *)skb->data;
+ int ret = -EINPROGRESS;
struct urb *tx_urb;
- int idx, ret = -EINPROGRESS;
-
- if (adapter->is_suspended) {
- mwifiex_dbg(adapter, ERROR,
- "%s: not allowed while suspended\n", __func__);
- return -1;
- }
-
- if (adapter->surprise_removed) {
- mwifiex_dbg(adapter, ERROR, "%s: device removed\n", __func__);
- return -1;
- }
-
- mwifiex_dbg(adapter, INFO, "%s: ep=%d\n", __func__, ep);
-
- if (ep == card->tx_cmd_ep) {
- context = &card->tx_cmd;
- } else {
- for (idx = 0; idx < MWIFIEX_TX_DATA_PORT; idx++) {
- if (ep == card->port[idx].tx_data_ep) {
- port = &card->port[idx];
- if (atomic_read(&port->tx_data_urb_pending)
- >= MWIFIEX_TX_DATA_URB) {
- port->block_status = true;
- adapter->data_sent =
- mwifiex_usb_data_sent(adapter);
- return -EBUSY;
- }
- if (port->tx_data_ix >= MWIFIEX_TX_DATA_URB)
- port->tx_data_ix = 0;
- context =
- &port->tx_data_list[port->tx_data_ix++];
- break;
- }
- }
- if (!port) {
- mwifiex_dbg(adapter, ERROR, "Wrong usb tx data port\n");
- return -1;
- }
- }
context->adapter = adapter;
context->ep = ep;
- context->skb = skb;
+ context->skb = skb_send;
tx_urb = context->urb;
if (ep == card->tx_cmd_ep &&
card->tx_cmd_ep_type == USB_ENDPOINT_XFER_INT)
usb_fill_int_urb(tx_urb, card->udev,
- usb_sndintpipe(card->udev, ep), data,
- skb->len, mwifiex_usb_tx_complete,
+ usb_sndintpipe(card->udev, ep), skb_send->data,
+ skb_send->len, mwifiex_usb_tx_complete,
(void *)context, card->tx_cmd_interval);
else
usb_fill_bulk_urb(tx_urb, card->udev,
- usb_sndbulkpipe(card->udev, ep), data,
- skb->len, mwifiex_usb_tx_complete,
- (void *)context);
+ usb_sndbulkpipe(card->udev, ep),
+ skb_send->data, skb_send->len,
+ mwifiex_usb_tx_complete, (void *)context);
tx_urb->transfer_flags |= URB_ZERO_PACKET;
@@ -948,6 +839,444 @@
return ret;
}
+static int mwifiex_usb_prepare_tx_aggr_skb(struct mwifiex_adapter *adapter,
+ struct usb_tx_data_port *port,
+ struct sk_buff **skb_send)
+{
+ struct sk_buff *skb_aggr, *skb_tmp;
+ u8 *payload, pad;
+ u16 align = adapter->bus_aggr.tx_aggr_align;
+ struct mwifiex_txinfo *tx_info = NULL;
+ bool is_txinfo_set = false;
+
+ /* Packets in aggr_list will be sent either in skb_aggr or on
+ * write complete; delete the tx_aggr timer
+ */
+ if (port->tx_aggr.timer_cnxt.is_hold_timer_set) {
+ del_timer(&port->tx_aggr.timer_cnxt.hold_timer);
+ port->tx_aggr.timer_cnxt.is_hold_timer_set = false;
+ port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0;
+ }
+
+ skb_aggr = mwifiex_alloc_dma_align_buf(port->tx_aggr.aggr_len,
+ GFP_ATOMIC);
+ if (!skb_aggr) {
+ mwifiex_dbg(adapter, ERROR,
+ "%s: alloc skb_aggr failed\n", __func__);
+
+ while ((skb_tmp = skb_dequeue(&port->tx_aggr.aggr_list)))
+ mwifiex_write_data_complete(adapter, skb_tmp, 0, -1);
+
+ port->tx_aggr.aggr_num = 0;
+ port->tx_aggr.aggr_len = 0;
+ return -EBUSY;
+ }
+
+ tx_info = MWIFIEX_SKB_TXCB(skb_aggr);
+ memset(tx_info, 0, sizeof(*tx_info));
+
+ while ((skb_tmp = skb_dequeue(&port->tx_aggr.aggr_list))) {
+ /* padding for aligning the next packet header */
+ pad = (align - (skb_tmp->len & (align - 1))) % align;
+ payload = skb_put(skb_aggr, skb_tmp->len + pad);
+ memcpy(payload, skb_tmp->data, skb_tmp->len);
+ if (skb_queue_empty(&port->tx_aggr.aggr_list)) {
+ /* do not pad the last packet */
+ *(u16 *)payload = cpu_to_le16(skb_tmp->len);
+ *(u16 *)&payload[2] =
+ cpu_to_le16(MWIFIEX_TYPE_AGGR_DATA_V2 | 0x80);
+ skb_trim(skb_aggr, skb_aggr->len - pad);
+ } else {
+ /* add aggregation interface header */
+ *(u16 *)payload = cpu_to_le16(skb_tmp->len + pad);
+ *(u16 *)&payload[2] =
+ cpu_to_le16(MWIFIEX_TYPE_AGGR_DATA_V2);
+ }
+
+ if (!is_txinfo_set) {
+ tx_info->bss_num = MWIFIEX_SKB_TXCB(skb_tmp)->bss_num;
+ tx_info->bss_type = MWIFIEX_SKB_TXCB(skb_tmp)->bss_type;
+ is_txinfo_set = true;
+ }
+
+ port->tx_aggr.aggr_num--;
+ port->tx_aggr.aggr_len -= (skb_tmp->len + pad);
+ mwifiex_write_data_complete(adapter, skb_tmp, 0, 0);
+ }
+
+ tx_info->pkt_len = skb_aggr->len -
+ (sizeof(struct txpd) + adapter->intf_hdr_len);
+ tx_info->flags |= MWIFIEX_BUF_FLAG_AGGR_PKT;
+
+ port->tx_aggr.aggr_num = 0;
+ port->tx_aggr.aggr_len = 0;
+ *skb_send = skb_aggr;
+
+ return 0;
+}
+
+/* This function prepares a data packet to be sent under the usb tx
+ * aggregation protocol: it checks the current usb aggregation status and
+ * links the packet to the aggregation list if possible. Work flow:
+ * (1) if only 1 packet is available, add the usb tx aggregation header
+ * and send it.
+ * (2) if the packet can be aggregated, link it to the current
+ * aggregation list.
+ * (3) if the packet cannot be aggregated, aggregate and send the packets
+ * already in the aggregation list; then link the packet into the list if
+ * there are more packets in the transmit queue, otherwise try to
+ * transmit it as a single packet.
+ */
+static int mwifiex_usb_aggr_tx_data(struct mwifiex_adapter *adapter, u8 ep,
+ struct sk_buff *skb,
+ struct mwifiex_tx_param *tx_param,
+ struct usb_tx_data_port *port)
+{
+ u8 *payload, pad;
+ u16 align = adapter->bus_aggr.tx_aggr_align;
+ struct sk_buff *skb_send = NULL;
+ struct urb_context *context = NULL;
+ struct txpd *local_tx_pd =
+ (struct txpd *)((u8 *)skb->data + adapter->intf_hdr_len);
+ u8 f_send_aggr_buf = 0;
+ u8 f_send_cur_buf = 0;
+ u8 f_precopy_cur_buf = 0;
+ u8 f_postcopy_cur_buf = 0;
+ u32 timeout;
+ int ret;
+
+ /* padding to ensure each packet's alignment */
+ pad = (align - (skb->len & (align - 1))) % align;
+
+ if (tx_param && tx_param->next_pkt_len) {
+ /* next packet available in tx queue */
+ if (port->tx_aggr.aggr_len + skb->len + pad >
+ adapter->bus_aggr.tx_aggr_max_size) {
+ f_send_aggr_buf = 1;
+ f_postcopy_cur_buf = 1;
+ } else {
+ /* current packet can be aggregated */
+ f_precopy_cur_buf = 1;
+
+ if (port->tx_aggr.aggr_len + skb->len + pad +
+ tx_param->next_pkt_len >
+ adapter->bus_aggr.tx_aggr_max_size ||
+ port->tx_aggr.aggr_num + 2 >
+ adapter->bus_aggr.tx_aggr_max_num) {
+ /* next packet cannot be aggregated;
+ * send the current aggregation buffer
+ */
+ f_send_aggr_buf = 1;
+ }
+ }
+ } else {
+ /* last packet in tx queue */
+ if (port->tx_aggr.aggr_num > 0) {
+ /* pending packets in aggregation buffer */
+ if (port->tx_aggr.aggr_len + skb->len + pad >
+ adapter->bus_aggr.tx_aggr_max_size) {
+ /* current packet cannot be aggregated;
+ * send the aggr buffer first, then send the packet.
+ */
+ f_send_cur_buf = 1;
+ } else {
+ /* last packet, aggregate and send */
+ f_precopy_cur_buf = 1;
+ }
+
+ f_send_aggr_buf = 1;
+ } else {
+ /* no pending packets in aggregation buffer,
+ * send current packet immediately
+ */
+ f_send_cur_buf = 1;
+ }
+ }
+
+ if (local_tx_pd->flags & MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET) {
+ /* Send NULL packet immediately */
+ if (f_precopy_cur_buf) {
+ if (skb_queue_empty(&port->tx_aggr.aggr_list)) {
+ f_precopy_cur_buf = 0;
+ f_send_aggr_buf = 0;
+ f_send_cur_buf = 1;
+ } else {
+ f_send_aggr_buf = 1;
+ }
+ } else if (f_postcopy_cur_buf) {
+ f_send_cur_buf = 1;
+ f_postcopy_cur_buf = 0;
+ }
+ }
+
+ if (f_precopy_cur_buf) {
+ skb_queue_tail(&port->tx_aggr.aggr_list, skb);
+ port->tx_aggr.aggr_len += (skb->len + pad);
+ port->tx_aggr.aggr_num++;
+ if (f_send_aggr_buf)
+ goto send_aggr_buf;
+
+ /* packet will not be sent immediately;
+ * set a timer to make sure it is sent within a
+ * strict time limit. Dynamically adjust the timeout
+ * value according to the number of packets in aggr_list
+ */
+ if (!port->tx_aggr.timer_cnxt.is_hold_timer_set) {
+ port->tx_aggr.timer_cnxt.hold_tmo_msecs =
+ MWIFIEX_USB_TX_AGGR_TMO_MIN;
+ timeout =
+ port->tx_aggr.timer_cnxt.hold_tmo_msecs;
+ mod_timer(&port->tx_aggr.timer_cnxt.hold_timer,
+ jiffies + msecs_to_jiffies(timeout));
+ port->tx_aggr.timer_cnxt.is_hold_timer_set = true;
+ } else {
+ if (port->tx_aggr.timer_cnxt.hold_tmo_msecs <
+ MWIFIEX_USB_TX_AGGR_TMO_MAX) {
+ /* Dynamically adjust timeout */
+ timeout =
+ ++port->tx_aggr.timer_cnxt.hold_tmo_msecs;
+ mod_timer(&port->tx_aggr.timer_cnxt.hold_timer,
+ jiffies + msecs_to_jiffies(timeout));
+ }
+ }
+ }
+
+send_aggr_buf:
+ if (f_send_aggr_buf) {
+ ret = mwifiex_usb_prepare_tx_aggr_skb(adapter, port, &skb_send);
+ if (!ret) {
+ context = &port->tx_data_list[port->tx_data_ix++];
+ ret = mwifiex_usb_construct_send_urb(adapter, port, ep,
+ context, skb_send);
+ if (ret == -1)
+ mwifiex_write_data_complete(adapter, skb_send,
+ 0, -1);
+ }
+ }
+
+ if (f_send_cur_buf) {
+ if (f_send_aggr_buf) {
+ if (atomic_read(&port->tx_data_urb_pending) >=
+ MWIFIEX_TX_DATA_URB) {
+ port->block_status = true;
+ adapter->data_sent =
+ mwifiex_usb_data_sent(adapter);
+ /* no urb available, postcopy the packet */
+ f_postcopy_cur_buf = 1;
+ goto postcopy_cur_buf;
+ }
+
+ if (port->tx_data_ix >= MWIFIEX_TX_DATA_URB)
+ port->tx_data_ix = 0;
+ }
+
+ payload = skb->data;
+ *(u16 *)&payload[2] =
+ cpu_to_le16(MWIFIEX_TYPE_AGGR_DATA_V2 | 0x80);
+ *(u16 *)payload = cpu_to_le16(skb->len);
+ skb_send = skb;
+ context = &port->tx_data_list[port->tx_data_ix++];
+ return mwifiex_usb_construct_send_urb(adapter, port, ep,
+ context, skb_send);
+ }
+
+postcopy_cur_buf:
+ if (f_postcopy_cur_buf) {
+ skb_queue_tail(&port->tx_aggr.aggr_list, skb);
+ port->tx_aggr.aggr_len += (skb->len + pad);
+ port->tx_aggr.aggr_num++;
+ /* New aggregation begins, start the timer */
+ if (!port->tx_aggr.timer_cnxt.is_hold_timer_set) {
+ port->tx_aggr.timer_cnxt.hold_tmo_msecs =
+ MWIFIEX_USB_TX_AGGR_TMO_MIN;
+ timeout = port->tx_aggr.timer_cnxt.hold_tmo_msecs;
+ mod_timer(&port->tx_aggr.timer_cnxt.hold_timer,
+ jiffies + msecs_to_jiffies(timeout));
+ port->tx_aggr.timer_cnxt.is_hold_timer_set = true;
+ }
+ }
+
+ return -EINPROGRESS;
+}
+
+static void mwifiex_usb_tx_aggr_tmo(unsigned long context)
+{
+ struct urb_context *urb_cnxt = NULL;
+ struct sk_buff *skb_send = NULL;
+ struct tx_aggr_tmr_cnxt *timer_context =
+ (struct tx_aggr_tmr_cnxt *)context;
+ struct mwifiex_adapter *adapter = timer_context->adapter;
+ struct usb_tx_data_port *port = timer_context->port;
+ unsigned long flags;
+ int err = 0;
+
+ spin_lock_irqsave(&port->tx_aggr_lock, flags);
+ err = mwifiex_usb_prepare_tx_aggr_skb(adapter, port, &skb_send);
+ if (err) {
+ mwifiex_dbg(adapter, ERROR,
+ "prepare tx aggr skb failed, err=%d\n", err);
+ return;
+ }
+
+ if (atomic_read(&port->tx_data_urb_pending) >=
+ MWIFIEX_TX_DATA_URB) {
+ port->block_status = true;
+ adapter->data_sent =
+ mwifiex_usb_data_sent(adapter);
+ err = -1;
+ goto done;
+ }
+
+ if (port->tx_data_ix >= MWIFIEX_TX_DATA_URB)
+ port->tx_data_ix = 0;
+
+ urb_cnxt = &port->tx_data_list[port->tx_data_ix++];
+ err = mwifiex_usb_construct_send_urb(adapter, port, port->tx_data_ep,
+ urb_cnxt, skb_send);
+done:
+ if (err == -1)
+ mwifiex_write_data_complete(adapter, skb_send, 0, -1);
+ spin_unlock_irqrestore(&port->tx_aggr_lock, flags);
+}
+
+/* This function writes a command/data packet to the card. */
+static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
+ struct sk_buff *skb,
+ struct mwifiex_tx_param *tx_param)
+{
+ struct usb_card_rec *card = adapter->card;
+ struct urb_context *context = NULL;
+ struct usb_tx_data_port *port = NULL;
+ unsigned long flags;
+ int idx, ret;
+
+ if (adapter->is_suspended) {
+ mwifiex_dbg(adapter, ERROR,
+ "%s: not allowed while suspended\n", __func__);
+ return -1;
+ }
+
+ if (adapter->surprise_removed) {
+ mwifiex_dbg(adapter, ERROR, "%s: device removed\n", __func__);
+ return -1;
+ }
+
+ mwifiex_dbg(adapter, INFO, "%s: ep=%d\n", __func__, ep);
+
+ if (ep == card->tx_cmd_ep) {
+ context = &card->tx_cmd;
+ } else {
+ /* get the data port structure for the endpoint */
+ for (idx = 0; idx < MWIFIEX_TX_DATA_PORT; idx++) {
+ if (ep == card->port[idx].tx_data_ep) {
+ port = &card->port[idx];
+ if (atomic_read(&port->tx_data_urb_pending)
+ >= MWIFIEX_TX_DATA_URB) {
+ port->block_status = true;
+ adapter->data_sent =
+ mwifiex_usb_data_sent(adapter);
+ return -EBUSY;
+ }
+ if (port->tx_data_ix >= MWIFIEX_TX_DATA_URB)
+ port->tx_data_ix = 0;
+ break;
+ }
+ }
+
+ if (!port) {
+ mwifiex_dbg(adapter, ERROR, "Wrong usb tx data port\n");
+ return -1;
+ }
+
+ if (adapter->bus_aggr.enable) {
+ spin_lock_irqsave(&port->tx_aggr_lock, flags);
+ ret = mwifiex_usb_aggr_tx_data(adapter, ep, skb,
+ tx_param, port);
+ spin_unlock_irqrestore(&port->tx_aggr_lock, flags);
+ return ret;
+ }
+
+ context = &port->tx_data_list[port->tx_data_ix++];
+ }
+
+ return mwifiex_usb_construct_send_urb(adapter, port, ep, context, skb);
+}
+
+static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
+{
+ struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
+ struct usb_tx_data_port *port;
+ int i, j;
+
+ card->tx_cmd.adapter = adapter;
+ card->tx_cmd.ep = card->tx_cmd_ep;
+
+ card->tx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!card->tx_cmd.urb)
+ return -ENOMEM;
+
+ for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++) {
+ port = &card->port[i];
+ if (!port->tx_data_ep)
+ continue;
+ port->tx_data_ix = 0;
+ skb_queue_head_init(&port->tx_aggr.aggr_list);
+ if (port->tx_data_ep == MWIFIEX_USB_EP_DATA)
+ port->block_status = false;
+ else
+ port->block_status = true;
+ for (j = 0; j < MWIFIEX_TX_DATA_URB; j++) {
+ port->tx_data_list[j].adapter = adapter;
+ port->tx_data_list[j].ep = port->tx_data_ep;
+ port->tx_data_list[j].urb =
+ usb_alloc_urb(0, GFP_KERNEL);
+ if (!port->tx_data_list[j].urb)
+ return -ENOMEM;
+ }
+
+ port->tx_aggr.timer_cnxt.adapter = adapter;
+ port->tx_aggr.timer_cnxt.port = port;
+ port->tx_aggr.timer_cnxt.is_hold_timer_set = false;
+ port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0;
+ setup_timer(&port->tx_aggr.timer_cnxt.hold_timer,
+ mwifiex_usb_tx_aggr_tmo,
+ (unsigned long)&port->tx_aggr.timer_cnxt);
+ }
+
+ return 0;
+}
+
+static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter)
+{
+ struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
+ int i;
+
+ card->rx_cmd.adapter = adapter;
+ card->rx_cmd.ep = card->rx_cmd_ep;
+
+ card->rx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!card->rx_cmd.urb)
+ return -ENOMEM;
+
+ card->rx_cmd.skb = dev_alloc_skb(MWIFIEX_RX_CMD_BUF_SIZE);
+ if (!card->rx_cmd.skb)
+ return -ENOMEM;
+
+ if (mwifiex_usb_submit_rx_urb(&card->rx_cmd, MWIFIEX_RX_CMD_BUF_SIZE))
+ return -1;
+
+ for (i = 0; i < MWIFIEX_RX_DATA_URB; i++) {
+ card->rx_data_list[i].adapter = adapter;
+ card->rx_data_list[i].ep = card->rx_data_ep;
+
+ card->rx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!card->rx_data_list[i].urb)
+ return -1;
+ if (mwifiex_usb_submit_rx_urb(&card->rx_data_list[i],
+ MWIFIEX_RX_DATA_BUF_SIZE))
+ return -1;
+ }
+
+ return 0;
+}
+
/* This function register usb device and initialize parameter. */
static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
{
@@ -988,10 +1317,32 @@
return 0;
}
+static void mwifiex_usb_cleanup_tx_aggr(struct mwifiex_adapter *adapter)
+{
+ struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
+ struct usb_tx_data_port *port;
+ struct sk_buff *skb_tmp;
+ int idx;
+
+ for (idx = 0; idx < MWIFIEX_TX_DATA_PORT; idx++) {
+ port = &card->port[idx];
+ if (adapter->bus_aggr.enable)
+ while ((skb_tmp =
+ skb_dequeue(&port->tx_aggr.aggr_list)))
+ mwifiex_write_data_complete(adapter, skb_tmp,
+ 0, -1);
+ del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer);
+ port->tx_aggr.timer_cnxt.is_hold_timer_set = false;
+ port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0;
+ }
+}
+
static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
{
struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
+ mwifiex_usb_cleanup_tx_aggr(adapter);
+
card->adapter = NULL;
}
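A worked example of the per-packet header that mwifiex_usb_prepare_tx_aggr_skb() writes, assuming tx_aggr_align = 4 and a 1511-byte packet that is not the last one in aggr_list:

	/* pad = (4 - (1511 & 3)) % 4 = 1, so the packet occupies 1512 bytes:
	 *
	 *	payload[0..1] = cpu_to_le16(1511 + 1);	padded length
	 *	payload[2..3] = cpu_to_le16(MWIFIEX_TYPE_AGGR_DATA_V2);
	 *
	 * The last packet is special-cased: its length word holds the
	 * unpadded length, bit 0x80 is set in the type word to mark the
	 * end of the aggregate, and the trailing pad is trimmed off.
	 */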
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.h b/drivers/net/wireless/marvell/mwifiex/usb.h
index e36bd63..37abd22 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.h
+++ b/drivers/net/wireless/marvell/mwifiex/usb.h
@@ -64,12 +64,35 @@
u8 ep;
};
+#define MWIFIEX_USB_TX_AGGR_TMO_MIN 1
+#define MWIFIEX_USB_TX_AGGR_TMO_MAX 4
+
+struct tx_aggr_tmr_cnxt {
+ struct mwifiex_adapter *adapter;
+ struct usb_tx_data_port *port;
+ struct timer_list hold_timer;
+ bool is_hold_timer_set;
+ u32 hold_tmo_msecs;
+};
+
+struct usb_tx_aggr {
+ struct sk_buff_head aggr_list;
+ int aggr_len;
+ int aggr_num;
+ struct tx_aggr_tmr_cnxt timer_cnxt;
+};
+
struct usb_tx_data_port {
u8 tx_data_ep;
u8 block_status;
atomic_t tx_data_urb_pending;
int tx_data_ix;
struct urb_context tx_data_list[MWIFIEX_TX_DATA_URB];
+ /* usb tx aggregation */
+ struct usb_tx_aggr tx_aggr;
+ struct sk_buff *skb_aggr[MWIFIEX_TX_DATA_URB];
+ /* lock to protect the tx aggregation data path */
+ spinlock_t tx_aggr_lock;
};
struct usb_card_rec {
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index e4ff3b9..0edd268 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -868,12 +868,8 @@
return;
default:
list_head = priv->wmm.tid_tbl_ptr[tid_down].ra_list;
- if (!list_empty(&list_head))
- ra_list = list_first_entry(
- &list_head, struct mwifiex_ra_list_tbl,
- list);
- else
- ra_list = NULL;
+ ra_list = list_first_entry_or_null(&list_head,
+ struct mwifiex_ra_list_tbl, list);
break;
}
} else {
@@ -1363,13 +1359,13 @@
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ tx_param.next_pkt_len =
+ ((skb_next) ? skb_next->len +
+ sizeof(struct txpd) : 0);
if (adapter->iface_type == MWIFIEX_USB) {
ret = adapter->if_ops.host_to_card(adapter, priv->usb_port,
- skb, NULL);
+ skb, &tx_param);
} else {
- tx_param.next_pkt_len =
- ((skb_next) ? skb_next->len +
- sizeof(struct txpd) : 0);
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
skb, &tx_param);
}
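With the wmm.c change above, the USB path now receives tx_param like the other buses, and the aggregation logic in usb.c keys off it:

	/* tx_param->next_pkt_len != 0: another frame is already queued,
	 *	so the current packet may be held in aggr_list.
	 * tx_param->next_pkt_len == 0: last packet for now, flush the
	 *	aggregate (or send the packet standalone).
	 */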
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index a8bc064..660267b 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -52,7 +52,7 @@
goto bad_frame;
if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
- memcpy(skb_put(skb, hdr_len), data, hdr_len);
+ skb_put_data(skb, data, hdr_len);
data += hdr_len + 2;
true_len -= hdr_len;
@@ -63,7 +63,7 @@
copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
frag = true_len - copy;
- memcpy(skb_put(skb, copy), data, copy);
+ skb_put_data(skb, data, copy);
data += copy;
if (frag) {
diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c
index a9f5f39..65a8004 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mcu.c
+++ b/drivers/net/wireless/mediatek/mt7601u/mcu.c
@@ -68,7 +68,7 @@
skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
if (skb) {
skb_reserve(skb, MT_DMA_HDR_LEN);
- memcpy(skb_put(skb, len), data, len);
+ skb_put_data(skb, data, len);
}
return skb;
diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c
index ad77bec..3600e91 100644
--- a/drivers/net/wireless/mediatek/mt7601u/tx.c
+++ b/drivers/net/wireless/mediatek/mt7601u/tx.c
@@ -148,7 +148,7 @@
u16 rate_ctl;
u8 nss;
- txwi = (struct mt76_txwi *)skb_push(skb, sizeof(struct mt76_txwi));
+ txwi = skb_push(skb, sizeof(struct mt76_txwi));
memset(txwi, 0, sizeof(*txwi));
if (!wcid->tx_rate_set)
diff --git a/drivers/net/wireless/quantenna/Kconfig b/drivers/net/wireless/quantenna/Kconfig
new file mode 100644
index 0000000..3094365
--- /dev/null
+++ b/drivers/net/wireless/quantenna/Kconfig
@@ -0,0 +1,16 @@
+config WLAN_VENDOR_QUANTENNA
+ bool "Quantenna wireless cards support"
+ default y
+ ---help---
+ If you have a wireless card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if WLAN_VENDOR_QUANTENNA
+
+source "drivers/net/wireless/quantenna/qtnfmac/Kconfig"
+
+endif # WLAN_VENDOR_QUANTENNA
diff --git a/drivers/net/wireless/quantenna/Makefile b/drivers/net/wireless/quantenna/Makefile
new file mode 100644
index 0000000..baebfbd
--- /dev/null
+++ b/drivers/net/wireless/quantenna/Makefile
@@ -0,0 +1,6 @@
+#
+# Copyright (c) 2015-2016 Quantenna Communications, Inc.
+# All rights reserved.
+#
+
+obj-$(CONFIG_QTNFMAC) += qtnfmac/
diff --git a/drivers/net/wireless/quantenna/qtnfmac/Kconfig b/drivers/net/wireless/quantenna/qtnfmac/Kconfig
new file mode 100644
index 0000000..025fa60
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/Kconfig
@@ -0,0 +1,19 @@
+config QTNFMAC
+ tristate
+ depends on QTNFMAC_PEARL_PCIE
+ default m if QTNFMAC_PEARL_PCIE=m
+ default y if QTNFMAC_PEARL_PCIE=y
+
+config QTNFMAC_PEARL_PCIE
+ tristate "Quantenna QSR10g PCIe support"
+ default n
+ depends on HAS_DMA && PCI && CFG80211
+ select QTNFMAC
+ select FW_LOADER
+ select CRC32
+ ---help---
+ This option adds support for wireless adapters based on Quantenna
+ 802.11ac QSR10g (aka Pearl) FullMAC chipset running over PCIe.
+
+ If you choose to build it as a module, two modules will be built:
+ qtnfmac.ko and qtnfmac_pearl_pcie.ko.
diff --git a/drivers/net/wireless/quantenna/qtnfmac/Makefile b/drivers/net/wireless/quantenna/qtnfmac/Makefile
new file mode 100644
index 0000000..0d618e5
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/Makefile
@@ -0,0 +1,31 @@
+#
+# Copyright (c) 2015-2016 Quantenna Communications, Inc.
+# All rights reserved.
+#
+
+ccflags-y += \
+ -Idrivers/net/wireless/quantenna/qtnfmac
+
+obj-$(CONFIG_QTNFMAC) += qtnfmac.o
+qtnfmac-objs += \
+ core.o \
+ commands.o \
+ trans.o \
+ cfg80211.o \
+ event.o \
+ util.o \
+ qlink_util.o
+
+#
+
+obj-$(CONFIG_QTNFMAC_PEARL_PCIE) += qtnfmac_pearl_pcie.o
+
+qtnfmac_pearl_pcie-objs += \
+ shm_ipc.o \
+ pearl/pcie.o
+
+qtnfmac_pearl_pcie-$(CONFIG_DEBUG_FS) += debug.o
+
+#
+
+ccflags-y += -D__CHECK_ENDIAN
diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h
new file mode 100644
index 0000000..dda0500
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2015 Quantenna Communications
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef QTNFMAC_BUS_H
+#define QTNFMAC_BUS_H
+
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+
+#define QTNF_MAX_MAC 3
+
+enum qtnf_fw_state {
+ QTNF_FW_STATE_RESET,
+ QTNF_FW_STATE_FW_DNLD_DONE,
+ QTNF_FW_STATE_BOOT_DONE,
+ QTNF_FW_STATE_ACTIVE,
+ QTNF_FW_STATE_DEAD,
+};
+
+struct qtnf_bus;
+
+struct qtnf_bus_ops {
+ /* mgmt methods */
+ int (*preinit)(struct qtnf_bus *);
+ void (*stop)(struct qtnf_bus *);
+
+ /* control path methods */
+ int (*control_tx)(struct qtnf_bus *, struct sk_buff *);
+
+ /* data xfer methods */
+ int (*data_tx)(struct qtnf_bus *, struct sk_buff *);
+ void (*data_tx_timeout)(struct qtnf_bus *, struct net_device *);
+ void (*data_rx_start)(struct qtnf_bus *);
+ void (*data_rx_stop)(struct qtnf_bus *);
+};
+
+struct qtnf_bus {
+ struct device *dev;
+ enum qtnf_fw_state fw_state;
+ u32 chip;
+ u32 chiprev;
+ const struct qtnf_bus_ops *bus_ops;
+ struct qtnf_wmac *mac[QTNF_MAX_MAC];
+ struct qtnf_qlink_transport trans;
+ struct qtnf_hw_info hw_info;
+ char fwname[32];
+ struct napi_struct mux_napi;
+ struct net_device mux_dev;
+ struct completion request_firmware_complete;
+ struct workqueue_struct *workqueue;
+ struct work_struct event_work;
+ struct mutex bus_lock; /* lock during command/event processing */
+ struct dentry *dbg_dir;
+ /* bus private data */
+ char bus_priv[0] __aligned(sizeof(void *));
+};
+
+static inline void *get_bus_priv(struct qtnf_bus *bus)
+{
+ if (WARN(!bus, "qtnfmac: invalid bus pointer"))
+ return NULL;
+
+ return &bus->bus_priv;
+}
+
+/* callback wrappers */
+
+static inline int qtnf_bus_preinit(struct qtnf_bus *bus)
+{
+ if (!bus->bus_ops->preinit)
+ return 0;
+ return bus->bus_ops->preinit(bus);
+}
+
+static inline void qtnf_bus_stop(struct qtnf_bus *bus)
+{
+ if (!bus->bus_ops->stop)
+ return;
+ bus->bus_ops->stop(bus);
+}
+
+static inline int qtnf_bus_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
+{
+ return bus->bus_ops->data_tx(bus, skb);
+}
+
+static inline void
+qtnf_bus_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev)
+{
+ return bus->bus_ops->data_tx_timeout(bus, ndev);
+}
+
+static inline int qtnf_bus_control_tx(struct qtnf_bus *bus, struct sk_buff *skb)
+{
+ return bus->bus_ops->control_tx(bus, skb);
+}
+
+static inline void qtnf_bus_data_rx_start(struct qtnf_bus *bus)
+{
+ return bus->bus_ops->data_rx_start(bus);
+}
+
+static inline void qtnf_bus_data_rx_stop(struct qtnf_bus *bus)
+{
+ return bus->bus_ops->data_rx_stop(bus);
+}
+
+static __always_inline void qtnf_bus_lock(struct qtnf_bus *bus)
+{
+ mutex_lock(&bus->bus_lock);
+}
+
+static __always_inline void qtnf_bus_unlock(struct qtnf_bus *bus)
+{
+ mutex_unlock(&bus->bus_lock);
+}
+
+/* interface functions from common layer */
+
+void qtnf_rx_frame(struct device *dev, struct sk_buff *rxp);
+int qtnf_core_attach(struct qtnf_bus *bus);
+void qtnf_core_detach(struct qtnf_bus *bus);
+void qtnf_txflowblock(struct device *dev, bool state);
+void qtnf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
+
+#endif /* QTNFMAC_BUS_H */
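struct qtnf_bus ends in a zero-length, pointer-aligned array so the generic core and the bus glue share a single allocation, with get_bus_priv() returning the tail. A minimal sketch of how a bus driver might use it, with hypothetical names:

	struct my_pcie_priv {
		void __iomem *regs;	/* hypothetical bus-private state */
	};

	/* inside a hypothetical probe() routine: */
	struct qtnf_bus *bus;
	struct my_pcie_priv *priv;

	bus = devm_kzalloc(&pdev->dev, sizeof(*bus) + sizeof(*priv),
			   GFP_KERNEL);
	if (!bus)
		return -ENOMEM;
	priv = get_bus_priv(bus);	/* tail of the same allocation */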
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
new file mode 100644
index 0000000..e3c0900
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -0,0 +1,995 @@
+/*
+ * Copyright (c) 2012-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/ieee80211.h>
+#include <net/cfg80211.h>
+#include <net/netlink.h>
+
+#include "cfg80211.h"
+#include "commands.h"
+#include "core.h"
+#include "util.h"
+#include "bus.h"
+
+/* Supported rates to be advertised to the cfg80211 */
+static struct ieee80211_rate qtnf_rates_2g[] = {
+ {.bitrate = 10, .hw_value = 2, },
+ {.bitrate = 20, .hw_value = 4, },
+ {.bitrate = 55, .hw_value = 11, },
+ {.bitrate = 110, .hw_value = 22, },
+ {.bitrate = 60, .hw_value = 12, },
+ {.bitrate = 90, .hw_value = 18, },
+ {.bitrate = 120, .hw_value = 24, },
+ {.bitrate = 180, .hw_value = 36, },
+ {.bitrate = 240, .hw_value = 48, },
+ {.bitrate = 360, .hw_value = 72, },
+ {.bitrate = 480, .hw_value = 96, },
+ {.bitrate = 540, .hw_value = 108, },
+};
+
+/* Supported rates to be advertised to cfg80211 (5 GHz band) */
+static struct ieee80211_rate qtnf_rates_5g[] = {
+ {.bitrate = 60, .hw_value = 12, },
+ {.bitrate = 90, .hw_value = 18, },
+ {.bitrate = 120, .hw_value = 24, },
+ {.bitrate = 180, .hw_value = 36, },
+ {.bitrate = 240, .hw_value = 48, },
+ {.bitrate = 360, .hw_value = 72, },
+ {.bitrate = 480, .hw_value = 96, },
+ {.bitrate = 540, .hw_value = 108, },
+};
+
+/* Supported crypto cipher suites to be advertised to cfg80211 */
+static const u32 qtnf_cipher_suites[] = {
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ WLAN_CIPHER_SUITE_AES_CMAC,
+};
+
+/* Supported mgmt frame types to be advertised to cfg80211 */
+static const struct ieee80211_txrx_stypes
+qtnf_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+ [NL80211_IFTYPE_STATION] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+ },
+ [NL80211_IFTYPE_AP] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+ },
+};
+
+static int
+qtnf_change_virtual_intf(struct wiphy *wiphy,
+ struct net_device *dev,
+ enum nl80211_iftype type,
+ struct vif_params *params)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ u8 *mac_addr;
+ int ret;
+
+ if (params)
+ mac_addr = params->macaddr;
+ else
+ mac_addr = NULL;
+
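+	/* Abort any scan in progress before changing the interface type */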
+ qtnf_scan_done(vif->mac, true);
+
+ ret = qtnf_cmd_send_change_intf_type(vif, type, mac_addr);
+ if (ret) {
+ pr_err("VIF%u.%u: failed to change VIF type: %d\n",
+ vif->mac->macid, vif->vifid, ret);
+ return ret;
+ }
+
+ vif->wdev.iftype = type;
+ return 0;
+}
+
+int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ struct net_device *netdev = wdev->netdev;
+ struct qtnf_vif *vif;
+
+ if (WARN_ON(!netdev))
+ return -EFAULT;
+
+ vif = qtnf_netdev_get_priv(wdev->netdev);
+
+ if (qtnf_cmd_send_del_intf(vif))
+ pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid,
+ vif->vifid);
+
+ /* Stop data */
+ netif_tx_stop_all_queues(netdev);
+ if (netif_carrier_ok(netdev))
+ netif_carrier_off(netdev);
+
+ if (netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdevice(netdev);
+
+ vif->netdev->ieee80211_ptr = NULL;
+ vif->netdev = NULL;
+ vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
+ eth_zero_addr(vif->mac_addr);
+
+ return 0;
+}
+
+static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
+ const char *name,
+ unsigned char name_assign_t,
+ enum nl80211_iftype type,
+ struct vif_params *params)
+{
+ struct qtnf_wmac *mac;
+ struct qtnf_vif *vif;
+ u8 *mac_addr = NULL;
+
+ mac = wiphy_priv(wiphy);
+
+ if (!mac)
+ return ERR_PTR(-EFAULT);
+
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ vif = qtnf_mac_get_free_vif(mac);
+ if (!vif) {
+ pr_err("MAC%u: no free VIF available\n", mac->macid);
+ return ERR_PTR(-EFAULT);
+ }
+
+ eth_zero_addr(vif->mac_addr);
+ vif->bss_priority = QTNF_DEF_BSS_PRIORITY;
+ vif->wdev.wiphy = wiphy;
+ vif->wdev.iftype = type;
+ vif->sta_state = QTNF_STA_DISCONNECTED;
+ break;
+ default:
+ pr_err("MAC%u: unsupported IF type %d\n", mac->macid, type);
+		return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ if (params)
+ mac_addr = params->macaddr;
+
+ if (qtnf_cmd_send_add_intf(vif, type, mac_addr)) {
+ pr_err("VIF%u.%u: failed to add VIF\n", mac->macid, vif->vifid);
+ goto err_cmd;
+ }
+
+ if (!is_valid_ether_addr(vif->mac_addr)) {
+ pr_err("VIF%u.%u: FW reported bad MAC: %pM\n",
+ mac->macid, vif->vifid, vif->mac_addr);
+ goto err_mac;
+ }
+
+ if (qtnf_core_net_attach(mac, vif, name, name_assign_t, type)) {
+ pr_err("VIF%u.%u: failed to attach netdev\n", mac->macid,
+ vif->vifid);
+ goto err_net;
+ }
+
+ vif->wdev.netdev = vif->netdev;
+ return &vif->wdev;
+
+err_net:
+ vif->netdev = NULL;
+err_mac:
+ qtnf_cmd_send_del_intf(vif);
+err_cmd:
+ vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
+
+ return ERR_PTR(-EFAULT);
+}
+
+static int qtnf_mgmt_set_appie(struct qtnf_vif *vif,
+ const struct cfg80211_beacon_data *info)
+{
+ int ret = 0;
+
+ if (!info->beacon_ies || !info->beacon_ies_len) {
+ ret = qtnf_cmd_send_mgmt_set_appie(vif, QLINK_MGMT_FRAME_BEACON,
+ NULL, 0);
+ } else {
+ ret = qtnf_cmd_send_mgmt_set_appie(vif, QLINK_MGMT_FRAME_BEACON,
+ info->beacon_ies,
+ info->beacon_ies_len);
+ }
+
+ if (ret)
+ goto out;
+
+ if (!info->proberesp_ies || !info->proberesp_ies_len) {
+ ret = qtnf_cmd_send_mgmt_set_appie(vif,
+ QLINK_MGMT_FRAME_PROBE_RESP,
+ NULL, 0);
+ } else {
+ ret = qtnf_cmd_send_mgmt_set_appie(vif,
+ QLINK_MGMT_FRAME_PROBE_RESP,
+ info->proberesp_ies,
+ info->proberesp_ies_len);
+ }
+
+ if (ret)
+ goto out;
+
+ if (!info->assocresp_ies || !info->assocresp_ies_len) {
+ ret = qtnf_cmd_send_mgmt_set_appie(vif,
+ QLINK_MGMT_FRAME_ASSOC_RESP,
+ NULL, 0);
+ } else {
+ ret = qtnf_cmd_send_mgmt_set_appie(vif,
+ QLINK_MGMT_FRAME_ASSOC_RESP,
+ info->assocresp_ies,
+ info->assocresp_ies_len);
+ }
+
+out:
+ return ret;
+}
+
+static int qtnf_change_beacon(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_beacon_data *info)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+
+ if (!(vif->bss_status & QTNF_STATE_AP_START)) {
+ pr_err("VIF%u.%u: not started\n", vif->mac->macid, vif->vifid);
+ return -EFAULT;
+ }
+
+ return qtnf_mgmt_set_appie(vif, info);
+}
+
+static int qtnf_start_ap(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ap_settings *settings)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ struct qtnf_bss_config *bss_cfg;
+ int ret;
+
+ bss_cfg = &vif->bss_cfg;
+
+ memset(bss_cfg, 0, sizeof(*bss_cfg));
+
+ bss_cfg->bcn_period = settings->beacon_interval;
+ bss_cfg->dtim = settings->dtim_period;
+ bss_cfg->auth_type = settings->auth_type;
+ bss_cfg->privacy = settings->privacy;
+
+ bss_cfg->ssid_len = settings->ssid_len;
+ memcpy(&bss_cfg->ssid, settings->ssid, bss_cfg->ssid_len);
+
+ memcpy(&bss_cfg->chandef, &settings->chandef,
+ sizeof(struct cfg80211_chan_def));
+ memcpy(&bss_cfg->crypto, &settings->crypto,
+ sizeof(struct cfg80211_crypto_settings));
+
+ ret = qtnf_cmd_send_config_ap(vif);
+ if (ret) {
+ pr_err("VIF%u.%u: failed to push config to FW\n",
+ vif->mac->macid, vif->vifid);
+ goto out;
+ }
+
+ if (!(vif->bss_status & QTNF_STATE_AP_CONFIG)) {
+ pr_err("VIF%u.%u: AP config failed in FW\n", vif->mac->macid,
+ vif->vifid);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = qtnf_mgmt_set_appie(vif, &settings->beacon);
+ if (ret) {
+ pr_err("VIF%u.%u: failed to add IEs to beacon\n",
+ vif->mac->macid, vif->vifid);
+ goto out;
+ }
+
+ ret = qtnf_cmd_send_start_ap(vif);
+ if (ret) {
+ pr_err("VIF%u.%u: failed to start AP\n", vif->mac->macid,
+ vif->vifid);
+ goto out;
+ }
+
+ if (!(vif->bss_status & QTNF_STATE_AP_START)) {
+ pr_err("VIF%u.%u: FW failed to start AP operation\n",
+ vif->mac->macid, vif->vifid);
+ ret = -EFAULT;
+ }
+
+out:
+ return ret;
+}
+
+static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ int ret;
+
+ ret = qtnf_cmd_send_stop_ap(vif);
+ if (ret) {
+ pr_err("VIF%u.%u: failed to stop AP operation in FW\n",
+ vif->mac->macid, vif->vifid);
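+		/* Reset local AP state even though the command failed, so
+		 * the interface does not stay out of sync with cfg80211.
+		 */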
+ vif->bss_status &= ~QTNF_STATE_AP_START;
+ vif->bss_status &= ~QTNF_STATE_AP_CONFIG;
+
+ netif_carrier_off(vif->netdev);
+ }
+
+ return ret;
+}
+
+static int qtnf_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ struct qtnf_wmac *mac = wiphy_priv(wiphy);
+ struct qtnf_vif *vif;
+ int ret;
+
+ vif = qtnf_mac_get_base_vif(mac);
+ if (!vif) {
+ pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
+ return -EFAULT;
+ }
+
+ if (changed & (WIPHY_PARAM_RETRY_LONG | WIPHY_PARAM_RETRY_SHORT)) {
+ pr_err("MAC%u: can't modify retry params\n", mac->macid);
+ return -EOPNOTSUPP;
+ }
+
+ ret = qtnf_cmd_send_update_phy_params(mac, changed);
+ if (ret)
+ pr_err("MAC%u: failed to update PHY params\n", mac->macid);
+
+ return ret;
+}
+
+static void
+qtnf_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev,
+ u16 frame_type, bool reg)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev);
+ u16 mgmt_type;
+ u16 new_mask;
+ u16 qlink_frame_type = 0;
+
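+	/* Track registered frame subtypes in a per-VIF bitmask so that
+	 * duplicate (un)registration requests are not forwarded to firmware.
+	 */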
+ mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4;
+
+ if (reg)
+ new_mask = vif->mgmt_frames_bitmask | BIT(mgmt_type);
+ else
+ new_mask = vif->mgmt_frames_bitmask & ~BIT(mgmt_type);
+
+ if (new_mask == vif->mgmt_frames_bitmask)
+ return;
+
+ switch (frame_type & IEEE80211_FCTL_STYPE) {
+ case IEEE80211_STYPE_PROBE_REQ:
+ qlink_frame_type = QLINK_MGMT_FRAME_PROBE_REQ;
+ break;
+ case IEEE80211_STYPE_ACTION:
+ qlink_frame_type = QLINK_MGMT_FRAME_ACTION;
+ break;
+ default:
+ pr_warn("VIF%u.%u: unsupported frame type: %X\n",
+ vif->mac->macid, vif->vifid,
+ (frame_type & IEEE80211_FCTL_STYPE) >> 4);
+ return;
+ }
+
+ if (qtnf_cmd_send_register_mgmt(vif, qlink_frame_type, reg)) {
+ pr_warn("VIF%u.%u: failed to %sregister mgmt frame type 0x%x\n",
+ vif->mac->macid, vif->vifid, reg ? "" : "un",
+ frame_type);
+ return;
+ }
+
+ vif->mgmt_frames_bitmask = new_mask;
+ pr_debug("VIF%u.%u: %sregistered mgmt frame type 0x%x\n",
+ vif->mac->macid, vif->vifid, reg ? "" : "un", frame_type);
+}
+
+static int
+qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_mgmt_tx_params *params, u64 *cookie)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev);
+ const struct ieee80211_mgmt *mgmt_frame = (void *)params->buf;
+ u32 short_cookie = prandom_u32();
+ u16 flags = 0;
+
+ *cookie = short_cookie;
+
+ if (params->offchan)
+ flags |= QLINK_MGMT_FRAME_TX_FLAG_OFFCHAN;
+
+ if (params->no_cck)
+ flags |= QLINK_MGMT_FRAME_TX_FLAG_NO_CCK;
+
+ if (params->dont_wait_for_ack)
+ flags |= QLINK_MGMT_FRAME_TX_FLAG_ACK_NOWAIT;
+
+ pr_debug("%s freq:%u; FC:%.4X; DA:%pM; len:%zu; C:%.8X; FL:%.4X\n",
+ wdev->netdev->name, params->chan->center_freq,
+ le16_to_cpu(mgmt_frame->frame_control), mgmt_frame->da,
+ params->len, short_cookie, flags);
+
+ return qtnf_cmd_send_mgmt_frame(vif, short_cookie, flags,
+ params->chan->center_freq,
+ params->buf, params->len);
+}
+
+static int
+qtnf_get_station(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *mac, struct station_info *sinfo)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+
+ return qtnf_cmd_get_sta_info(vif, mac, sinfo);
+}
+
+static int
+qtnf_dump_station(struct wiphy *wiphy, struct net_device *dev,
+ int idx, u8 *mac, struct station_info *sinfo)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ const struct qtnf_sta_node *sta_node;
+ int ret;
+
+ sta_node = qtnf_sta_list_lookup_index(&vif->sta_list, idx);
+
+ if (unlikely(!sta_node))
+ return -ENOENT;
+
+ ether_addr_copy(mac, sta_node->mac_addr);
+
+ ret = qtnf_cmd_get_sta_info(vif, sta_node->mac_addr, sinfo);
+
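+	/* The firmware no longer knows this STA: remove it from the local
+	 * list and tell cfg80211 it is gone.
+	 */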
+ if (unlikely(ret == -ENOENT)) {
+ qtnf_sta_list_del(&vif->sta_list, mac);
+ cfg80211_del_sta(vif->netdev, mac, GFP_KERNEL);
+ sinfo->filled = 0;
+ }
+
+ return ret;
+}
+
+static int qtnf_add_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_index, bool pairwise, const u8 *mac_addr,
+ struct key_params *params)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ int ret;
+
+ ret = qtnf_cmd_send_add_key(vif, key_index, pairwise, mac_addr, params);
+ if (ret)
+ pr_err("VIF%u.%u: failed to add key: cipher=%x idx=%u pw=%u\n",
+ vif->mac->macid, vif->vifid, params->cipher, key_index,
+ pairwise);
+
+ return ret;
+}
+
+static int qtnf_del_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_index, bool pairwise, const u8 *mac_addr)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ int ret;
+
+ ret = qtnf_cmd_send_del_key(vif, key_index, pairwise, mac_addr);
+ if (ret)
+ pr_err("VIF%u.%u: failed to delete key: idx=%u pw=%u\n",
+ vif->mac->macid, vif->vifid, key_index, pairwise);
+
+ return ret;
+}
+
+static int qtnf_set_default_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_index, bool unicast, bool multicast)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ int ret;
+
+ ret = qtnf_cmd_send_set_default_key(vif, key_index, unicast, multicast);
+ if (ret)
+ pr_err("VIF%u.%u: failed to set dflt key: idx=%u uc=%u mc=%u\n",
+ vif->mac->macid, vif->vifid, key_index, unicast,
+ multicast);
+
+ return ret;
+}
+
+static int
+qtnf_set_default_mgmt_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_index)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ int ret;
+
+ ret = qtnf_cmd_send_set_default_mgmt_key(vif, key_index);
+ if (ret)
+ pr_err("VIF%u.%u: failed to set default MGMT key: idx=%u\n",
+ vif->mac->macid, vif->vifid, key_index);
+
+ return ret;
+}
+
+static int
+qtnf_change_station(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *mac, struct station_parameters *params)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ int ret;
+
+ ret = qtnf_cmd_send_change_sta(vif, mac, params);
+ if (ret)
+ pr_err("VIF%u.%u: failed to change STA %pM\n",
+ vif->mac->macid, vif->vifid, mac);
+
+ return ret;
+}
+
+static int
+qtnf_del_station(struct wiphy *wiphy, struct net_device *dev,
+ struct station_del_parameters *params)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ int ret;
+
+ if (params->mac &&
+ (vif->wdev.iftype == NL80211_IFTYPE_AP) &&
+ !is_broadcast_ether_addr(params->mac) &&
+ !qtnf_sta_list_lookup(&vif->sta_list, params->mac))
+ return 0;
+
+ qtnf_scan_done(vif->mac, true);
+
+ ret = qtnf_cmd_send_del_sta(vif, params);
+ if (ret)
+ pr_err("VIF%u.%u: failed to delete STA %pM\n",
+ vif->mac->macid, vif->vifid, params->mac);
+ return ret;
+}
+
+static int
+qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
+{
+ struct qtnf_wmac *mac = wiphy_priv(wiphy);
+ int ret;
+
+ mac->scan_req = request;
+
+ ret = qtnf_cmd_send_scan(mac);
+ if (ret)
+ pr_err("MAC%u: failed to start scan\n", mac->macid);
+
+ return ret;
+}
+
+static int
+qtnf_connect(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ struct qtnf_bss_config *bss_cfg;
+ int ret;
+
+ if (vif->wdev.iftype != NL80211_IFTYPE_STATION)
+ return -EOPNOTSUPP;
+
+ if (vif->sta_state != QTNF_STA_DISCONNECTED)
+ return -EBUSY;
+
+ bss_cfg = &vif->bss_cfg;
+ memset(bss_cfg, 0, sizeof(*bss_cfg));
+
+ bss_cfg->ssid_len = sme->ssid_len;
+ memcpy(&bss_cfg->ssid, sme->ssid, bss_cfg->ssid_len);
+ bss_cfg->chandef.chan = sme->channel;
+ bss_cfg->auth_type = sme->auth_type;
+ bss_cfg->privacy = sme->privacy;
+ bss_cfg->mfp = sme->mfp;
+
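+	/* A bg_scan_period of -1 selects the driver default; zero or
+	 * out-of-range values disable background scanning.
+	 */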
+ if ((sme->bg_scan_period > 0) &&
+ (sme->bg_scan_period <= QTNF_MAX_BG_SCAN_PERIOD))
+ bss_cfg->bg_scan_period = sme->bg_scan_period;
+ else if (sme->bg_scan_period == -1)
+ bss_cfg->bg_scan_period = QTNF_DEFAULT_BG_SCAN_PERIOD;
+ else
+ bss_cfg->bg_scan_period = 0; /* disabled */
+
+ bss_cfg->connect_flags = 0;
+
+ if (sme->flags & ASSOC_REQ_DISABLE_HT)
+ bss_cfg->connect_flags |= QLINK_STA_CONNECT_DISABLE_HT;
+ if (sme->flags & ASSOC_REQ_DISABLE_VHT)
+ bss_cfg->connect_flags |= QLINK_STA_CONNECT_DISABLE_VHT;
+ if (sme->flags & ASSOC_REQ_USE_RRM)
+ bss_cfg->connect_flags |= QLINK_STA_CONNECT_USE_RRM;
+
+ memcpy(&bss_cfg->crypto, &sme->crypto, sizeof(bss_cfg->crypto));
+ if (sme->bssid)
+ ether_addr_copy(bss_cfg->bssid, sme->bssid);
+ else
+ eth_zero_addr(bss_cfg->bssid);
+
+ ret = qtnf_cmd_send_connect(vif, sme);
+ if (ret) {
+ pr_err("VIF%u.%u: failed to connect\n", vif->mac->macid,
+ vif->vifid);
+ return ret;
+ }
+
+ vif->sta_state = QTNF_STA_CONNECTING;
+ return 0;
+}
+
+static int
+qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
+ u16 reason_code)
+{
+ struct qtnf_wmac *mac = wiphy_priv(wiphy);
+ struct qtnf_vif *vif;
+ int ret;
+
+ vif = qtnf_mac_get_base_vif(mac);
+ if (!vif) {
+ pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
+ return -EFAULT;
+ }
+
+ if (vif->wdev.iftype != NL80211_IFTYPE_STATION)
+ return -EOPNOTSUPP;
+
+ if (vif->sta_state == QTNF_STA_DISCONNECTED)
+ return 0;
+
+ ret = qtnf_cmd_send_disconnect(vif, reason_code);
+ if (ret) {
+ pr_err("VIF%u.%u: failed to disconnect\n", mac->macid,
+ vif->vifid);
+ return ret;
+ }
+
+ vif->sta_state = QTNF_STA_DISCONNECTED;
+ return 0;
+}
+
+static struct cfg80211_ops qtn_cfg80211_ops = {
+ .add_virtual_intf = qtnf_add_virtual_intf,
+ .change_virtual_intf = qtnf_change_virtual_intf,
+ .del_virtual_intf = qtnf_del_virtual_intf,
+ .start_ap = qtnf_start_ap,
+ .change_beacon = qtnf_change_beacon,
+ .stop_ap = qtnf_stop_ap,
+ .set_wiphy_params = qtnf_set_wiphy_params,
+ .mgmt_frame_register = qtnf_mgmt_frame_register,
+ .mgmt_tx = qtnf_mgmt_tx,
+ .change_station = qtnf_change_station,
+ .del_station = qtnf_del_station,
+ .get_station = qtnf_get_station,
+ .dump_station = qtnf_dump_station,
+ .add_key = qtnf_add_key,
+ .del_key = qtnf_del_key,
+ .set_default_key = qtnf_set_default_key,
+ .set_default_mgmt_key = qtnf_set_default_mgmt_key,
+ .scan = qtnf_scan,
+ .connect = qtnf_connect,
+ .disconnect = qtnf_disconnect
+};
+
+static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *req)
+{
+ struct qtnf_wmac *mac = wiphy_priv(wiphy);
+ struct qtnf_bus *bus;
+ struct qtnf_vif *vif;
+ struct qtnf_wmac *chan_mac;
+ int i;
+ enum nl80211_band band;
+
+ bus = mac->bus;
+
+ pr_debug("MAC%u: initiator=%d alpha=%c%c\n", mac->macid, req->initiator,
+ req->alpha2[0], req->alpha2[1]);
+
+ vif = qtnf_mac_get_base_vif(mac);
+ if (!vif) {
+ pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
+ return;
+ }
+
+ /* ignore non-ISO3166 country codes */
+ for (i = 0; i < sizeof(req->alpha2); i++) {
+ if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') {
+ pr_err("MAC%u: not an ISO3166 code\n", mac->macid);
+ return;
+ }
+ }
+ if (!strncasecmp(req->alpha2, bus->hw_info.alpha2_code,
+ sizeof(req->alpha2))) {
+ pr_warn("MAC%u: unchanged country code\n", mac->macid);
+ return;
+ }
+
+ if (qtnf_cmd_send_regulatory_config(mac, req->alpha2)) {
+ pr_err("MAC%u: failed to configure regulatory\n", mac->macid);
+ return;
+ }
+
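+	/* A regulatory change affects all radios: refresh the channel list
+	 * of every active MAC for each supported band.
+	 */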
+ for (i = 0; i < bus->hw_info.num_mac; i++) {
+ chan_mac = bus->mac[i];
+
+ if (!chan_mac)
+ continue;
+
+ if (!(bus->hw_info.mac_bitmap & BIT(i)))
+ continue;
+
+ for (band = 0; band < NUM_NL80211_BANDS; ++band) {
+ if (!wiphy->bands[band])
+ continue;
+
+ if (qtnf_cmd_get_mac_chan_info(chan_mac,
+ wiphy->bands[band])) {
+ pr_err("MAC%u: can't get channel info\n",
+ chan_mac->macid);
+ qtnf_core_detach(bus);
+
+ return;
+ }
+ }
+ }
+}
+
+void qtnf_band_setup_htvht_caps(struct qtnf_mac_info *macinfo,
+ struct ieee80211_supported_band *band)
+{
+ struct ieee80211_sta_ht_cap *ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap;
+
+ ht_cap = &band->ht_cap;
+ ht_cap->ht_supported = true;
+ memcpy(&ht_cap->cap, &macinfo->ht_cap.cap_info,
+ sizeof(u16));
+ ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
+ memcpy(&ht_cap->mcs, &macinfo->ht_cap.mcs,
+ sizeof(ht_cap->mcs));
+
+ if (macinfo->phymode_cap & QLINK_PHYMODE_AC) {
+ vht_cap = &band->vht_cap;
+ vht_cap->vht_supported = true;
+ memcpy(&vht_cap->cap,
+ &macinfo->vht_cap.vht_cap_info, sizeof(u32));
+ /* Update MCS support for VHT */
+ memcpy(&vht_cap->vht_mcs,
+ &macinfo->vht_cap.supp_mcs,
+ sizeof(struct ieee80211_vht_mcs_info));
+ }
+}
+
+struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus)
+{
+ struct wiphy *wiphy;
+
+ wiphy = wiphy_new(&qtn_cfg80211_ops, sizeof(struct qtnf_wmac));
+ if (!wiphy)
+ return NULL;
+
+ set_wiphy_dev(wiphy, bus->dev);
+
+ return wiphy;
+}
+
+static int qtnf_wiphy_setup_if_comb(struct wiphy *wiphy,
+ struct ieee80211_iface_combination *if_comb,
+ const struct qtnf_mac_info *mac_info)
+{
+ size_t max_interfaces = 0;
+ u16 interface_modes = 0;
+ size_t i;
+
+ if (unlikely(!mac_info->limits || !mac_info->n_limits))
+ return -ENOENT;
+
+ if_comb->limits = mac_info->limits;
+ if_comb->n_limits = mac_info->n_limits;
+
+ for (i = 0; i < mac_info->n_limits; i++) {
+ max_interfaces += mac_info->limits[i].max;
+ interface_modes |= mac_info->limits[i].types;
+ }
+
+ if_comb->num_different_channels = 1;
+ if_comb->beacon_int_infra_match = true;
+ if_comb->max_interfaces = max_interfaces;
+ if_comb->radar_detect_widths = mac_info->radar_detect_widths;
+ wiphy->interface_modes = interface_modes;
+
+ return 0;
+}
+
+int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
+{
+ struct wiphy *wiphy = priv_to_wiphy(mac);
+ struct ieee80211_iface_combination *iface_comb = NULL;
+ int ret;
+
+ if (!wiphy) {
+ pr_err("invalid wiphy pointer\n");
+ return -EFAULT;
+ }
+
+ iface_comb = kzalloc(sizeof(*iface_comb), GFP_KERNEL);
+ if (!iface_comb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = qtnf_wiphy_setup_if_comb(wiphy, iface_comb, &mac->macinfo);
+ if (ret)
+ goto out;
+
+ pr_info("MAC%u: phymode=%#x radar=%#x\n", mac->macid,
+ mac->macinfo.phymode_cap, mac->macinfo.radar_detect_widths);
+
+ wiphy->frag_threshold = mac->macinfo.frag_thr;
+ wiphy->rts_threshold = mac->macinfo.rts_thr;
+ wiphy->retry_short = mac->macinfo.sretry_limit;
+ wiphy->retry_long = mac->macinfo.lretry_limit;
+ wiphy->coverage_class = mac->macinfo.coverage_class;
+
+ wiphy->max_scan_ssids = QTNF_MAX_SSID_LIST_LENGTH;
+ wiphy->max_scan_ie_len = QTNF_MAX_VSIE_LEN;
+ wiphy->mgmt_stypes = qtnf_mgmt_stypes;
+ wiphy->max_remain_on_channel_duration = 5000;
+
+ wiphy->iface_combinations = iface_comb;
+ wiphy->n_iface_combinations = 1;
+
+	/* Initialize cipher suites */
+ wiphy->cipher_suites = qtnf_cipher_suites;
+ wiphy->n_cipher_suites = ARRAY_SIZE(qtnf_cipher_suites);
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
+ WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
+ WIPHY_FLAG_AP_UAPSD;
+
+ wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2;
+
+ wiphy->available_antennas_tx = mac->macinfo.num_tx_chain;
+ wiphy->available_antennas_rx = mac->macinfo.num_rx_chain;
+
+ wiphy->max_ap_assoc_sta = mac->macinfo.max_ap_assoc_sta;
+
+ ether_addr_copy(wiphy->perm_addr, mac->macaddr);
+
+ if (hw_info->hw_capab & QLINK_HW_SUPPORTS_REG_UPDATE) {
+ pr_debug("device supports REG_UPDATE\n");
+ wiphy->reg_notifier = qtnf_cfg80211_reg_notifier;
+ pr_debug("hint regulatory about EP region: %c%c\n",
+ hw_info->alpha2_code[0],
+ hw_info->alpha2_code[1]);
+ regulatory_hint(wiphy, hw_info->alpha2_code);
+ } else {
+ pr_debug("device doesn't support REG_UPDATE\n");
+ wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
+ }
+
+ ret = wiphy_register(wiphy);
+
+out:
+ if (ret < 0) {
+ kfree(iface_comb);
+ return ret;
+ }
+
+ return 0;
+}
+
+void qtnf_netdev_updown(struct net_device *ndev, bool up)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+
+ if (qtnf_cmd_send_updown_intf(vif, up))
+ pr_err("failed to send up/down command to FW\n");
+}
+
+void qtnf_virtual_intf_cleanup(struct net_device *ndev)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+ struct qtnf_wmac *mac = wiphy_priv(vif->wdev.wiphy);
+
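+	/* If a connect attempt is pending or a link is established, report
+	 * it as terminated to cfg80211 and tell the firmware to disconnect
+	 * before the interface is torn down.
+	 */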
+ if (vif->wdev.iftype == NL80211_IFTYPE_STATION) {
+ switch (vif->sta_state) {
+ case QTNF_STA_DISCONNECTED:
+ break;
+ case QTNF_STA_CONNECTING:
+ cfg80211_connect_result(vif->netdev,
+ vif->bss_cfg.bssid, NULL, 0,
+ NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ qtnf_disconnect(vif->wdev.wiphy, ndev,
+ WLAN_REASON_DEAUTH_LEAVING);
+ break;
+ case QTNF_STA_CONNECTED:
+ cfg80211_disconnected(vif->netdev,
+ WLAN_REASON_DEAUTH_LEAVING,
+ NULL, 0, 1, GFP_KERNEL);
+ qtnf_disconnect(vif->wdev.wiphy, ndev,
+ WLAN_REASON_DEAUTH_LEAVING);
+ break;
+ }
+
+ vif->sta_state = QTNF_STA_DISCONNECTED;
+ qtnf_scan_done(mac, true);
+ }
+}
+
+void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif)
+{
+ if (vif->wdev.iftype == NL80211_IFTYPE_STATION) {
+ switch (vif->sta_state) {
+ case QTNF_STA_CONNECTING:
+ cfg80211_connect_result(vif->netdev,
+ vif->bss_cfg.bssid, NULL, 0,
+ NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ break;
+ case QTNF_STA_CONNECTED:
+ cfg80211_disconnected(vif->netdev,
+ WLAN_REASON_DEAUTH_LEAVING,
+ NULL, 0, 1, GFP_KERNEL);
+ break;
+ case QTNF_STA_DISCONNECTED:
+ break;
+ }
+ }
+
+ cfg80211_shutdown_all_interfaces(vif->wdev.wiphy);
+ vif->sta_state = QTNF_STA_DISCONNECTED;
+}
+
+void qtnf_band_init_rates(struct ieee80211_supported_band *band)
+{
+ switch (band->band) {
+ case NL80211_BAND_2GHZ:
+ band->bitrates = qtnf_rates_2g;
+ band->n_bitrates = ARRAY_SIZE(qtnf_rates_2g);
+ break;
+ case NL80211_BAND_5GHZ:
+ band->bitrates = qtnf_rates_5g;
+ band->n_bitrates = ARRAY_SIZE(qtnf_rates_5g);
+ break;
+ default:
+ band->bitrates = NULL;
+ band->n_bitrates = 0;
+ break;
+ }
+}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
new file mode 100644
index 0000000..5bd3312
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _QTN_FMAC_CFG80211_H_
+#define _QTN_FMAC_CFG80211_H_
+
+#include <net/cfg80211.h>
+
+#include "core.h"
+
+int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac);
+int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev);
+void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif);
+void qtnf_band_init_rates(struct ieee80211_supported_band *band);
+void qtnf_band_setup_htvht_caps(struct qtnf_mac_info *macinfo,
+ struct ieee80211_supported_band *band);
+
+static inline void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted)
+{
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ };
+
+ if (mac->scan_req) {
+ cfg80211_scan_done(mac->scan_req, &info);
+ mac->scan_req = NULL;
+ }
+}
+
+#endif /* _QTN_FMAC_CFG80211_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
new file mode 100644
index 0000000..37c3bec
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -0,0 +1,1978 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#include "cfg80211.h"
+#include "core.h"
+#include "qlink.h"
+#include "qlink_util.h"
+#include "bus.h"
+#include "commands.h"
+
+static int qtnf_cmd_check_reply_header(const struct qlink_resp *resp,
+ u16 cmd_id, u8 mac_id, u8 vif_id,
+ size_t resp_size)
+{
+ if (unlikely(le16_to_cpu(resp->cmd_id) != cmd_id)) {
+ pr_warn("VIF%u.%u CMD%x: bad cmd_id in response: 0x%.4X\n",
+ mac_id, vif_id, cmd_id, le16_to_cpu(resp->cmd_id));
+ return -EINVAL;
+ }
+
+ if (unlikely(resp->macid != mac_id)) {
+ pr_warn("VIF%u.%u CMD%x: bad MAC in response: %u\n",
+ mac_id, vif_id, cmd_id, resp->macid);
+ return -EINVAL;
+ }
+
+ if (unlikely(resp->vifid != vif_id)) {
+ pr_warn("VIF%u.%u CMD%x: bad VIF in response: %u\n",
+ mac_id, vif_id, cmd_id, resp->vifid);
+ return -EINVAL;
+ }
+
+ if (unlikely(le16_to_cpu(resp->mhdr.len) < resp_size)) {
+ pr_warn("VIF%u.%u CMD%x: bad response size %u < %zu\n",
+ mac_id, vif_id, cmd_id,
+ le16_to_cpu(resp->mhdr.len), resp_size);
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
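+/* Send a command skb to the firmware and wait for its response.
+ * The reply header is validated against the expected fixed-size part
+ * (const_resp_size); the length of any trailing variable part is
+ * returned via var_resp_size when the caller asked for the response skb.
+ */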
+static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
+ struct sk_buff *cmd_skb,
+ struct sk_buff **response_skb,
+ u16 *result_code,
+ size_t const_resp_size,
+ size_t *var_resp_size)
+{
+ struct qlink_cmd *cmd;
+ const struct qlink_resp *resp;
+ struct sk_buff *resp_skb = NULL;
+ u16 cmd_id;
+ u8 mac_id, vif_id;
+ int ret;
+
+ cmd = (struct qlink_cmd *)cmd_skb->data;
+ cmd_id = le16_to_cpu(cmd->cmd_id);
+ mac_id = cmd->macid;
+ vif_id = cmd->vifid;
+ cmd->mhdr.len = cpu_to_le16(cmd_skb->len);
+
+ if (unlikely(bus->fw_state != QTNF_FW_STATE_ACTIVE &&
+ le16_to_cpu(cmd->cmd_id) != QLINK_CMD_FW_INIT)) {
+ pr_warn("VIF%u.%u: drop cmd 0x%.4X in fw state %d\n",
+ mac_id, vif_id, le16_to_cpu(cmd->cmd_id),
+ bus->fw_state);
+ return -ENODEV;
+ }
+
+ pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id,
+ le16_to_cpu(cmd->cmd_id));
+
+ ret = qtnf_trans_send_cmd_with_resp(bus, cmd_skb, &resp_skb);
+
+ if (unlikely(ret))
+ goto out;
+
+ resp = (const struct qlink_resp *)resp_skb->data;
+ ret = qtnf_cmd_check_reply_header(resp, cmd_id, mac_id, vif_id,
+ const_resp_size);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (likely(result_code))
+ *result_code = le16_to_cpu(resp->result);
+
+ /* Return length of variable part of response */
+ if (response_skb && var_resp_size)
+ *var_resp_size = le16_to_cpu(resp->mhdr.len) - const_resp_size;
+
+out:
+ if (response_skb)
+ *response_skb = resp_skb;
+ else
+ consume_skb(resp_skb);
+
+ return ret;
+}
+
+static inline int qtnf_cmd_send(struct qtnf_bus *bus,
+ struct sk_buff *cmd_skb,
+ u16 *result_code)
+{
+ return qtnf_cmd_send_with_reply(bus, cmd_skb, NULL, result_code,
+ sizeof(struct qlink_resp), NULL);
+}
+
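+/* Allocate a command skb sized for the largest possible command payload
+ * and pre-fill the common qlink command header.
+ */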
+static struct sk_buff *qtnf_cmd_alloc_new_cmdskb(u8 macid, u8 vifid, u16 cmd_no,
+ size_t cmd_size)
+{
+ struct qlink_cmd *cmd;
+ struct sk_buff *cmd_skb;
+
+ cmd_skb = __dev_alloc_skb(sizeof(*cmd) +
+ QTNF_MAX_CMD_BUF_SIZE, GFP_KERNEL);
+ if (unlikely(!cmd_skb)) {
+ pr_err("VIF%u.%u CMD %u: alloc failed\n", macid, vifid, cmd_no);
+ return NULL;
+ }
+
+ skb_put_zero(cmd_skb, cmd_size);
+
+ cmd = (struct qlink_cmd *)cmd_skb->data;
+ cmd->mhdr.len = cpu_to_le16(cmd_skb->len);
+ cmd->mhdr.type = cpu_to_le16(QLINK_MSG_TYPE_CMD);
+ cmd->cmd_id = cpu_to_le16(cmd_no);
+ cmd->macid = macid;
+ cmd->vifid = vifid;
+
+ return cmd_skb;
+}
+
+int qtnf_cmd_send_start_ap(struct qtnf_vif *vif)
+{
+ struct sk_buff *cmd_skb;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_START_AP,
+ sizeof(struct qlink_cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(vif->mac->bus);
+
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
+ vif->vifid, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ vif->bss_status |= QTNF_STATE_AP_START;
+ netif_carrier_on(vif->netdev);
+
+out:
+ qtnf_bus_unlock(vif->mac->bus);
+ return ret;
+}
+
+int qtnf_cmd_send_regulatory_config(struct qtnf_wmac *mac, const char *alpha2)
+{
+ struct sk_buff *cmd_skb;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
+ QLINK_CMD_REG_REGION,
+ sizeof(struct qlink_cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_cmd_skb_put_tlv_arr(cmd_skb, WLAN_EID_COUNTRY, alpha2,
+ QTNF_MAX_ALPHA_LEN);
+
+ ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ memcpy(mac->bus->hw_info.alpha2_code, alpha2,
+ sizeof(mac->bus->hw_info.alpha2_code));
+out:
+ return ret;
+}
+
+int qtnf_cmd_send_config_ap(struct qtnf_vif *vif)
+{
+ struct sk_buff *cmd_skb;
+ struct qtnf_bss_config *bss_cfg = &vif->bss_cfg;
+ struct cfg80211_chan_def *chandef = &bss_cfg->chandef;
+ struct qlink_tlv_channel *qchan;
+ struct qlink_auth_encr aen;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret;
+ int i;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_CONFIG_AP,
+ sizeof(struct qlink_cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(vif->mac->bus);
+
+ qtnf_cmd_skb_put_tlv_arr(cmd_skb, WLAN_EID_SSID, bss_cfg->ssid,
+ bss_cfg->ssid_len);
+ qtnf_cmd_skb_put_tlv_u16(cmd_skb, QTN_TLV_ID_BCN_PERIOD,
+ bss_cfg->bcn_period);
+ qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_DTIM, bss_cfg->dtim);
+
+ qchan = skb_put_zero(cmd_skb, sizeof(*qchan));
+ qchan->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANNEL);
+ qchan->hdr.len = cpu_to_le16(sizeof(*qchan) -
+ sizeof(struct qlink_tlv_hdr));
+ qchan->hw_value = cpu_to_le16(
+ ieee80211_frequency_to_channel(chandef->chan->center_freq));
+
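+	/* Translate cfg80211 crypto settings into the wire-format
+	 * qlink_auth_encr blob (multi-byte fields are little-endian).
+	 */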
+ memset(&aen, 0, sizeof(aen));
+ aen.auth_type = bss_cfg->auth_type;
+ aen.privacy = !!bss_cfg->privacy;
+ aen.mfp = bss_cfg->mfp;
+ aen.wpa_versions = cpu_to_le32(bss_cfg->crypto.wpa_versions);
+ aen.cipher_group = cpu_to_le32(bss_cfg->crypto.cipher_group);
+ aen.n_ciphers_pairwise = cpu_to_le32(
+ bss_cfg->crypto.n_ciphers_pairwise);
+ for (i = 0; i < QLINK_MAX_NR_CIPHER_SUITES; i++)
+ aen.ciphers_pairwise[i] = cpu_to_le32(
+ bss_cfg->crypto.ciphers_pairwise[i]);
+ aen.n_akm_suites = cpu_to_le32(
+ bss_cfg->crypto.n_akm_suites);
+ for (i = 0; i < QLINK_MAX_NR_AKM_SUITES; i++)
+ aen.akm_suites[i] = cpu_to_le32(
+ bss_cfg->crypto.akm_suites[i]);
+ aen.control_port = bss_cfg->crypto.control_port;
+ aen.control_port_no_encrypt =
+ bss_cfg->crypto.control_port_no_encrypt;
+ aen.control_port_ethertype = cpu_to_le16(be16_to_cpu(
+ bss_cfg->crypto.control_port_ethertype));
+
+ qtnf_cmd_skb_put_tlv_arr(cmd_skb, QTN_TLV_ID_CRYPTO, (u8 *)&aen,
+ sizeof(aen));
+
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
+ vif->vifid, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ vif->bss_status |= QTNF_STATE_AP_CONFIG;
+
+out:
+ qtnf_bus_unlock(vif->mac->bus);
+ return ret;
+}
+
+int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif)
+{
+ struct sk_buff *cmd_skb;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_STOP_AP,
+ sizeof(struct qlink_cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(vif->mac->bus);
+
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
+ vif->vifid, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ vif->bss_status &= ~QTNF_STATE_AP_START;
+ vif->bss_status &= ~QTNF_STATE_AP_CONFIG;
+
+ netif_carrier_off(vif->netdev);
+
+out:
+ qtnf_bus_unlock(vif->mac->bus);
+ return ret;
+}
+
+int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg)
+{
+ struct sk_buff *cmd_skb;
+ struct qlink_cmd_mgmt_frame_register *cmd;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_REGISTER_MGMT,
+ sizeof(*cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(vif->mac->bus);
+
+ cmd = (struct qlink_cmd_mgmt_frame_register *)cmd_skb->data;
+ cmd->frame_type = cpu_to_le16(frame_type);
+ cmd->do_register = reg;
+
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
+ vif->vifid, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+out:
+ qtnf_bus_unlock(vif->mac->bus);
+ return ret;
+}
+
+int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
+ u16 freq, const u8 *buf, size_t len)
+{
+ struct sk_buff *cmd_skb;
+ struct qlink_cmd_mgmt_frame_tx *cmd;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret;
+
+ if (sizeof(*cmd) + len > QTNF_MAX_CMD_BUF_SIZE) {
+ pr_warn("VIF%u.%u: frame is too big: %zu\n", vif->mac->macid,
+ vif->vifid, len);
+ return -E2BIG;
+ }
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_SEND_MGMT_FRAME,
+ sizeof(*cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(vif->mac->bus);
+
+ cmd = (struct qlink_cmd_mgmt_frame_tx *)cmd_skb->data;
+ cmd->cookie = cpu_to_le32(cookie);
+ cmd->freq = cpu_to_le16(freq);
+ cmd->flags = cpu_to_le16(flags);
+
+ if (len && buf)
+ qtnf_cmd_skb_put_buffer(cmd_skb, buf, len);
+
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
+ vif->vifid, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+out:
+ qtnf_bus_unlock(vif->mac->bus);
+ return ret;
+}
+
+int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type,
+ const u8 *buf, size_t len)
+{
+ struct sk_buff *cmd_skb;
+ struct qlink_cmd_mgmt_append_ie *cmd;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret;
+
+ if (sizeof(*cmd) + len > QTNF_MAX_CMD_BUF_SIZE) {
+		pr_warn("VIF%u.%u: frame type %u is too big: %zu\n",
+			vif->mac->macid, vif->vifid, frame_type, len);
+ return -E2BIG;
+ }
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_MGMT_SET_APPIE,
+ sizeof(*cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(vif->mac->bus);
+
+ cmd = (struct qlink_cmd_mgmt_append_ie *)cmd_skb->data;
+ cmd->type = frame_type;
+ cmd->flags = 0;
+
+	/* If len == 0, the IE buffer for the specified frame type
+	 * should be cleared on the EP side.
+	 */
+ if (len && buf)
+ qtnf_cmd_skb_put_buffer(cmd_skb, buf, len);
+
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("VIF%u.%u frame %u: CMD failed: %u\n", vif->mac->macid,
+ vif->vifid, frame_type, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+out:
+ qtnf_bus_unlock(vif->mac->bus);
+ return ret;
+}
+
+static void
+qtnf_sta_info_parse_basic_counters(struct station_info *sinfo,
+ const struct qlink_sta_stat_basic_counters *counters)
+{
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES) |
+ BIT(NL80211_STA_INFO_TX_BYTES);
+ sinfo->rx_bytes = get_unaligned_le64(&counters->rx_bytes);
+ sinfo->tx_bytes = get_unaligned_le64(&counters->tx_bytes);
+
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS) |
+ BIT(NL80211_STA_INFO_TX_PACKETS) |
+ BIT(NL80211_STA_INFO_BEACON_RX);
+ sinfo->rx_packets = get_unaligned_le32(&counters->rx_packets);
+ sinfo->tx_packets = get_unaligned_le32(&counters->tx_packets);
+ sinfo->rx_beacon = get_unaligned_le64(&counters->rx_beacons);
+
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_DROP_MISC) |
+ BIT(NL80211_STA_INFO_TX_FAILED);
+ sinfo->rx_dropped_misc = get_unaligned_le32(&counters->rx_dropped);
+ sinfo->tx_failed = get_unaligned_le32(&counters->tx_failed);
+}
+
+static void
+qtnf_sta_info_parse_rate(struct rate_info *rate_dst,
+ const struct qlink_sta_info_rate *rate_src)
+{
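+	/* cfg80211 expects rate_info::legacy in units of 100 kbit/s;
+	 * qlink reports the rate in Mbit/s, hence the factor of 10.
+	 */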
+ rate_dst->legacy = get_unaligned_le16(&rate_src->rate) * 10;
+
+ rate_dst->mcs = rate_src->mcs;
+ rate_dst->nss = rate_src->nss;
+ rate_dst->flags = 0;
+
+ switch (rate_src->bw) {
+ case QLINK_STA_INFO_RATE_BW_5:
+ rate_dst->bw = RATE_INFO_BW_5;
+ break;
+ case QLINK_STA_INFO_RATE_BW_10:
+ rate_dst->bw = RATE_INFO_BW_10;
+ break;
+ case QLINK_STA_INFO_RATE_BW_20:
+ rate_dst->bw = RATE_INFO_BW_20;
+ break;
+ case QLINK_STA_INFO_RATE_BW_40:
+ rate_dst->bw = RATE_INFO_BW_40;
+ break;
+ case QLINK_STA_INFO_RATE_BW_80:
+ rate_dst->bw = RATE_INFO_BW_80;
+ break;
+ case QLINK_STA_INFO_RATE_BW_160:
+ rate_dst->bw = RATE_INFO_BW_160;
+ break;
+ default:
+ rate_dst->bw = 0;
+ break;
+ }
+
+ if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_HT_MCS)
+ rate_dst->flags |= RATE_INFO_FLAGS_MCS;
+ else if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_VHT_MCS)
+ rate_dst->flags |= RATE_INFO_FLAGS_VHT_MCS;
+}
+
+static void
+qtnf_sta_info_parse_flags(struct nl80211_sta_flag_update *dst,
+ const struct qlink_sta_info_state *src)
+{
+ u32 mask, value;
+
+ dst->mask = 0;
+ dst->set = 0;
+
+ mask = le32_to_cpu(src->mask);
+ value = le32_to_cpu(src->value);
+
+ if (mask & QLINK_STA_FLAG_AUTHORIZED) {
+ dst->mask |= BIT(NL80211_STA_FLAG_AUTHORIZED);
+ if (value & QLINK_STA_FLAG_AUTHORIZED)
+ dst->set |= BIT(NL80211_STA_FLAG_AUTHORIZED);
+ }
+
+ if (mask & QLINK_STA_FLAG_SHORT_PREAMBLE) {
+ dst->mask |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE);
+ if (value & QLINK_STA_FLAG_SHORT_PREAMBLE)
+ dst->set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE);
+ }
+
+ if (mask & QLINK_STA_FLAG_WME) {
+ dst->mask |= BIT(NL80211_STA_FLAG_WME);
+ if (value & QLINK_STA_FLAG_WME)
+ dst->set |= BIT(NL80211_STA_FLAG_WME);
+ }
+
+ if (mask & QLINK_STA_FLAG_MFP) {
+ dst->mask |= BIT(NL80211_STA_FLAG_MFP);
+ if (value & QLINK_STA_FLAG_MFP)
+ dst->set |= BIT(NL80211_STA_FLAG_MFP);
+ }
+
+ if (mask & QLINK_STA_FLAG_AUTHENTICATED) {
+ dst->mask |= BIT(NL80211_STA_FLAG_AUTHENTICATED);
+ if (value & QLINK_STA_FLAG_AUTHENTICATED)
+ dst->set |= BIT(NL80211_STA_FLAG_AUTHENTICATED);
+ }
+
+ if (mask & QLINK_STA_FLAG_TDLS_PEER) {
+ dst->mask |= BIT(NL80211_STA_FLAG_TDLS_PEER);
+ if (value & QLINK_STA_FLAG_TDLS_PEER)
+ dst->set |= BIT(NL80211_STA_FLAG_TDLS_PEER);
+ }
+
+ if (mask & QLINK_STA_FLAG_ASSOCIATED) {
+ dst->mask |= BIT(NL80211_STA_FLAG_ASSOCIATED);
+ if (value & QLINK_STA_FLAG_ASSOCIATED)
+ dst->set |= BIT(NL80211_STA_FLAG_ASSOCIATED);
+ }
+}
+
+static void
+qtnf_sta_info_parse_generic_info(struct station_info *sinfo,
+ const struct qlink_sta_info_generic *info)
+{
+ sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME) |
+ BIT(NL80211_STA_INFO_INACTIVE_TIME);
+ sinfo->connected_time = get_unaligned_le32(&info->connected_time);
+ sinfo->inactive_time = get_unaligned_le32(&info->inactive_time);
+
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL) |
+ BIT(NL80211_STA_INFO_SIGNAL_AVG);
+	sinfo->signal = info->rssi - QLINK_RSSI_OFFSET;
+ sinfo->signal_avg = info->rssi_avg - QLINK_RSSI_OFFSET;
+
+ if (info->rx_rate.rate) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
+ qtnf_sta_info_parse_rate(&sinfo->rxrate, &info->rx_rate);
+ }
+
+ if (info->tx_rate.rate) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ qtnf_sta_info_parse_rate(&sinfo->txrate, &info->tx_rate);
+ }
+
+ sinfo->filled |= BIT(NL80211_STA_INFO_STA_FLAGS);
+ qtnf_sta_info_parse_flags(&sinfo->sta_flags, &info->state);
+}
+
+static int qtnf_cmd_sta_info_parse(struct station_info *sinfo,
+ const u8 *payload, size_t payload_size)
+{
+ const struct qlink_sta_stat_basic_counters *counters;
+ const struct qlink_sta_info_generic *sta_info;
+ u16 tlv_type;
+ u16 tlv_value_len;
+ size_t tlv_full_len;
+ const struct qlink_tlv_hdr *tlv;
+
+ sinfo->filled = 0;
+
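+	/* Walk the variable-length TLV area: each record is a qlink_tlv_hdr
+	 * followed by tlv->len bytes of payload.
+	 */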
+ tlv = (const struct qlink_tlv_hdr *)payload;
+ while (payload_size >= sizeof(struct qlink_tlv_hdr)) {
+ tlv_type = le16_to_cpu(tlv->type);
+ tlv_value_len = le16_to_cpu(tlv->len);
+ tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
+ if (tlv_full_len > payload_size) {
+ pr_warn("malformed TLV 0x%.2X; LEN: %u\n",
+ tlv_type, tlv_value_len);
+ return -EINVAL;
+ }
+ switch (tlv_type) {
+ case QTN_TLV_ID_STA_BASIC_COUNTERS:
+ if (unlikely(tlv_value_len < sizeof(*counters))) {
+ pr_err("invalid TLV size %.4X: %u\n",
+ tlv_type, tlv_value_len);
+ break;
+ }
+
+ counters = (void *)tlv->val;
+ qtnf_sta_info_parse_basic_counters(sinfo, counters);
+ break;
+ case QTN_TLV_ID_STA_GENERIC_INFO:
+ if (unlikely(tlv_value_len < sizeof(*sta_info)))
+ break;
+
+ sta_info = (void *)tlv->val;
+ qtnf_sta_info_parse_generic_info(sinfo, sta_info);
+ break;
+ default:
+ pr_warn("unexpected TLV type: %.4X\n", tlv_type);
+ break;
+ }
+ payload_size -= tlv_full_len;
+ tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
+ }
+
+ if (payload_size) {
+ pr_warn("malformed TLV buf; bytes left: %zu\n", payload_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
+ struct station_info *sinfo)
+{
+ struct sk_buff *cmd_skb, *resp_skb = NULL;
+ struct qlink_cmd_get_sta_info *cmd;
+ const struct qlink_resp_get_sta_info *resp;
+ size_t var_resp_len;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_GET_STA_INFO,
+ sizeof(*cmd));
+
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(vif->mac->bus);
+
+ cmd = (struct qlink_cmd_get_sta_info *)cmd_skb->data;
+ ether_addr_copy(cmd->sta_addr, sta_mac);
+
+ ret = qtnf_cmd_send_with_reply(vif->mac->bus, cmd_skb, &resp_skb,
+ &res_code, sizeof(*resp),
+ &var_resp_len);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ switch (res_code) {
+ case QLINK_CMD_RESULT_ENOTFOUND:
+ pr_warn("VIF%u.%u: %pM STA not found\n",
+ vif->mac->macid, vif->vifid, sta_mac);
+ ret = -ENOENT;
+ break;
+ default:
+ pr_err("VIF%u.%u: can't get info for %pM: %u\n",
+ vif->mac->macid, vif->vifid, sta_mac, res_code);
+ ret = -EFAULT;
+ break;
+ }
+ goto out;
+ }
+
+ resp = (const struct qlink_resp_get_sta_info *)resp_skb->data;
+
+ if (unlikely(!ether_addr_equal(sta_mac, resp->sta_addr))) {
+ pr_err("VIF%u.%u: wrong mac in reply: %pM != %pM\n",
+ vif->mac->macid, vif->vifid, resp->sta_addr, sta_mac);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = qtnf_cmd_sta_info_parse(sinfo, resp->info, var_resp_len);
+
+out:
+ qtnf_bus_unlock(vif->mac->bus);
+ consume_skb(resp_skb);
+
+ return ret;
+}
+
+static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif,
+ enum nl80211_iftype iftype,
+ u8 *mac_addr,
+ enum qlink_cmd_type cmd_type)
+{
+ struct sk_buff *cmd_skb, *resp_skb = NULL;
+ struct qlink_cmd_manage_intf *cmd;
+ const struct qlink_resp_manage_intf *resp;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ cmd_type,
+ sizeof(*cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(vif->mac->bus);
+
+ cmd = (struct qlink_cmd_manage_intf *)cmd_skb->data;
+
+ switch (iftype) {
+ case NL80211_IFTYPE_AP:
+ cmd->intf_info.if_type = cpu_to_le16(QLINK_IFTYPE_AP);
+ break;
+ case NL80211_IFTYPE_STATION:
+ cmd->intf_info.if_type = cpu_to_le16(QLINK_IFTYPE_STATION);
+ break;
+ default:
+ pr_err("VIF%u.%u: unsupported type %d\n", vif->mac->macid,
+ vif->vifid, iftype);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (mac_addr)
+ ether_addr_copy(cmd->intf_info.mac_addr, mac_addr);
+ else
+ eth_zero_addr(cmd->intf_info.mac_addr);
+
+ ret = qtnf_cmd_send_with_reply(vif->mac->bus, cmd_skb, &resp_skb,
+ &res_code, sizeof(*resp), NULL);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("VIF%u.%u: CMD %d failed: %u\n", vif->mac->macid,
+ vif->vifid, cmd_type, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ resp = (const struct qlink_resp_manage_intf *)resp_skb->data;
+ ether_addr_copy(vif->mac_addr, resp->intf_info.mac_addr);
+
+out:
+ qtnf_bus_unlock(vif->mac->bus);
+ consume_skb(resp_skb);
+
+ return ret;
+}
+
+int qtnf_cmd_send_add_intf(struct qtnf_vif *vif,
+ enum nl80211_iftype iftype, u8 *mac_addr)
+{
+ return qtnf_cmd_send_add_change_intf(vif, iftype, mac_addr,
+ QLINK_CMD_ADD_INTF);
+}
+
+int qtnf_cmd_send_change_intf_type(struct qtnf_vif *vif,
+ enum nl80211_iftype iftype, u8 *mac_addr)
+{
+ return qtnf_cmd_send_add_change_intf(vif, iftype, mac_addr,
+ QLINK_CMD_CHANGE_INTF);
+}
+
+int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
+{
+ struct sk_buff *cmd_skb;
+ struct qlink_cmd_manage_intf *cmd;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_DEL_INTF,
+ sizeof(*cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(vif->mac->bus);
+
+ cmd = (struct qlink_cmd_manage_intf *)cmd_skb->data;
+
+ switch (vif->wdev.iftype) {
+ case NL80211_IFTYPE_AP:
+ cmd->intf_info.if_type = cpu_to_le16(QLINK_IFTYPE_AP);
+ break;
+ case NL80211_IFTYPE_STATION:
+ cmd->intf_info.if_type = cpu_to_le16(QLINK_IFTYPE_STATION);
+ break;
+ default:
+ pr_warn("VIF%u.%u: unsupported iftype %d\n", vif->mac->macid,
+ vif->vifid, vif->wdev.iftype);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ eth_zero_addr(cmd->intf_info.mac_addr);
+
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
+ vif->vifid, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+out:
+ qtnf_bus_unlock(vif->mac->bus);
+ return ret;
+}
+
+static int
+qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
+ const struct qlink_resp_get_hw_info *resp)
+{
+ struct qtnf_hw_info *hwinfo = &bus->hw_info;
+
+ hwinfo->num_mac = resp->num_mac;
+ hwinfo->mac_bitmap = resp->mac_bitmap;
+ hwinfo->fw_ver = le32_to_cpu(resp->fw_ver);
+ hwinfo->ql_proto_ver = le16_to_cpu(resp->ql_proto_ver);
+ memcpy(hwinfo->alpha2_code, resp->alpha2_code,
+ sizeof(hwinfo->alpha2_code));
+ hwinfo->total_tx_chain = resp->total_tx_chain;
+ hwinfo->total_rx_chain = resp->total_rx_chain;
+ hwinfo->hw_capab = le32_to_cpu(resp->hw_capab);
+
+	pr_info("fw_version=%u, MACs map %#x, alpha2=\"%c%c\", chains Tx=%u Rx=%u\n",
+ hwinfo->fw_ver, hwinfo->mac_bitmap,
+ hwinfo->alpha2_code[0], hwinfo->alpha2_code[1],
+ hwinfo->total_tx_chain, hwinfo->total_rx_chain);
+
+ return 0;
+}
+
+static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
+ const u8 *tlv_buf, size_t tlv_buf_size)
+{
+ struct ieee80211_iface_limit *limits = NULL;
+ const struct qlink_iface_limit *limit_record;
+ size_t record_count = 0, rec = 0;
+ u16 tlv_type, tlv_value_len, mask;
+ struct qlink_iface_comb_num *comb;
+ size_t tlv_full_len;
+ const struct qlink_tlv_hdr *tlv;
+
+ mac->macinfo.n_limits = 0;
+
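+	/* The firmware first announces how many interface limit records to
+	 * expect (QTN_TLV_ID_NUM_IFACE_COMB), then sends one
+	 * QTN_TLV_ID_IFACE_LIMIT TLV per record.
+	 */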
+ tlv = (const struct qlink_tlv_hdr *)tlv_buf;
+ while (tlv_buf_size >= sizeof(struct qlink_tlv_hdr)) {
+ tlv_type = le16_to_cpu(tlv->type);
+ tlv_value_len = le16_to_cpu(tlv->len);
+ tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
+ if (tlv_full_len > tlv_buf_size) {
+ pr_warn("MAC%u: malformed TLV 0x%.2X; LEN: %u\n",
+ mac->macid, tlv_type, tlv_value_len);
+ return -EINVAL;
+ }
+
+ switch (tlv_type) {
+ case QTN_TLV_ID_NUM_IFACE_COMB:
+ if (unlikely(tlv_value_len != sizeof(*comb)))
+ return -EINVAL;
+
+ comb = (void *)tlv->val;
+ record_count = le16_to_cpu(comb->iface_comb_num);
+
+ mac->macinfo.n_limits = record_count;
+ /* free earlier iface limits memory */
+ kfree(mac->macinfo.limits);
+			mac->macinfo.limits =
+				kcalloc(record_count,
+					sizeof(*mac->macinfo.limits),
+					GFP_KERNEL);
+
+ if (unlikely(!mac->macinfo.limits))
+ return -ENOMEM;
+
+ limits = mac->macinfo.limits;
+ break;
+ case QTN_TLV_ID_IFACE_LIMIT:
+ if (unlikely(!limits)) {
+				pr_warn("MAC%u: limits not initialized\n",
+ mac->macid);
+ return -EINVAL;
+ }
+
+ if (unlikely(tlv_value_len != sizeof(*limit_record))) {
+ pr_warn("MAC%u: record size mismatch\n",
+ mac->macid);
+ return -EINVAL;
+ }
+
+ limit_record = (void *)tlv->val;
+ limits[rec].max = le16_to_cpu(limit_record->max_num);
+ mask = le16_to_cpu(limit_record->type_mask);
+ limits[rec].types = qlink_iface_type_mask_to_nl(mask);
+ /* only AP and STA modes are supported */
+ limits[rec].types &= BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_STATION);
+
+ pr_debug("MAC%u: MAX: %u; TYPES: %.4X\n", mac->macid,
+ limits[rec].max, limits[rec].types);
+
+ if (limits[rec].types)
+ rec++;
+ break;
+ default:
+ break;
+ }
+ tlv_buf_size -= tlv_full_len;
+ tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
+ }
+
+ if (tlv_buf_size) {
+ pr_warn("MAC%u: malformed TLV buf; bytes left: %zu\n",
+ mac->macid, tlv_buf_size);
+ return -EINVAL;
+ }
+
+ if (mac->macinfo.n_limits != rec) {
+ pr_err("MAC%u: combination mismatch: reported=%zu parsed=%zu\n",
+ mac->macid, mac->macinfo.n_limits, rec);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+qtnf_cmd_resp_proc_mac_info(struct qtnf_wmac *mac,
+ const struct qlink_resp_get_mac_info *resp_info)
+{
+ struct qtnf_mac_info *mac_info;
+ struct qtnf_vif *vif;
+
+ mac_info = &mac->macinfo;
+
+ mac_info->bands_cap = resp_info->bands_cap;
+ mac_info->phymode_cap = resp_info->phymode_cap;
+ memcpy(&mac_info->dev_mac, &resp_info->dev_mac,
+ sizeof(mac_info->dev_mac));
+
+ ether_addr_copy(mac->macaddr, mac_info->dev_mac);
+
+ vif = qtnf_mac_get_base_vif(mac);
+ if (vif)
+ ether_addr_copy(vif->mac_addr, mac->macaddr);
+ else
+ pr_err("could not get valid base vif\n");
+
+ mac_info->num_tx_chain = resp_info->num_tx_chain;
+ mac_info->num_rx_chain = resp_info->num_rx_chain;
+
+ mac_info->max_ap_assoc_sta = le16_to_cpu(resp_info->max_ap_assoc_sta);
+ mac_info->radar_detect_widths =
+ qlink_chan_width_mask_to_nl(le16_to_cpu(
+ resp_info->radar_detect_widths));
+
+ memcpy(&mac_info->ht_cap, &resp_info->ht_cap, sizeof(mac_info->ht_cap));
+ memcpy(&mac_info->vht_cap, &resp_info->vht_cap,
+ sizeof(mac_info->vht_cap));
+}
+
+static int
+qtnf_cmd_resp_fill_channels_info(struct ieee80211_supported_band *band,
+ struct qlink_resp_get_chan_info *resp,
+ size_t payload_len)
+{
+ u16 tlv_type;
+ size_t tlv_len;
+ const struct qlink_tlv_hdr *tlv;
+ const struct qlink_tlv_channel *qchan;
+ struct ieee80211_channel *chan;
+ unsigned int chidx = 0;
+ u32 qflags;
+
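+	/* Drop any previously reported channel list; it is rebuilt below
+	 * from the TLVs in this response.
+	 */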
+ kfree(band->channels);
+ band->channels = NULL;
+
+ band->n_channels = resp->num_chans;
+ if (band->n_channels == 0)
+ return 0;
+
+ band->channels = kcalloc(band->n_channels, sizeof(*chan), GFP_KERNEL);
+ if (!band->channels) {
+ band->n_channels = 0;
+ return -ENOMEM;
+ }
+
+ tlv = (struct qlink_tlv_hdr *)resp->info;
+
+ while (payload_len >= sizeof(*tlv)) {
+ tlv_type = le16_to_cpu(tlv->type);
+ tlv_len = le16_to_cpu(tlv->len) + sizeof(*tlv);
+
+ if (tlv_len > payload_len) {
+ pr_warn("malformed TLV 0x%.2X; LEN: %zu\n",
+ tlv_type, tlv_len);
+ goto error_ret;
+ }
+
+ switch (tlv_type) {
+ case QTN_TLV_ID_CHANNEL:
+ if (unlikely(tlv_len != sizeof(*qchan))) {
+ pr_err("invalid channel TLV len %zu\n",
+ tlv_len);
+ goto error_ret;
+ }
+
+ if (chidx == band->n_channels) {
+ pr_err("too many channel TLVs\n");
+ goto error_ret;
+ }
+
+ qchan = (const struct qlink_tlv_channel *)tlv;
+ chan = &band->channels[chidx++];
+ qflags = le32_to_cpu(qchan->flags);
+
+ chan->hw_value = le16_to_cpu(qchan->hw_value);
+ chan->band = band->band;
+ chan->center_freq = le16_to_cpu(qchan->center_freq);
+ chan->max_antenna_gain = (int)qchan->max_antenna_gain;
+ chan->max_power = (int)qchan->max_power;
+ chan->max_reg_power = (int)qchan->max_reg_power;
+ chan->beacon_found = qchan->beacon_found;
+ chan->dfs_cac_ms = le32_to_cpu(qchan->dfs_cac_ms);
+ chan->flags = 0;
+
+ if (qflags & QLINK_CHAN_DISABLED)
+ chan->flags |= IEEE80211_CHAN_DISABLED;
+
+ if (qflags & QLINK_CHAN_NO_IR)
+ chan->flags |= IEEE80211_CHAN_NO_IR;
+
+ if (qflags & QLINK_CHAN_NO_HT40PLUS)
+ chan->flags |= IEEE80211_CHAN_NO_HT40PLUS;
+
+ if (qflags & QLINK_CHAN_NO_HT40MINUS)
+ chan->flags |= IEEE80211_CHAN_NO_HT40MINUS;
+
+ if (qflags & QLINK_CHAN_NO_OFDM)
+ chan->flags |= IEEE80211_CHAN_NO_OFDM;
+
+ if (qflags & QLINK_CHAN_NO_80MHZ)
+ chan->flags |= IEEE80211_CHAN_NO_80MHZ;
+
+ if (qflags & QLINK_CHAN_NO_160MHZ)
+ chan->flags |= IEEE80211_CHAN_NO_160MHZ;
+
+ if (qflags & QLINK_CHAN_INDOOR_ONLY)
+ chan->flags |= IEEE80211_CHAN_INDOOR_ONLY;
+
+ if (qflags & QLINK_CHAN_IR_CONCURRENT)
+ chan->flags |= IEEE80211_CHAN_IR_CONCURRENT;
+
+ if (qflags & QLINK_CHAN_NO_20MHZ)
+ chan->flags |= IEEE80211_CHAN_NO_20MHZ;
+
+ if (qflags & QLINK_CHAN_NO_10MHZ)
+ chan->flags |= IEEE80211_CHAN_NO_10MHZ;
+
+ if (qflags & QLINK_CHAN_RADAR) {
+ chan->flags |= IEEE80211_CHAN_RADAR;
+ chan->dfs_state_entered = jiffies;
+
+ if (qchan->dfs_state == QLINK_DFS_USABLE)
+ chan->dfs_state = NL80211_DFS_USABLE;
+ else if (qchan->dfs_state ==
+ QLINK_DFS_AVAILABLE)
+ chan->dfs_state = NL80211_DFS_AVAILABLE;
+ else
+ chan->dfs_state =
+ NL80211_DFS_UNAVAILABLE;
+ }
+
+ pr_debug("chan=%d flags=%#x max_pow=%d max_reg_pow=%d\n",
+ chan->hw_value, chan->flags, chan->max_power,
+ chan->max_reg_power);
+ break;
+ default:
+ pr_warn("unknown TLV type: %#x\n", tlv_type);
+ break;
+ }
+
+ payload_len -= tlv_len;
+ tlv = (struct qlink_tlv_hdr *)((u8 *)tlv + tlv_len);
+ }
+
+ if (payload_len) {
+ pr_err("malformed TLV buf; bytes left: %zu\n", payload_len);
+ goto error_ret;
+ }
+
+ if (band->n_channels != chidx) {
+ pr_err("channel count mismatch: reported=%d, parsed=%d\n",
+ band->n_channels, chidx);
+ goto error_ret;
+ }
+
+ return 0;
+
+error_ret:
+ kfree(band->channels);
+ band->channels = NULL;
+ band->n_channels = 0;
+
+ return -EINVAL;
+}
+
+static int qtnf_cmd_resp_proc_phy_params(struct qtnf_wmac *mac,
+ const u8 *payload, size_t payload_len)
+{
+ struct qtnf_mac_info *mac_info;
+ struct qlink_tlv_frag_rts_thr *phy_thr;
+ struct qlink_tlv_rlimit *limit;
+ struct qlink_tlv_cclass *class;
+ u16 tlv_type;
+ u16 tlv_value_len;
+ size_t tlv_full_len;
+ const struct qlink_tlv_hdr *tlv;
+
+ mac_info = &mac->macinfo;
+
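+ /* PHY parameters arrive as a flat TLV list; each recognized TLV
+  * updates the matching field of the per-MAC info structure.
+  */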
+ tlv = (struct qlink_tlv_hdr *)payload;
+ while (payload_len >= sizeof(struct qlink_tlv_hdr)) {
+ tlv_type = le16_to_cpu(tlv->type);
+ tlv_value_len = le16_to_cpu(tlv->len);
+ tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
+
+ if (tlv_full_len > payload_len) {
+ pr_warn("MAC%u: malformed TLV 0x%.2X; LEN: %u\n",
+ mac->macid, tlv_type, tlv_value_len);
+ return -EINVAL;
+ }
+
+ switch (tlv_type) {
+ case QTN_TLV_ID_FRAG_THRESH:
+ phy_thr = (void *)tlv;
+ mac_info->frag_thr = (u32)le16_to_cpu(phy_thr->thr);
+ break;
+ case QTN_TLV_ID_RTS_THRESH:
+ phy_thr = (void *)tlv;
+ mac_info->rts_thr = (u32)le16_to_cpu(phy_thr->thr);
+ break;
+ case QTN_TLV_ID_SRETRY_LIMIT:
+ limit = (void *)tlv;
+ mac_info->sretry_limit = limit->rlimit;
+ break;
+ case QTN_TLV_ID_LRETRY_LIMIT:
+ limit = (void *)tlv;
+ mac_info->lretry_limit = limit->rlimit;
+ break;
+ case QTN_TLV_ID_COVERAGE_CLASS:
+ class = (void *)tlv;
+ mac_info->coverage_class = class->cclass;
+ break;
+ default:
+ pr_warn("MAC%u: unknown TLV type: %#x\n", mac->macid,
+ tlv_type);
+ break;
+ }
+
+ payload_len -= tlv_full_len;
+ tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
+ }
+
+ if (payload_len) {
+ pr_warn("MAC%u: malformed TLV buf; bytes left: %zu\n",
+ mac->macid, payload_len);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
+{
+ struct sk_buff *cmd_skb, *resp_skb = NULL;
+ const struct qlink_resp_get_mac_info *resp;
+ size_t var_data_len;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
+ QLINK_CMD_MAC_INFO,
+ sizeof(struct qlink_cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(mac->bus);
+
+ ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code,
+ sizeof(*resp), &var_data_len);
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ resp = (const struct qlink_resp_get_mac_info *)resp_skb->data;
+ qtnf_cmd_resp_proc_mac_info(mac, resp);
+ ret = qtnf_parse_variable_mac_info(mac, resp->var_info, var_data_len);
+
+out:
+ qtnf_bus_unlock(mac->bus);
+ consume_skb(resp_skb);
+
+ return ret;
+}
+
+int qtnf_cmd_get_hw_info(struct qtnf_bus *bus)
+{
+ struct sk_buff *cmd_skb, *resp_skb = NULL;
+ const struct qlink_resp_get_hw_info *resp;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
+ QLINK_CMD_GET_HW_INFO,
+ sizeof(struct qlink_cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(bus);
+
+ ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb, &res_code,
+ sizeof(*resp), NULL);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("cmd exec failed: 0x%.4X\n", res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ resp = (const struct qlink_resp_get_hw_info *)resp_skb->data;
+ ret = qtnf_cmd_resp_proc_hw_info(bus, resp);
+
+out:
+ qtnf_bus_unlock(bus);
+ consume_skb(resp_skb);
+
+ return ret;
+}
+
+int qtnf_cmd_get_mac_chan_info(struct qtnf_wmac *mac,
+ struct ieee80211_supported_band *band)
+{
+ struct sk_buff *cmd_skb, *resp_skb = NULL;
+ size_t info_len;
+ struct qlink_cmd_chans_info_get *cmd;
+ struct qlink_resp_get_chan_info *resp;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret = 0;
+ u8 qband;
+
+ switch (band->band) {
+ case NL80211_BAND_2GHZ:
+ qband = QLINK_BAND_2GHZ;
+ break;
+ case NL80211_BAND_5GHZ:
+ qband = QLINK_BAND_5GHZ;
+ break;
+ case NL80211_BAND_60GHZ:
+ qband = QLINK_BAND_60GHZ;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
+ QLINK_CMD_CHANS_INFO_GET,
+ sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ cmd = (struct qlink_cmd_chans_info_get *)cmd_skb->data;
+ cmd->band = qband;
+
+ qtnf_bus_lock(mac->bus);
+
+ ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code,
+ sizeof(*resp), &info_len);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ resp = (struct qlink_resp_get_chan_info *)resp_skb->data;
+ if (resp->band != qband) {
+ pr_err("MAC%u: reply band %u != cmd band %u\n", mac->macid,
+ resp->band, qband);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = qtnf_cmd_resp_fill_channels_info(band, resp, info_len);
+
+out:
+ qtnf_bus_unlock(mac->bus);
+ consume_skb(resp_skb);
+
+ return ret;
+}
+
+int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac)
+{
+ struct sk_buff *cmd_skb, *resp_skb = NULL;
+ size_t response_size;
+ struct qlink_resp_phy_params *resp;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
+ QLINK_CMD_PHY_PARAMS_GET,
+ sizeof(struct qlink_cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ qtnf_bus_lock(mac->bus);
+
+ ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code,
+ sizeof(*resp), &response_size);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ resp = (struct qlink_resp_phy_params *)resp_skb->data;
+ ret = qtnf_cmd_resp_proc_phy_params(mac, resp->info, response_size);
+
+out:
+ qtnf_bus_unlock(mac->bus);
+ consume_skb(resp_skb);
+
+ return ret;
+}
+
+int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed)
+{
+ struct wiphy *wiphy = priv_to_wiphy(mac);
+ struct sk_buff *cmd_skb;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
+ QLINK_CMD_PHY_PARAMS_SET,
+ sizeof(struct qlink_cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ qtnf_bus_lock(mac->bus);
+
+ if (changed & WIPHY_PARAM_FRAG_THRESHOLD)
+ qtnf_cmd_skb_put_tlv_u16(cmd_skb, QTN_TLV_ID_FRAG_THRESH,
+ wiphy->frag_threshold);
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD)
+ qtnf_cmd_skb_put_tlv_u16(cmd_skb, QTN_TLV_ID_RTS_THRESH,
+ wiphy->rts_threshold);
+ if (changed & WIPHY_PARAM_COVERAGE_CLASS)
+ qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_COVERAGE_CLASS,
+ wiphy->coverage_class);
+
+ ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+out:
+ qtnf_bus_unlock(mac->bus);
+ return ret;
+}
+
+int qtnf_cmd_send_init_fw(struct qtnf_bus *bus)
+{
+ struct sk_buff *cmd_skb;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
+ QLINK_CMD_FW_INIT,
+ sizeof(struct qlink_cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(bus);
+
+ ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("cmd exec failed: 0x%.4X\n", res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+out:
+ qtnf_bus_unlock(bus);
+ return ret;
+}
+
+void qtnf_cmd_send_deinit_fw(struct qtnf_bus *bus)
+{
+ struct sk_buff *cmd_skb;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
+ QLINK_CMD_FW_DEINIT,
+ sizeof(struct qlink_cmd));
+ if (!cmd_skb)
+ return;
+
+ qtnf_bus_lock(bus);
+
+ qtnf_cmd_send(bus, cmd_skb, NULL);
+
+ qtnf_bus_unlock(bus);
+}
+
+int qtnf_cmd_send_add_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params)
+{
+ struct sk_buff *cmd_skb;
+ struct qlink_cmd_add_key *cmd;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_ADD_KEY,
+ sizeof(*cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(vif->mac->bus);
+
+ cmd = (struct qlink_cmd_add_key *)cmd_skb->data;
+
+ if (mac_addr)
+ ether_addr_copy(cmd->addr, mac_addr);
+ else
+ eth_broadcast_addr(cmd->addr);
+
+ cmd->cipher = cpu_to_le32(params->cipher);
+ cmd->key_index = key_index;
+ cmd->pairwise = pairwise;
+
+ if (params->key && params->key_len > 0)
+ qtnf_cmd_skb_put_tlv_arr(cmd_skb, QTN_TLV_ID_KEY,
+ params->key,
+ params->key_len);
+
+ if (params->seq && params->seq_len > 0)
+ qtnf_cmd_skb_put_tlv_arr(cmd_skb, QTN_TLV_ID_SEQ,
+ params->seq,
+ params->seq_len);
+
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("VIF%u.%u: CMD failed: %u\n",
+ vif->mac->macid, vif->vifid, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+out:
+ qtnf_bus_unlock(vif->mac->bus);
+ return ret;
+}
+
+int qtnf_cmd_send_del_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
+ const u8 *mac_addr)
+{
+ struct sk_buff *cmd_skb;
+ struct qlink_cmd_del_key *cmd;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_DEL_KEY,
+ sizeof(*cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(vif->mac->bus);
+
+ cmd = (struct qlink_cmd_del_key *)cmd_skb->data;
+
+ if (mac_addr)
+ ether_addr_copy(cmd->addr, mac_addr);
+ else
+ eth_broadcast_addr(cmd->addr);
+
+ cmd->key_index = key_index;
+ cmd->pairwise = pairwise;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("VIF%u.%u: CMD failed: %u\n",
+ vif->mac->macid, vif->vifid, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+out:
+ qtnf_bus_unlock(vif->mac->bus);
+ return ret;
+}
+
+int qtnf_cmd_send_set_default_key(struct qtnf_vif *vif, u8 key_index,
+ bool unicast, bool multicast)
+{
+ struct sk_buff *cmd_skb;
+ struct qlink_cmd_set_def_key *cmd;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_SET_DEFAULT_KEY,
+ sizeof(*cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(vif->mac->bus);
+
+ cmd = (struct qlink_cmd_set_def_key *)cmd_skb->data;
+ cmd->key_index = key_index;
+ cmd->unicast = unicast;
+ cmd->multicast = multicast;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
+ vif->vifid, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+out:
+ qtnf_bus_unlock(vif->mac->bus);
+ return ret;
+}
+
+int qtnf_cmd_send_set_default_mgmt_key(struct qtnf_vif *vif, u8 key_index)
+{
+ struct sk_buff *cmd_skb;
+ struct qlink_cmd_set_def_mgmt_key *cmd;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_SET_DEFAULT_MGMT_KEY,
+ sizeof(*cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(vif->mac->bus);
+
+ cmd = (struct qlink_cmd_set_def_mgmt_key *)cmd_skb->data;
+ cmd->key_index = key_index;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
+ vif->vifid, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+out:
+ qtnf_bus_unlock(vif->mac->bus);
+ return ret;
+}
+
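+ /* Translate nl80211 station flag bits into their qlink encoding. */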
+static u32 qtnf_encode_sta_flags(u32 flags)
+{
+ u32 code = 0;
+
+ if (flags & BIT(NL80211_STA_FLAG_AUTHORIZED))
+ code |= QLINK_STA_FLAG_AUTHORIZED;
+ if (flags & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE))
+ code |= QLINK_STA_FLAG_SHORT_PREAMBLE;
+ if (flags & BIT(NL80211_STA_FLAG_WME))
+ code |= QLINK_STA_FLAG_WME;
+ if (flags & BIT(NL80211_STA_FLAG_MFP))
+ code |= QLINK_STA_FLAG_MFP;
+ if (flags & BIT(NL80211_STA_FLAG_AUTHENTICATED))
+ code |= QLINK_STA_FLAG_AUTHENTICATED;
+ if (flags & BIT(NL80211_STA_FLAG_TDLS_PEER))
+ code |= QLINK_STA_FLAG_TDLS_PEER;
+ if (flags & BIT(NL80211_STA_FLAG_ASSOCIATED))
+ code |= QLINK_STA_FLAG_ASSOCIATED;
+ return code;
+}
+
+int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
+ struct station_parameters *params)
+{
+ struct sk_buff *cmd_skb;
+ struct qlink_cmd_change_sta *cmd;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_CHANGE_STA,
+ sizeof(*cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(vif->mac->bus);
+
+ cmd = (struct qlink_cmd_change_sta *)cmd_skb->data;
+ ether_addr_copy(cmd->sta_addr, mac);
+ cmd->sta_flags_mask = cpu_to_le32(qtnf_encode_sta_flags(
+ params->sta_flags_mask));
+ cmd->sta_flags_set = cpu_to_le32(qtnf_encode_sta_flags(
+ params->sta_flags_set));
+
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
+ vif->vifid, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+out:
+ qtnf_bus_unlock(vif->mac->bus);
+ return ret;
+}
+
+int qtnf_cmd_send_del_sta(struct qtnf_vif *vif,
+ struct station_del_parameters *params)
+{
+ struct sk_buff *cmd_skb;
+ struct qlink_cmd_del_sta *cmd;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_DEL_STA,
+ sizeof(*cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(vif->mac->bus);
+
+ cmd = (struct qlink_cmd_del_sta *)cmd_skb->data;
+
+ if (params->mac)
+ ether_addr_copy(cmd->sta_addr, params->mac);
+ else
+ eth_broadcast_addr(cmd->sta_addr); /* flush all stations */
+
+ cmd->subtype = params->subtype;
+ cmd->reason_code = cpu_to_le16(params->reason_code);
+
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
+ vif->vifid, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+out:
+ qtnf_bus_unlock(vif->mac->bus);
+ return ret;
+}
+
+int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
+{
+ struct sk_buff *cmd_skb;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ struct ieee80211_channel *sc;
+ struct cfg80211_scan_request *scan_req = mac->scan_req;
+ struct qlink_tlv_channel *qchan;
+ int n_channels;
+ int count = 0;
+ int ret;
+ u32 flags;
+
+ if (scan_req->n_ssids > QTNF_MAX_SSID_LIST_LENGTH) {
+ pr_err("MAC%u: too many SSIDs in scan request\n", mac->macid);
+ return -EINVAL;
+ }
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
+ QLINK_CMD_SCAN,
+ sizeof(struct qlink_cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(mac->bus);
+
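+ /* Each requested SSID is passed to the firmware as its own SSID TLV. */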
+ if (scan_req->n_ssids != 0) {
+ while (count < scan_req->n_ssids) {
+ qtnf_cmd_skb_put_tlv_arr(cmd_skb, WLAN_EID_SSID,
+ scan_req->ssids[count].ssid,
+ scan_req->ssids[count].ssid_len);
+ count++;
+ }
+ }
+
+ if (scan_req->ie_len != 0)
+ qtnf_cmd_skb_put_tlv_arr(cmd_skb, QTN_TLV_ID_IE_SET,
+ scan_req->ie,
+ scan_req->ie_len);
+
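+ /* Pass the channel list as CHANNEL TLVs, skipping disabled channels. */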
+ if (scan_req->n_channels) {
+ n_channels = scan_req->n_channels;
+ count = 0;
+
+ while (n_channels != 0) {
+ sc = scan_req->channels[count];
+ if (sc->flags & IEEE80211_CHAN_DISABLED) {
+ n_channels--;
+ count++;
+ continue;
+ }
+
+ pr_debug("MAC%u: scan chan=%d, freq=%d, flags=%#x\n",
+ mac->macid, sc->hw_value, sc->center_freq,
+ sc->flags);
+ qchan = skb_put_zero(cmd_skb, sizeof(*qchan));
+ flags = 0;
+
+ qchan->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANNEL);
+ qchan->hdr.len = cpu_to_le16(sizeof(*qchan) -
+ sizeof(struct qlink_tlv_hdr));
+ qchan->center_freq = cpu_to_le16(sc->center_freq);
+ qchan->hw_value = cpu_to_le16(sc->hw_value);
+
+ if (sc->flags & IEEE80211_CHAN_NO_IR)
+ flags |= QLINK_CHAN_NO_IR;
+
+ if (sc->flags & IEEE80211_CHAN_RADAR)
+ flags |= QLINK_CHAN_RADAR;
+
+ qchan->flags = cpu_to_le32(flags);
+ n_channels--;
+ count++;
+ }
+ }
+
+ ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ pr_debug("MAC%u: scan started\n", mac->macid);
+out:
+ qtnf_bus_unlock(mac->bus);
+ return ret;
+}
+
+int qtnf_cmd_send_connect(struct qtnf_vif *vif,
+ struct cfg80211_connect_params *sme)
+{
+ struct sk_buff *cmd_skb;
+ struct qlink_cmd_connect *cmd;
+ struct qtnf_bss_config *bss_cfg = &vif->bss_cfg;
+ struct qlink_auth_encr aen;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret;
+ int i;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_CONNECT,
+ sizeof(*cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(vif->mac->bus);
+
+ cmd = (struct qlink_cmd_connect *)cmd_skb->data;
+
+ ether_addr_copy(cmd->bssid, bss_cfg->bssid);
+
+ if (bss_cfg->chandef.chan)
+ cmd->freq = cpu_to_le16(bss_cfg->chandef.chan->center_freq);
+
+ cmd->bg_scan_period = cpu_to_le16(bss_cfg->bg_scan_period);
+
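+ /* Pack authentication and encryption settings into a single crypto
+  * TLV as expected by the firmware.
+  */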
+ memset(&aen, 0, sizeof(aen));
+ aen.auth_type = bss_cfg->auth_type;
+ aen.privacy = !!bss_cfg->privacy;
+ aen.mfp = bss_cfg->mfp;
+ aen.wpa_versions = cpu_to_le32(bss_cfg->crypto.wpa_versions);
+ aen.cipher_group = cpu_to_le32(bss_cfg->crypto.cipher_group);
+ aen.n_ciphers_pairwise = cpu_to_le32(
+ bss_cfg->crypto.n_ciphers_pairwise);
+
+ for (i = 0; i < QLINK_MAX_NR_CIPHER_SUITES; i++)
+ aen.ciphers_pairwise[i] = cpu_to_le32(
+ bss_cfg->crypto.ciphers_pairwise[i]);
+
+ aen.n_akm_suites = cpu_to_le32(bss_cfg->crypto.n_akm_suites);
+
+ for (i = 0; i < QLINK_MAX_NR_AKM_SUITES; i++)
+ aen.akm_suites[i] = cpu_to_le32(
+ bss_cfg->crypto.akm_suites[i]);
+
+ aen.control_port = bss_cfg->crypto.control_port;
+ aen.control_port_no_encrypt =
+ bss_cfg->crypto.control_port_no_encrypt;
+ aen.control_port_ethertype = cpu_to_le16(be16_to_cpu(
+ bss_cfg->crypto.control_port_ethertype));
+
+ qtnf_cmd_skb_put_tlv_arr(cmd_skb, WLAN_EID_SSID, bss_cfg->ssid,
+ bss_cfg->ssid_len);
+ qtnf_cmd_skb_put_tlv_arr(cmd_skb, QTN_TLV_ID_CRYPTO, (u8 *)&aen,
+ sizeof(aen));
+
+ if (sme->ie_len != 0)
+ qtnf_cmd_skb_put_tlv_arr(cmd_skb, QTN_TLV_ID_IE_SET,
+ sme->ie,
+ sme->ie_len);
+
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
+ vif->vifid, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+out:
+ qtnf_bus_unlock(vif->mac->bus);
+ return ret;
+}
+
+int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code)
+{
+ struct sk_buff *cmd_skb;
+ struct qlink_cmd_disconnect *cmd;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_DISCONNECT,
+ sizeof(*cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(vif->mac->bus);
+
+ cmd = (struct qlink_cmd_disconnect *)cmd_skb->data;
+ cmd->reason = cpu_to_le16(reason_code);
+
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
+ vif->vifid, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+out:
+ qtnf_bus_unlock(vif->mac->bus);
+ return ret;
+}
+
+int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up)
+{
+ struct sk_buff *cmd_skb;
+ struct qlink_cmd_updown *cmd;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_UPDOWN_INTF,
+ sizeof(*cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ cmd = (struct qlink_cmd_updown *)cmd_skb->data;
+ cmd->if_up = !!up;
+
+ qtnf_bus_lock(vif->mac->bus);
+
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
+ vif->vifid, res_code);
+ ret = -EFAULT;
+ goto out;
+ }
+out:
+ qtnf_bus_unlock(vif->mac->bus);
+ return ret;
+}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
new file mode 100644
index 0000000..6c51854
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2016 Quantenna Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef QLINK_COMMANDS_H_
+#define QLINK_COMMANDS_H_
+
+#include <linux/nl80211.h>
+
+#include "core.h"
+#include "bus.h"
+
+int qtnf_cmd_send_init_fw(struct qtnf_bus *bus);
+void qtnf_cmd_send_deinit_fw(struct qtnf_bus *bus);
+int qtnf_cmd_get_hw_info(struct qtnf_bus *bus);
+int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac);
+int qtnf_cmd_send_add_intf(struct qtnf_vif *vif, enum nl80211_iftype iftype,
+ u8 *mac_addr);
+int qtnf_cmd_send_change_intf_type(struct qtnf_vif *vif,
+ enum nl80211_iftype iftype, u8 *mac_addr);
+int qtnf_cmd_send_del_intf(struct qtnf_vif *vif);
+int qtnf_cmd_get_mac_chan_info(struct qtnf_wmac *mac,
+ struct ieee80211_supported_band *band);
+int qtnf_cmd_send_regulatory_config(struct qtnf_wmac *mac, const char *alpha2);
+int qtnf_cmd_send_config_ap(struct qtnf_vif *vif);
+int qtnf_cmd_send_start_ap(struct qtnf_vif *vif);
+int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif);
+int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg);
+int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
+ u16 freq, const u8 *buf, size_t len);
+int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type,
+ const u8 *buf, size_t len);
+int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
+ struct station_info *sinfo);
+int qtnf_cmd_send_phy_params(struct qtnf_wmac *mac, u16 cmd_action,
+ void *data_buf);
+int qtnf_cmd_send_add_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params);
+int qtnf_cmd_send_del_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
+ const u8 *mac_addr);
+int qtnf_cmd_send_set_default_key(struct qtnf_vif *vif, u8 key_index,
+ bool unicast, bool multicast);
+int qtnf_cmd_send_set_default_mgmt_key(struct qtnf_vif *vif, u8 key_index);
+int qtnf_cmd_send_add_sta(struct qtnf_vif *vif, const u8 *mac,
+ struct station_parameters *params);
+int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
+ struct station_parameters *params);
+int qtnf_cmd_send_del_sta(struct qtnf_vif *vif,
+ struct station_del_parameters *params);
+
+int qtnf_cmd_resp_parse(struct qtnf_bus *bus, struct sk_buff *resp_skb);
+int qtnf_cmd_resp_check(const struct qtnf_vif *vif,
+ const struct sk_buff *resp_skb, u16 cmd_id,
+ u16 *result, const u8 **payload, size_t *payload_size);
+int qtnf_cmd_send_scan(struct qtnf_wmac *mac);
+int qtnf_cmd_send_connect(struct qtnf_vif *vif,
+ struct cfg80211_connect_params *sme);
+int qtnf_cmd_send_disconnect(struct qtnf_vif *vif,
+ u16 reason_code);
+int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif,
+ bool up);
+
+#endif /* QLINK_COMMANDS_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
new file mode 100644
index 0000000..f053532
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -0,0 +1,618 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/if_ether.h>
+
+#include "core.h"
+#include "bus.h"
+#include "trans.h"
+#include "commands.h"
+#include "cfg80211.h"
+#include "event.h"
+#include "util.h"
+
+#define QTNF_DMP_MAX_LEN 48
+#define QTNF_PRIMARY_VIF_IDX 0
+
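+ /* Meta info carried at the tail of each Rx frame; the magic bytes
+  * bracket the MAC and interface indices (see qtnf_classify_skb()).
+  */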
+struct qtnf_frame_meta_info {
+ u8 magic_s;
+ u8 ifidx;
+ u8 macid;
+ u8 magic_e;
+} __packed;
+
+struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid)
+{
+ struct qtnf_wmac *mac = NULL;
+
+ if (unlikely(macid >= QTNF_MAX_MAC)) {
+ pr_err("invalid MAC index %u\n", macid);
+ return NULL;
+ }
+
+ mac = bus->mac[macid];
+
+ if (unlikely(!mac)) {
+ pr_err("MAC%u: not initialized\n", macid);
+ return NULL;
+ }
+
+ return mac;
+}
+
+/* Netdev handler for open.
+ */
+static int qtnf_netdev_open(struct net_device *ndev)
+{
+ netif_carrier_off(ndev);
+ qtnf_netdev_updown(ndev, 1);
+ return 0;
+}
+
+/* Netdev handler for close.
+ */
+static int qtnf_netdev_close(struct net_device *ndev)
+{
+ netif_carrier_off(ndev);
+ qtnf_virtual_intf_cleanup(ndev);
+ qtnf_netdev_updown(ndev, 0);
+ return 0;
+}
+
+/* Netdev handler for data transmission.
+ */
+static int
+qtnf_netdev_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct qtnf_vif *vif;
+ struct qtnf_wmac *mac;
+
+ vif = qtnf_netdev_get_priv(ndev);
+
+ if (unlikely(skb->dev != ndev)) {
+ pr_err_ratelimited("invalid skb->dev");
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+
+ if (unlikely(vif->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED)) {
+ pr_err_ratelimited("%s: VIF not initialized\n", ndev->name);
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+
+ mac = vif->mac;
+ if (unlikely(!mac)) {
+ pr_err_ratelimited("%s: NULL mac pointer", ndev->name);
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+
+ if (!skb->len || (skb->len > ETH_FRAME_LEN)) {
+ pr_err_ratelimited("%s: invalid skb len %d\n", ndev->name,
+ skb->len);
+ dev_kfree_skb_any(skb);
+ ndev->stats.tx_dropped++;
+ return 0;
+ }
+
+ /* tx path is enabled: reset vif timeout */
+ vif->cons_tx_timeout_cnt = 0;
+
+ return qtnf_bus_data_tx(mac->bus, skb);
+}
+
+/* Netdev handler for getting stats.
+ */
+static struct net_device_stats *qtnf_netdev_get_stats(struct net_device *dev)
+{
+ return &dev->stats;
+}
+
+/* Netdev handler for transmission timeout.
+ */
+static void qtnf_netdev_tx_timeout(struct net_device *ndev)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+ struct qtnf_wmac *mac;
+ struct qtnf_bus *bus;
+
+ if (unlikely(!vif || !vif->mac || !vif->mac->bus))
+ return;
+
+ mac = vif->mac;
+ bus = mac->bus;
+
+ pr_warn("VIF%u.%u: Tx timeout- %lu\n", mac->macid, vif->vifid, jiffies);
+
+ qtnf_bus_data_tx_timeout(bus, ndev);
+ ndev->stats.tx_errors++;
+
+ if (++vif->cons_tx_timeout_cnt > QTNF_TX_TIMEOUT_TRSHLD) {
+ pr_err("Tx timeout threshold exceeded !\n");
+ pr_err("schedule interface %s reset !\n", netdev_name(ndev));
+ queue_work(bus->workqueue, &vif->reset_work);
+ }
+}
+
+/* Network device ops handlers */
+const struct net_device_ops qtnf_netdev_ops = {
+ .ndo_open = qtnf_netdev_open,
+ .ndo_stop = qtnf_netdev_close,
+ .ndo_start_xmit = qtnf_netdev_hard_start_xmit,
+ .ndo_tx_timeout = qtnf_netdev_tx_timeout,
+ .ndo_get_stats = qtnf_netdev_get_stats,
+};
+
+static int qtnf_mac_init_single_band(struct wiphy *wiphy,
+ struct qtnf_wmac *mac,
+ enum nl80211_band band)
+{
+ int ret;
+
+ wiphy->bands[band] = kzalloc(sizeof(*wiphy->bands[band]), GFP_KERNEL);
+ if (!wiphy->bands[band])
+ return -ENOMEM;
+
+ wiphy->bands[band]->band = band;
+
+ ret = qtnf_cmd_get_mac_chan_info(mac, wiphy->bands[band]);
+ if (ret) {
+ pr_err("MAC%u: band %u: failed to get chans info: %d\n",
+ mac->macid, band, ret);
+ return ret;
+ }
+
+ qtnf_band_init_rates(wiphy->bands[band]);
+ qtnf_band_setup_htvht_caps(&mac->macinfo, wiphy->bands[band]);
+
+ return 0;
+}
+
+static int qtnf_mac_init_bands(struct qtnf_wmac *mac)
+{
+ struct wiphy *wiphy = priv_to_wiphy(mac);
+ int ret = 0;
+
+ if (mac->macinfo.bands_cap & QLINK_BAND_2GHZ) {
+ ret = qtnf_mac_init_single_band(wiphy, mac, NL80211_BAND_2GHZ);
+ if (ret)
+ goto out;
+ }
+
+ if (mac->macinfo.bands_cap & QLINK_BAND_5GHZ) {
+ ret = qtnf_mac_init_single_band(wiphy, mac, NL80211_BAND_5GHZ);
+ if (ret)
+ goto out;
+ }
+
+ if (mac->macinfo.bands_cap & QLINK_BAND_60GHZ)
+ ret = qtnf_mac_init_single_band(wiphy, mac, NL80211_BAND_60GHZ);
+
+out:
+ return ret;
+}
+
+struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac)
+{
+ struct qtnf_vif *vif;
+ int i;
+
+ for (i = 0; i < QTNF_MAX_INTF; i++) {
+ vif = &mac->iflist[i];
+ if (vif->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED)
+ return vif;
+ }
+
+ return NULL;
+}
+
+struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac)
+{
+ struct qtnf_vif *vif;
+
+ vif = &mac->iflist[QTNF_PRIMARY_VIF_IDX];
+
+ if (vif->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED)
+ return NULL;
+
+ return vif;
+}
+
+static void qtnf_vif_reset_handler(struct work_struct *work)
+{
+ struct qtnf_vif *vif = container_of(work, struct qtnf_vif, reset_work);
+
+ rtnl_lock();
+
+ if (vif->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED) {
+ rtnl_unlock();
+ return;
+ }
+
+ /* stop tx completely */
+ netif_tx_stop_all_queues(vif->netdev);
+ if (netif_carrier_ok(vif->netdev))
+ netif_carrier_off(vif->netdev);
+
+ qtnf_cfg80211_vif_reset(vif);
+
+ rtnl_unlock();
+}
+
+static void qtnf_mac_init_primary_intf(struct qtnf_wmac *mac)
+{
+ struct qtnf_vif *vif = &mac->iflist[QTNF_PRIMARY_VIF_IDX];
+
+ vif->wdev.iftype = NL80211_IFTYPE_AP;
+ vif->bss_priority = QTNF_DEF_BSS_PRIORITY;
+ vif->wdev.wiphy = priv_to_wiphy(mac);
+ INIT_WORK(&vif->reset_work, qtnf_vif_reset_handler);
+ vif->cons_tx_timeout_cnt = 0;
+}
+
+static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
+ unsigned int macid)
+{
+ struct wiphy *wiphy;
+ struct qtnf_wmac *mac;
+ unsigned int i;
+
+ wiphy = qtnf_wiphy_allocate(bus);
+ if (!wiphy)
+ return ERR_PTR(-ENOMEM);
+
+ mac = wiphy_priv(wiphy);
+
+ mac->macid = macid;
+ mac->bus = bus;
+
+ for (i = 0; i < QTNF_MAX_INTF; i++) {
+ memset(&mac->iflist[i], 0, sizeof(struct qtnf_vif));
+ mac->iflist[i].wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
+ mac->iflist[i].mac = mac;
+ mac->iflist[i].vifid = i;
+ qtnf_sta_list_init(&mac->iflist[i].sta_list);
+ }
+
+ qtnf_mac_init_primary_intf(mac);
+ bus->mac[macid] = mac;
+
+ return mac;
+}
+
+int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
+ const char *name, unsigned char name_assign_type,
+ enum nl80211_iftype iftype)
+{
+ struct wiphy *wiphy = priv_to_wiphy(mac);
+ struct net_device *dev;
+ void *qdev_vif;
+ int ret;
+
+ dev = alloc_netdev_mqs(sizeof(struct qtnf_vif *), name,
+ name_assign_type, ether_setup, 1, 1);
+ if (!dev) {
+ memset(&vif->wdev, 0, sizeof(vif->wdev));
+ vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
+ return -ENOMEM;
+ }
+
+ vif->netdev = dev;
+
+ dev->netdev_ops = &qtnf_netdev_ops;
+ dev->needs_free_netdev = true;
+ dev_net_set(dev, wiphy_net(wiphy));
+ dev->ieee80211_ptr = &vif->wdev;
+ dev->ieee80211_ptr->iftype = iftype;
+ ether_addr_copy(dev->dev_addr, vif->mac_addr);
+ SET_NETDEV_DEV(dev, wiphy_dev(wiphy));
+ dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
+ dev->watchdog_timeo = QTNF_DEF_WDOG_TIMEOUT;
+ dev->tx_queue_len = 100;
+
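+ /* The netdev private area holds a single pointer back to the owning
+  * VIF; see qtnf_netdev_get_priv().
+  */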
+ qdev_vif = netdev_priv(dev);
+ *((void **)qdev_vif) = vif;
+
+ SET_NETDEV_DEV(dev, mac->bus->dev);
+
+ ret = register_netdevice(dev);
+ if (ret) {
+ free_netdev(dev);
+ vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
+ }
+
+ return ret;
+}
+
+static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
+{
+ struct qtnf_wmac *mac;
+ struct wiphy *wiphy;
+ struct qtnf_vif *vif;
+ unsigned int i;
+ enum nl80211_band band;
+
+ mac = bus->mac[macid];
+
+ if (!mac)
+ return;
+
+ wiphy = priv_to_wiphy(mac);
+
+ for (i = 0; i < QTNF_MAX_INTF; i++) {
+ vif = &mac->iflist[i];
+ rtnl_lock();
+ if (vif->netdev &&
+ vif->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED) {
+ qtnf_virtual_intf_cleanup(vif->netdev);
+ qtnf_del_virtual_intf(wiphy, &vif->wdev);
+ }
+ rtnl_unlock();
+ qtnf_sta_list_free(&vif->sta_list);
+ }
+
+ if (mac->wiphy_registered)
+ wiphy_unregister(wiphy);
+
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; ++band) {
+ if (!wiphy->bands[band])
+ continue;
+
+ kfree(wiphy->bands[band]->channels);
+ wiphy->bands[band]->n_channels = 0;
+
+ kfree(wiphy->bands[band]);
+ wiphy->bands[band] = NULL;
+ }
+
+ kfree(mac->macinfo.limits);
+ kfree(wiphy->iface_combinations);
+ wiphy_free(wiphy);
+ bus->mac[macid] = NULL;
+}
+
+static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
+{
+ struct qtnf_wmac *mac;
+ struct qtnf_vif *vif;
+ int ret;
+
+ if (!(bus->hw_info.mac_bitmap & BIT(macid))) {
+ pr_info("MAC%u is not active in FW\n", macid);
+ return 0;
+ }
+
+ mac = qtnf_core_mac_alloc(bus, macid);
+ if (IS_ERR(mac)) {
+ pr_err("MAC%u allocation failed\n", macid);
+ return PTR_ERR(mac);
+ }
+
+ ret = qtnf_cmd_get_mac_info(mac);
+ if (ret) {
+ pr_err("MAC%u: failed to get info\n", macid);
+ goto error;
+ }
+
+ vif = qtnf_mac_get_base_vif(mac);
+ if (!vif) {
+ pr_err("MAC%u: primary VIF is not ready\n", macid);
+ ret = -EFAULT;
+ goto error;
+ }
+
+ ret = qtnf_cmd_send_add_intf(vif, NL80211_IFTYPE_AP, vif->mac_addr);
+ if (ret) {
+ pr_err("MAC%u: failed to add VIF\n", macid);
+ goto error;
+ }
+
+ ret = qtnf_cmd_send_get_phy_params(mac);
+ if (ret) {
+ pr_err("MAC%u: failed to get PHY settings\n", macid);
+ goto error;
+ }
+
+ ret = qtnf_mac_init_bands(mac);
+ if (ret) {
+ pr_err("MAC%u: failed to init bands\n", macid);
+ goto error;
+ }
+
+ ret = qtnf_wiphy_register(&bus->hw_info, mac);
+ if (ret) {
+ pr_err("MAC%u: wiphy registration failed\n", macid);
+ goto error;
+ }
+
+ mac->wiphy_registered = 1;
+
+ rtnl_lock();
+
+ ret = qtnf_core_net_attach(mac, vif, "wlan%d", NET_NAME_ENUM,
+ NL80211_IFTYPE_AP);
+ rtnl_unlock();
+
+ if (ret) {
+ pr_err("MAC%u: failed to attach netdev\n", macid);
+ vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
+ vif->netdev = NULL;
+ goto error;
+ }
+
+ pr_debug("MAC%u initialized\n", macid);
+
+ return 0;
+
+error:
+ qtnf_core_mac_detach(bus, macid);
+ return ret;
+}
+
+int qtnf_core_attach(struct qtnf_bus *bus)
+{
+ unsigned int i;
+ int ret;
+
+ qtnf_trans_init(bus);
+
+ bus->fw_state = QTNF_FW_STATE_BOOT_DONE;
+ qtnf_bus_data_rx_start(bus);
+
+ bus->workqueue = alloc_ordered_workqueue("QTNF_BUS", 0);
+ if (!bus->workqueue) {
+ pr_err("failed to alloc main workqueue\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ INIT_WORK(&bus->event_work, qtnf_event_work_handler);
+
+ ret = qtnf_cmd_send_init_fw(bus);
+ if (ret) {
+ pr_err("failed to init FW: %d\n", ret);
+ goto error;
+ }
+
+ bus->fw_state = QTNF_FW_STATE_ACTIVE;
+
+ ret = qtnf_cmd_get_hw_info(bus);
+ if (ret) {
+ pr_err("failed to get HW info: %d\n", ret);
+ goto error;
+ }
+
+ if (bus->hw_info.ql_proto_ver != QLINK_PROTO_VER) {
+ pr_err("qlink version mismatch %u != %u\n",
+ QLINK_PROTO_VER, bus->hw_info.ql_proto_ver);
+ ret = -EPROTONOSUPPORT;
+ goto error;
+ }
+
+ if (bus->hw_info.num_mac > QTNF_MAX_MAC) {
+ pr_err("no support for number of MACs=%u\n",
+ bus->hw_info.num_mac);
+ ret = -ERANGE;
+ goto error;
+ }
+
+ for (i = 0; i < bus->hw_info.num_mac; i++) {
+ ret = qtnf_core_mac_attach(bus, i);
+
+ if (ret) {
+ pr_err("MAC%u: attach failed: %d\n", i, ret);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ qtnf_core_detach(bus);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qtnf_core_attach);
+
+void qtnf_core_detach(struct qtnf_bus *bus)
+{
+ unsigned int macid;
+
+ qtnf_bus_data_rx_stop(bus);
+
+ for (macid = 0; macid < QTNF_MAX_MAC; macid++)
+ qtnf_core_mac_detach(bus, macid);
+
+ if (bus->fw_state == QTNF_FW_STATE_ACTIVE)
+ qtnf_cmd_send_deinit_fw(bus);
+
+ bus->fw_state = QTNF_FW_STATE_DEAD;
+
+ if (bus->workqueue) {
+ flush_workqueue(bus->workqueue);
+ destroy_workqueue(bus->workqueue);
+ }
+
+ qtnf_trans_free(bus);
+}
+EXPORT_SYMBOL_GPL(qtnf_core_detach);
+
+static inline int qtnf_is_frame_meta_magic_valid(struct qtnf_frame_meta_info *m)
+{
+ return m->magic_s == 0xAB && m->magic_e == 0xBA;
+}
+
+struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb)
+{
+ struct qtnf_frame_meta_info *meta;
+ struct net_device *ndev = NULL;
+ struct qtnf_wmac *mac;
+ struct qtnf_vif *vif;
+
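+ /* Meta info occupies the last bytes of the frame; it is stripped
+  * from the skb once the destination netdev has been resolved.
+  */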
+ meta = (struct qtnf_frame_meta_info *)
+ (skb_tail_pointer(skb) - sizeof(*meta));
+
+ if (unlikely(!qtnf_is_frame_meta_magic_valid(meta))) {
+ pr_err_ratelimited("invalid magic 0x%x:0x%x\n",
+ meta->magic_s, meta->magic_e);
+ goto out;
+ }
+
+ if (unlikely(meta->macid >= QTNF_MAX_MAC)) {
+ pr_err_ratelimited("invalid mac(%u)\n", meta->macid);
+ goto out;
+ }
+
+ if (unlikely(meta->ifidx >= QTNF_MAX_INTF)) {
+ pr_err_ratelimited("invalid vif(%u)\n", meta->ifidx);
+ goto out;
+ }
+
+ mac = bus->mac[meta->macid];
+
+ if (unlikely(!mac)) {
+ pr_err_ratelimited("mac(%d) does not exist\n", meta->macid);
+ goto out;
+ }
+
+ vif = &mac->iflist[meta->ifidx];
+
+ if (unlikely(vif->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED)) {
+ pr_err_ratelimited("vif(%u) does not exists\n", meta->ifidx);
+ goto out;
+ }
+
+ ndev = vif->netdev;
+
+ if (unlikely(!ndev)) {
+ pr_err_ratelimited("netdev for wlan%u.%u does not exists\n",
+ meta->macid, meta->ifidx);
+ goto out;
+ }
+
+ __skb_trim(skb, skb->len - sizeof(*meta));
+
+out:
+ return ndev;
+}
+EXPORT_SYMBOL_GPL(qtnf_classify_skb);
+
+MODULE_AUTHOR("Quantenna Communications");
+MODULE_DESCRIPTION("Quantenna 802.11 wireless LAN FullMAC driver.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
new file mode 100644
index 0000000..a616434
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _QTN_FMAC_CORE_H_
+#define _QTN_FMAC_CORE_H_
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <net/sock.h>
+#include <net/lib80211.h>
+#include <net/cfg80211.h>
+#include <linux/vmalloc.h>
+#include <linux/firmware.h>
+#include <linux/ctype.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#include "qlink.h"
+#include "trans.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
+#define QTNF_MAX_SSID_LIST_LENGTH 2
+#define QTNF_MAX_VSIE_LEN 255
+#define QTNF_MAX_ALPHA_LEN 2
+#define QTNF_MAX_INTF 8
+#define QTNF_MAX_EVENT_QUEUE_LEN 255
+#define QTNF_DEFAULT_BG_SCAN_PERIOD 300
+#define QTNF_MAX_BG_SCAN_PERIOD 0xffff
+
+#define QTNF_DEF_BSS_PRIORITY 0
+#define QTNF_DEF_WDOG_TIMEOUT 5
+#define QTNF_TX_TIMEOUT_TRSHLD 100
+
+#define QTNF_STATE_AP_CONFIG BIT(2)
+#define QTNF_STATE_AP_START BIT(1)
+
+extern const struct net_device_ops qtnf_netdev_ops;
+struct qtnf_bus;
+struct qtnf_vif;
+
+struct qtnf_bss_config {
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 bssid[ETH_ALEN];
+ size_t ssid_len;
+ u8 dtim;
+ u16 bcn_period;
+ u16 auth_type;
+ bool privacy;
+ enum nl80211_mfp mfp;
+ struct cfg80211_chan_def chandef;
+ struct cfg80211_crypto_settings crypto;
+ u16 bg_scan_period;
+ u32 connect_flags;
+};
+
+struct qtnf_sta_node {
+ struct list_head list;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct qtnf_sta_list {
+ struct list_head head;
+ atomic_t size;
+};
+
+enum qtnf_sta_state {
+ QTNF_STA_DISCONNECTED,
+ QTNF_STA_CONNECTING,
+ QTNF_STA_CONNECTED
+};
+
+struct qtnf_vif {
+ struct wireless_dev wdev;
+ u8 vifid;
+ u8 bss_priority;
+ u8 bss_status;
+ enum qtnf_sta_state sta_state;
+ u16 mgmt_frames_bitmask;
+ struct net_device *netdev;
+ struct qtnf_wmac *mac;
+ u8 mac_addr[ETH_ALEN];
+ struct work_struct reset_work;
+ struct qtnf_bss_config bss_cfg;
+ struct qtnf_sta_list sta_list;
+ unsigned long cons_tx_timeout_cnt;
+};
+
+struct qtnf_mac_info {
+ u8 bands_cap;
+ u8 phymode_cap;
+ u8 dev_mac[ETH_ALEN];
+ u8 num_tx_chain;
+ u8 num_rx_chain;
+ u16 max_ap_assoc_sta;
+ u32 frag_thr;
+ u32 rts_thr;
+ u8 lretry_limit;
+ u8 sretry_limit;
+ u8 coverage_class;
+ u8 radar_detect_widths;
+ struct ieee80211_ht_cap ht_cap;
+ struct ieee80211_vht_cap vht_cap;
+ struct ieee80211_iface_limit *limits;
+ size_t n_limits;
+};
+
+struct qtnf_wmac {
+ u8 macid;
+ u8 wiphy_registered;
+ u8 macaddr[ETH_ALEN];
+ struct qtnf_bus *bus;
+ struct qtnf_mac_info macinfo;
+ struct qtnf_vif iflist[QTNF_MAX_INTF];
+ struct cfg80211_scan_request *scan_req;
+};
+
+struct qtnf_hw_info {
+ u8 num_mac;
+ u8 mac_bitmap;
+ u8 alpha2_code[QTNF_MAX_ALPHA_LEN];
+ u32 fw_ver;
+ u16 ql_proto_ver;
+ u8 total_tx_chain;
+ u8 total_rx_chain;
+ u32 hw_capab;
+};
+
+struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
+struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac);
+struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus);
+int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *priv,
+ const char *name, unsigned char name_assign_type,
+ enum nl80211_iftype iftype);
+void qtnf_main_work_queue(struct work_struct *work);
+int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed);
+int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac);
+
+struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid);
+struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb);
+struct net_device *qtnf_classify_skb_no_mbss(struct qtnf_bus *bus,
+ struct sk_buff *skb);
+
+void qtnf_virtual_intf_cleanup(struct net_device *ndev);
+
+void qtnf_netdev_updown(struct net_device *ndev, bool up);
+
+static inline struct qtnf_vif *qtnf_netdev_get_priv(struct net_device *dev)
+{
+ return *((void **)netdev_priv(dev));
+}
+
+#endif /* _QTN_FMAC_CORE_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/debug.c b/drivers/net/wireless/quantenna/qtnfmac/debug.c
new file mode 100644
index 0000000..9f826b9
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/debug.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "debug.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "qtnfmac dbg: %s: " fmt, __func__
+
+void qtnf_debugfs_init(struct qtnf_bus *bus, const char *name)
+{
+ bus->dbg_dir = debugfs_create_dir(name, NULL);
+
+ if (IS_ERR_OR_NULL(bus->dbg_dir)) {
+ pr_warn("failed to create debugfs root dir\n");
+ bus->dbg_dir = NULL;
+ }
+}
+
+void qtnf_debugfs_remove(struct qtnf_bus *bus)
+{
+ debugfs_remove_recursive(bus->dbg_dir);
+ bus->dbg_dir = NULL;
+}
+
+void qtnf_debugfs_add_entry(struct qtnf_bus *bus, const char *name,
+ int (*fn)(struct seq_file *seq, void *data))
+{
+ struct dentry *entry;
+
+ entry = debugfs_create_devm_seqfile(bus->dev, name, bus->dbg_dir, fn);
+ if (IS_ERR_OR_NULL(entry))
+ pr_warn("failed to add entry (%s)\n", name);
+}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/debug.h b/drivers/net/wireless/quantenna/qtnfmac/debug.h
new file mode 100644
index 0000000..d6dd12b5
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/debug.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _QTN_FMAC_DEBUG_H_
+#define _QTN_FMAC_DEBUG_H_
+
+#include <linux/debugfs.h>
+
+#include "core.h"
+#include "bus.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+void qtnf_debugfs_init(struct qtnf_bus *bus, const char *name);
+void qtnf_debugfs_remove(struct qtnf_bus *bus);
+void qtnf_debugfs_add_entry(struct qtnf_bus *bus, const char *name,
+ int (*fn)(struct seq_file *seq, void *data));
+
+#else
+
+static inline void qtnf_debugfs_init(struct qtnf_bus *bus, const char *name)
+{
+}
+
+static inline void qtnf_debugfs_remove(struct qtnf_bus *bus)
+{
+}
+
+static inline void
+qtnf_debugfs_add_entry(struct qtnf_bus *bus, const char *name,
+ int (*fn)(struct seq_file *seq, void *data))
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* _QTN_FMAC_DEBUG_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
new file mode 100644
index 0000000..9b61e9a
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "cfg80211.h"
+#include "core.h"
+#include "qlink.h"
+#include "bus.h"
+#include "trans.h"
+#include "util.h"
+#include "event.h"
+
+static int
+qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
+ const struct qlink_event_sta_assoc *sta_assoc,
+ u16 len)
+{
+ const u8 *sta_addr;
+ u16 frame_control;
+ struct station_info sinfo = { 0 };
+ size_t payload_len;
+ u16 tlv_type;
+ u16 tlv_value_len;
+ size_t tlv_full_len;
+ const struct qlink_tlv_hdr *tlv;
+
+ if (unlikely(len < sizeof(*sta_assoc))) {
+ pr_err("VIF%u.%u: payload is too short (%u < %zu)\n",
+ mac->macid, vif->vifid, len, sizeof(*sta_assoc));
+ return -EINVAL;
+ }
+
+ if (vif->wdev.iftype != NL80211_IFTYPE_AP) {
+ pr_err("VIF%u.%u: STA_ASSOC event when not in AP mode\n",
+ mac->macid, vif->vifid);
+ return -EPROTO;
+ }
+
+ if (!(vif->bss_status & QTNF_STATE_AP_START)) {
+ pr_err("VIF%u.%u: STA_ASSOC event when AP is not started\n",
+ mac->macid, vif->vifid);
+ return -EPROTO;
+ }
+
+ sta_addr = sta_assoc->sta_addr;
+ frame_control = le16_to_cpu(sta_assoc->frame_control);
+
+ pr_debug("VIF%u.%u: MAC:%pM FC:%x\n", mac->macid, vif->vifid, sta_addr,
+ frame_control);
+
+ qtnf_sta_list_add(&vif->sta_list, sta_addr);
+
+ sinfo.assoc_req_ies = NULL;
+ sinfo.assoc_req_ies_len = 0;
+
+ payload_len = len - sizeof(*sta_assoc);
+ tlv = (struct qlink_tlv_hdr *)sta_assoc->ies;
+
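+ /* Association request IEs, if present, follow the fixed part of the
+  * event as an IE_SET TLV.
+  */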
+ while (payload_len >= sizeof(struct qlink_tlv_hdr)) {
+ tlv_type = le16_to_cpu(tlv->type);
+ tlv_value_len = le16_to_cpu(tlv->len);
+ tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
+
+ if (tlv_full_len > payload_len) {
+ pr_warn("VIF%u.%u: malformed TLV 0x%.2X; LEN: %u\n",
+ mac->macid, vif->vifid, tlv_type,
+ tlv_value_len);
+ return -EINVAL;
+ }
+
+ if (tlv_type == QTN_TLV_ID_IE_SET) {
+ sinfo.assoc_req_ies = tlv->val;
+ sinfo.assoc_req_ies_len = tlv_value_len;
+ }
+
+ payload_len -= tlv_full_len;
+ tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
+ }
+
+ if (payload_len) {
+ pr_warn("VIF%u.%u: malformed TLV buf; bytes left: %zu\n",
+ mac->macid, vif->vifid, payload_len);
+ return -EINVAL;
+ }
+
+ cfg80211_new_sta(vif->netdev, sta_assoc->sta_addr, &sinfo,
+ GFP_KERNEL);
+
+ return 0;
+}
+
+static int
+qtnf_event_handle_sta_deauth(struct qtnf_wmac *mac, struct qtnf_vif *vif,
+ const struct qlink_event_sta_deauth *sta_deauth,
+ u16 len)
+{
+ const u8 *sta_addr;
+ u16 reason;
+
+ if (unlikely(len < sizeof(*sta_deauth))) {
+ pr_err("VIF%u.%u: payload is too short (%u < %zu)\n",
+ mac->macid, vif->vifid, len,
+ sizeof(struct qlink_event_sta_deauth));
+ return -EINVAL;
+ }
+
+ if (vif->wdev.iftype != NL80211_IFTYPE_AP) {
+ pr_err("VIF%u.%u: STA_DEAUTH event when not in AP mode\n",
+ mac->macid, vif->vifid);
+ return -EPROTO;
+ }
+
+ if (!(vif->bss_status & QTNF_STATE_AP_START)) {
+ pr_err("VIF%u.%u: STA_DEAUTH event when AP is not started\n",
+ mac->macid, vif->vifid);
+ return -EPROTO;
+ }
+
+ sta_addr = sta_deauth->sta_addr;
+ reason = le16_to_cpu(sta_deauth->reason);
+
+ pr_debug("VIF%u.%u: MAC:%pM reason:%x\n", mac->macid, vif->vifid,
+ sta_addr, reason);
+
+ if (qtnf_sta_list_del(&vif->sta_list, sta_addr))
+ cfg80211_del_sta(vif->netdev, sta_deauth->sta_addr,
+ GFP_KERNEL);
+
+ return 0;
+}
+
+static int
+qtnf_event_handle_bss_join(struct qtnf_vif *vif,
+ const struct qlink_event_bss_join *join_info,
+ u16 len)
+{
+ if (unlikely(len < sizeof(*join_info))) {
+ pr_err("VIF%u.%u: payload is too short (%u < %zu)\n",
+ vif->mac->macid, vif->vifid, len,
+ sizeof(struct qlink_event_bss_join));
+ return -EINVAL;
+ }
+
+ if (vif->wdev.iftype != NL80211_IFTYPE_STATION) {
+ pr_err("VIF%u.%u: BSS_JOIN event when not in STA mode\n",
+ vif->mac->macid, vif->vifid);
+ return -EPROTO;
+ }
+
+ if (vif->sta_state != QTNF_STA_CONNECTING) {
+ pr_err("VIF%u.%u: BSS_JOIN event when STA is not connecting\n",
+ vif->mac->macid, vif->vifid);
+ return -EPROTO;
+ }
+
+ pr_debug("VIF%u.%u: BSSID:%pM\n", vif->mac->macid, vif->vifid,
+ join_info->bssid);
+
+ cfg80211_connect_result(vif->netdev, join_info->bssid, NULL, 0, NULL,
+ 0, le16_to_cpu(join_info->status), GFP_KERNEL);
+
+ if (le16_to_cpu(join_info->status) == WLAN_STATUS_SUCCESS) {
+ vif->sta_state = QTNF_STA_CONNECTED;
+ netif_carrier_on(vif->netdev);
+ } else {
+ vif->sta_state = QTNF_STA_DISCONNECTED;
+ }
+
+ return 0;
+}
+
+static int
+qtnf_event_handle_bss_leave(struct qtnf_vif *vif,
+ const struct qlink_event_bss_leave *leave_info,
+ u16 len)
+{
+ if (unlikely(len < sizeof(*leave_info))) {
+ pr_err("VIF%u.%u: payload is too short (%u < %zu)\n",
+ vif->mac->macid, vif->vifid, len,
+ sizeof(struct qlink_event_bss_leave));
+ return -EINVAL;
+ }
+
+ if (vif->wdev.iftype != NL80211_IFTYPE_STATION) {
+ pr_err("VIF%u.%u: BSS_LEAVE event when not in STA mode\n",
+ vif->mac->macid, vif->vifid);
+ return -EPROTO;
+ }
+
+ if (vif->sta_state != QTNF_STA_CONNECTED) {
+ pr_err("VIF%u.%u: BSS_LEAVE event when STA is not connected\n",
+ vif->mac->macid, vif->vifid);
+ return -EPROTO;
+ }
+
+ pr_debug("VIF%u.%u: disconnected\n", vif->mac->macid, vif->vifid);
+
+ cfg80211_disconnected(vif->netdev, leave_info->reason, NULL, 0, 0,
+ GFP_KERNEL);
+
+ vif->sta_state = QTNF_STA_DISCONNECTED;
+ netif_carrier_off(vif->netdev);
+
+ return 0;
+}
+
+static int
+qtnf_event_handle_mgmt_received(struct qtnf_vif *vif,
+ const struct qlink_event_rxmgmt *rxmgmt,
+ u16 len)
+{
+ const size_t min_len = sizeof(*rxmgmt) +
+ sizeof(struct ieee80211_hdr_3addr);
+ const struct ieee80211_hdr_3addr *frame = (void *)rxmgmt->frame_data;
+ const u16 frame_len = len - sizeof(*rxmgmt);
+ enum nl80211_rxmgmt_flags flags = 0;
+
+ if (unlikely(len < min_len)) {
+ pr_err("VIF%u.%u: payload is too short (%u < %zu)\n",
+ vif->mac->macid, vif->vifid, len, min_len);
+ return -EINVAL;
+ }
+
+ if (le32_to_cpu(rxmgmt->flags) & QLINK_RXMGMT_FLAG_ANSWERED)
+ flags |= NL80211_RXMGMT_FLAG_ANSWERED;
+
+ pr_debug("%s LEN:%u FC:%.4X SA:%pM\n", vif->netdev->name, frame_len,
+ le16_to_cpu(frame->frame_control), frame->addr2);
+
+ cfg80211_rx_mgmt(&vif->wdev, le32_to_cpu(rxmgmt->freq),
+ le32_to_cpu(rxmgmt->sig_dbm), rxmgmt->frame_data,
+ frame_len, flags);
+
+ return 0;
+}
+
+static int
+qtnf_event_handle_scan_results(struct qtnf_vif *vif,
+ const struct qlink_event_scan_result *sr,
+ u16 len)
+{
+ struct cfg80211_bss *bss;
+ struct ieee80211_channel *channel;
+ struct wiphy *wiphy = priv_to_wiphy(vif->mac);
+ enum cfg80211_bss_frame_type frame_type;
+ size_t payload_len;
+ u16 tlv_type;
+ u16 tlv_value_len;
+ size_t tlv_full_len;
+ const struct qlink_tlv_hdr *tlv;
+
+ const u8 *ies = NULL;
+ size_t ies_len = 0;
+
+ if (len < sizeof(*sr)) {
+ pr_err("VIF%u.%u: payload is too short\n", vif->mac->macid,
+ vif->vifid);
+ return -EINVAL;
+ }
+
+ channel = ieee80211_get_channel(wiphy, le16_to_cpu(sr->freq));
+ if (!channel) {
+ pr_err("VIF%u.%u: channel at %u MHz not found\n",
+ vif->mac->macid, vif->vifid, le16_to_cpu(sr->freq));
+ return -EINVAL;
+ }
+
+ switch (sr->frame_type) {
+ case QLINK_BSS_FTYPE_BEACON:
+ frame_type = CFG80211_BSS_FTYPE_BEACON;
+ break;
+ case QLINK_BSS_FTYPE_PRESP:
+ frame_type = CFG80211_BSS_FTYPE_PRESP;
+ break;
+ default:
+ frame_type = CFG80211_BSS_FTYPE_UNKNOWN;
+ }
+
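+ /* Scan result IEs arrive as an IE_SET TLV after the fixed payload. */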
+ payload_len = len - sizeof(*sr);
+ tlv = (struct qlink_tlv_hdr *)sr->payload;
+
+ while (payload_len >= sizeof(struct qlink_tlv_hdr)) {
+ tlv_type = le16_to_cpu(tlv->type);
+ tlv_value_len = le16_to_cpu(tlv->len);
+ tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
+
+ if (tlv_full_len > payload_len) {
+ pr_warn("VIF%u.%u: malformed TLV 0x%.2X; LEN: %u\n",
+ vif->mac->macid, vif->vifid, tlv_type,
+ tlv_value_len);
+ return -EINVAL;
+ }
+
+ if (tlv_type == QTN_TLV_ID_IE_SET) {
+ ies = tlv->val;
+ ies_len = tlv_value_len;
+ }
+
+ payload_len -= tlv_full_len;
+ tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
+ }
+
+ if (payload_len) {
+ pr_warn("VIF%u.%u: malformed TLV buf; bytes left: %zu\n",
+ vif->mac->macid, vif->vifid, payload_len);
+ return -EINVAL;
+ }
+
+ bss = cfg80211_inform_bss(wiphy, channel, frame_type,
+ sr->bssid, get_unaligned_le64(&sr->tsf),
+ le16_to_cpu(sr->capab),
+ le16_to_cpu(sr->bintval), ies, ies_len,
+ sr->signal, GFP_KERNEL);
+ if (!bss)
+ return -ENOMEM;
+
+ cfg80211_put_bss(wiphy, bss);
+
+ return 0;
+}
+
+static int
+qtnf_event_handle_scan_complete(struct qtnf_wmac *mac,
+ const struct qlink_event_scan_complete *status,
+ u16 len)
+{
+ if (len < sizeof(*status)) {
+ pr_err("MAC%u: payload is too short\n", mac->macid);
+ return -EINVAL;
+ }
+
+ qtnf_scan_done(mac, le32_to_cpu(status->flags) & QLINK_SCAN_ABORTED);
+
+ return 0;
+}
+
+static int qtnf_event_parse(struct qtnf_wmac *mac,
+ const struct sk_buff *event_skb)
+{
+ const struct qlink_event *event;
+ struct qtnf_vif *vif = NULL;
+ int ret = -1;
+ u16 event_id;
+ u16 event_len;
+
+ event = (const struct qlink_event *)event_skb->data;
+ event_id = le16_to_cpu(event->event_id);
+ event_len = le16_to_cpu(event->mhdr.len);
+
+ if (likely(event->vifid < QTNF_MAX_INTF)) {
+ vif = &mac->iflist[event->vifid];
+ } else {
+ pr_err("invalid vif(%u)\n", event->vifid);
+ return -EINVAL;
+ }
+
+ switch (event_id) {
+ case QLINK_EVENT_STA_ASSOCIATED:
+ ret = qtnf_event_handle_sta_assoc(mac, vif, (const void *)event,
+ event_len);
+ break;
+ case QLINK_EVENT_STA_DEAUTH:
+ ret = qtnf_event_handle_sta_deauth(mac, vif,
+ (const void *)event,
+ event_len);
+ break;
+ case QLINK_EVENT_MGMT_RECEIVED:
+ ret = qtnf_event_handle_mgmt_received(vif, (const void *)event,
+ event_len);
+ break;
+ case QLINK_EVENT_SCAN_RESULTS:
+ ret = qtnf_event_handle_scan_results(vif, (const void *)event,
+ event_len);
+ break;
+ case QLINK_EVENT_SCAN_COMPLETE:
+ ret = qtnf_event_handle_scan_complete(mac, (const void *)event,
+ event_len);
+ break;
+ case QLINK_EVENT_BSS_JOIN:
+ ret = qtnf_event_handle_bss_join(vif, (const void *)event,
+ event_len);
+ break;
+ case QLINK_EVENT_BSS_LEAVE:
+ ret = qtnf_event_handle_bss_leave(vif, (const void *)event,
+ event_len);
+ break;
+ default:
+ pr_warn("unknown event type: %x\n", event_id);
+ break;
+ }
+
+ return ret;
+}
+
+static int qtnf_event_process_skb(struct qtnf_bus *bus,
+ const struct sk_buff *skb)
+{
+ const struct qlink_event *event;
+ struct qtnf_wmac *mac;
+ int res;
+
+ if (unlikely(!skb || skb->len < sizeof(*event))) {
+ pr_err("invalid event buffer\n");
+ return -EINVAL;
+ }
+
+ event = (struct qlink_event *)skb->data;
+
+ mac = qtnf_core_get_mac(bus, event->macid);
+
+ pr_debug("new event id:%x len:%u mac:%u vif:%u\n",
+ le16_to_cpu(event->event_id), le16_to_cpu(event->mhdr.len),
+ event->macid, event->vifid);
+
+ if (unlikely(!mac))
+ return -ENXIO;
+
+ qtnf_bus_lock(bus);
+ res = qtnf_event_parse(mac, skb);
+ qtnf_bus_unlock(bus);
+
+ return res;
+}
+
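+/* Work handler: drain and process every queued event skb from the bus
+ * transport event queue.
+ */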
+void qtnf_event_work_handler(struct work_struct *work)
+{
+ struct qtnf_bus *bus = container_of(work, struct qtnf_bus, event_work);
+ struct sk_buff_head *event_queue = &bus->trans.event_queue;
+ struct sk_buff *current_event_skb = skb_dequeue(event_queue);
+
+ while (current_event_skb) {
+ qtnf_event_process_skb(bus, current_event_skb);
+ dev_kfree_skb_any(current_event_skb);
+ current_event_skb = skb_dequeue(event_queue);
+ }
+}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.h b/drivers/net/wireless/quantenna/qtnfmac/event.h
new file mode 100644
index 0000000..ae759b6
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _QTN_FMAC_EVENT_H_
+#define _QTN_FMAC_EVENT_H_
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "qlink.h"
+
+void qtnf_event_work_handler(struct work_struct *work);
+
+#endif /* _QTN_FMAC_EVENT_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
new file mode 100644
index 0000000..f93b27f
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
@@ -0,0 +1,1378 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/crc32.h>
+#include <linux/spinlock.h>
+
+#include "qtn_hw_ids.h"
+#include "pcie_bus_priv.h"
+#include "core.h"
+#include "bus.h"
+#include "debug.h"
+
+static bool use_msi = true;
+module_param(use_msi, bool, 0644);
+MODULE_PARM_DESC(use_msi, "set 0 to use legacy interrupt");
+
+static unsigned int tx_bd_size_param = 256;
+module_param(tx_bd_size_param, uint, 0644);
+MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size");
+
+static unsigned int rx_bd_size_param = 256;
+module_param(rx_bd_size_param, uint, 0644);
+MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size");
+
+static unsigned int rx_bd_reserved_param = 16;
+module_param(rx_bd_reserved_param, uint, 0644);
+MODULE_PARM_DESC(rx_bd_reserved_param, "Reserved RX descriptors");
+
+static u8 flashboot = 1;
+module_param(flashboot, byte, 0644);
+MODULE_PARM_DESC(flashboot, "set to 0 to use FW binary file on FS");
+
+#define DRV_NAME "qtnfmac_pearl_pcie"
+
+static inline void qtnf_non_posted_write(u32 val, void __iomem *basereg)
+{
+ writel(val, basereg);
+
+ /* flush posted write */
+ readl(basereg);
+}
+
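+/* All updates of the cached HDP interrupt mask and of the
+ * PCIE_HDP_INT_EN register below are serialized by priv->irq_lock.
+ */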
+static inline void qtnf_init_hdp_irqs(struct qtnf_pcie_bus_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->irq_lock, flags);
+ priv->pcie_irq_mask = (PCIE_HDP_INT_RX_BITS | PCIE_HDP_INT_TX_BITS);
+ spin_unlock_irqrestore(&priv->irq_lock, flags);
+}
+
+static inline void qtnf_enable_hdp_irqs(struct qtnf_pcie_bus_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->irq_lock, flags);
+ writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
+ spin_unlock_irqrestore(&priv->irq_lock, flags);
+}
+
+static inline void qtnf_disable_hdp_irqs(struct qtnf_pcie_bus_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->irq_lock, flags);
+ writel(0x0, PCIE_HDP_INT_EN(priv->pcie_reg_base));
+ spin_unlock_irqrestore(&priv->irq_lock, flags);
+}
+
+static inline void qtnf_en_rxdone_irq(struct qtnf_pcie_bus_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->irq_lock, flags);
+ priv->pcie_irq_mask |= PCIE_HDP_INT_RX_BITS;
+ writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
+ spin_unlock_irqrestore(&priv->irq_lock, flags);
+}
+
+static inline void qtnf_dis_rxdone_irq(struct qtnf_pcie_bus_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->irq_lock, flags);
+ priv->pcie_irq_mask &= ~PCIE_HDP_INT_RX_BITS;
+ writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
+ spin_unlock_irqrestore(&priv->irq_lock, flags);
+}
+
+static inline void qtnf_en_txdone_irq(struct qtnf_pcie_bus_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->irq_lock, flags);
+ priv->pcie_irq_mask |= PCIE_HDP_INT_TX_BITS;
+ writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
+ spin_unlock_irqrestore(&priv->irq_lock, flags);
+}
+
+static inline void qtnf_dis_txdone_irq(struct qtnf_pcie_bus_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->irq_lock, flags);
+ priv->pcie_irq_mask &= ~PCIE_HDP_INT_TX_BITS;
+ writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
+ spin_unlock_irqrestore(&priv->irq_lock, flags);
+}
+
+static int qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv)
+{
+ struct pci_dev *pdev = priv->pdev;
+
+ /* fall back to legacy INTx interrupts by default */
+ priv->msi_enabled = 0;
+
+ /* check if MSI capability is available */
+ if (use_msi) {
+ if (!pci_enable_msi(pdev)) {
+ pr_debug("MSI interrupt enabled\n");
+ priv->msi_enabled = 1;
+ } else {
+ pr_warn("failed to enable MSI interrupts");
+ }
+ }
+
+ if (!priv->msi_enabled) {
+ pr_warn("legacy PCIE interrupts enabled\n");
+ pci_intx(pdev, 1);
+ }
+
+ return 0;
+}
+
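+/* deassert the emulated legacy INTx by clearing the PEARL assert bit */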
+static void qtnf_deassert_intx(struct qtnf_pcie_bus_priv *priv)
+{
+ void __iomem *reg = priv->sysctl_bar + PEARL_PCIE_CFG0_OFFSET;
+ u32 cfg;
+
+ cfg = readl(reg);
+ cfg &= ~PEARL_ASSERT_INTX;
+ qtnf_non_posted_write(cfg, reg);
+}
+
+static void qtnf_ipc_gen_ep_int(void *arg)
+{
+ const struct qtnf_pcie_bus_priv *priv = arg;
+ const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_IPC_IRQ);
+ void __iomem *reg = priv->sysctl_bar +
+ QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;
+
+ qtnf_non_posted_write(data, reg);
+}
+
+static void __iomem *qtnf_map_bar(struct qtnf_pcie_bus_priv *priv, u8 index)
+{
+ void __iomem *vaddr;
+ dma_addr_t busaddr;
+ size_t len;
+ int ret;
+
+ ret = pcim_iomap_regions(priv->pdev, 1 << index, DRV_NAME);
+ if (ret)
+ return IOMEM_ERR_PTR(ret);
+
+ busaddr = pci_resource_start(priv->pdev, index);
+ vaddr = pcim_iomap_table(priv->pdev)[index];
+ len = pci_resource_len(priv->pdev, index);
+
+ pr_debug("BAR%u vaddr=0x%p busaddr=%pad len=%u\n",
+ index, vaddr, &busaddr, (int)len);
+
+ return vaddr;
+}
+
+static void qtnf_pcie_control_rx_callback(void *arg, const u8 *buf, size_t len)
+{
+ struct qtnf_pcie_bus_priv *priv = arg;
+ struct qtnf_bus *bus = pci_get_drvdata(priv->pdev);
+ struct sk_buff *skb;
+
+ if (unlikely(len == 0)) {
+ pr_warn("zero length packet received\n");
+ return;
+ }
+
+ skb = __dev_alloc_skb(len, GFP_KERNEL);
+
+ if (unlikely(!skb)) {
+ pr_err("failed to allocate skb\n");
+ return;
+ }
+
+ skb_put_data(skb, buf, len);
+
+ qtnf_trans_handle_rx_ctl_packet(bus, skb);
+}
+
+static int qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv)
+{
+ struct qtnf_shm_ipc_region __iomem *ipc_tx_reg;
+ struct qtnf_shm_ipc_region __iomem *ipc_rx_reg;
+ const struct qtnf_shm_ipc_int ipc_int = { qtnf_ipc_gen_ep_int, priv };
+ const struct qtnf_shm_ipc_rx_callback rx_callback = {
+ qtnf_pcie_control_rx_callback, priv };
+
+ ipc_tx_reg = &priv->bda->bda_shm_reg1;
+ ipc_rx_reg = &priv->bda->bda_shm_reg2;
+
+ qtnf_shm_ipc_init(&priv->shm_ipc_ep_in, QTNF_SHM_IPC_OUTBOUND,
+ ipc_tx_reg, priv->workqueue,
+ &ipc_int, &rx_callback);
+ qtnf_shm_ipc_init(&priv->shm_ipc_ep_out, QTNF_SHM_IPC_INBOUND,
+ ipc_rx_reg, priv->workqueue,
+ &ipc_int, &rx_callback);
+
+ return 0;
+}
+
+static void qtnf_pcie_free_shm_ipc(struct qtnf_pcie_bus_priv *priv)
+{
+ qtnf_shm_ipc_free(&priv->shm_ipc_ep_in);
+ qtnf_shm_ipc_free(&priv->shm_ipc_ep_out);
+}
+
+static int qtnf_pcie_init_memory(struct qtnf_pcie_bus_priv *priv)
+{
+ priv->sysctl_bar = qtnf_map_bar(priv, QTN_SYSCTL_BAR);
+ if (IS_ERR_OR_NULL(priv->sysctl_bar)) {
+ pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR);
+ return -ENOMEM;
+ }
+
+ priv->dmareg_bar = qtnf_map_bar(priv, QTN_DMA_BAR);
+ if (IS_ERR_OR_NULL(priv->dmareg_bar)) {
+ pr_err("failed to map BAR%u\n", QTN_DMA_BAR);
+ return -ENOMEM;
+ }
+
+ priv->epmem_bar = qtnf_map_bar(priv, QTN_SHMEM_BAR);
+ if (IS_ERR_OR_NULL(priv->epmem_bar)) {
+ pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR);
+ return -ENOMEM;
+ }
+
+ priv->pcie_reg_base = priv->dmareg_bar;
+ priv->bda = priv->epmem_bar;
+ writel(priv->msi_enabled, &priv->bda->bda_rc_msi_enabled);
+
+ return 0;
+}
+
+static int
+qtnf_pcie_init_dma_mask(struct qtnf_pcie_bus_priv *priv, u64 dma_mask)
+{
+ int ret;
+
+ ret = dma_supported(&priv->pdev->dev, dma_mask);
+ if (!ret) {
+ pr_err("DMA mask %llu not supported\n", dma_mask);
+ return ret;
+ }
+
+ ret = pci_set_dma_mask(priv->pdev, dma_mask);
+ if (ret) {
+ pr_err("failed to set DMA mask %llu\n", dma_mask);
+ return ret;
+ }
+
+ ret = pci_set_consistent_dma_mask(priv->pdev, dma_mask);
+ if (ret) {
+ pr_err("failed to set consistent DMA mask %llu\n", dma_mask);
+ return ret;
+ }
+
+ return ret;
+}
+
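+/* clamp MPS to the smaller of the device maximum and the setting of
+ * the parent bridge, if any
+ */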
+static void qtnf_tune_pcie_mps(struct qtnf_pcie_bus_priv *priv)
+{
+ struct pci_dev *pdev = priv->pdev;
+ struct pci_dev *parent;
+ int mps_p, mps_o, mps_m, mps;
+ int ret;
+
+ /* current mps */
+ mps_o = pcie_get_mps(pdev);
+
+ /* maximum supported mps */
+ mps_m = 128 << pdev->pcie_mpss;
+
+ /* suggested new mps value */
+ mps = mps_m;
+
+ if (pdev->bus && pdev->bus->self) {
+ /* parent (bus) mps */
+ parent = pdev->bus->self;
+
+ if (pci_is_pcie(parent)) {
+ mps_p = pcie_get_mps(parent);
+ mps = min(mps_m, mps_p);
+ }
+ }
+
+ ret = pcie_set_mps(pdev, mps);
+ if (ret) {
+ pr_err("failed to set mps to %d, keep using current %d\n",
+ mps, mps_o);
+ priv->mps = mps_o;
+ return;
+ }
+
+ pr_debug("set mps to %d (was %d, max %d)\n", mps, mps_o, mps_m);
+ priv->mps = mps;
+}
+
+static int qtnf_is_state(__le32 __iomem *reg, u32 state)
+{
+ u32 s = readl(reg);
+
+ return s & state;
+}
+
+static void qtnf_set_state(__le32 __iomem *reg, u32 state)
+{
+ u32 s = readl(reg);
+
+ qtnf_non_posted_write(state | s, reg);
+}
+
+static void qtnf_clear_state(__le32 __iomem *reg, u32 state)
+{
+ u32 s = readl(reg);
+
+ qtnf_non_posted_write(s & ~state, reg);
+}
+
+static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms)
+{
+ u32 timeout = 0;
+
+ while (qtnf_is_state(reg, state) == 0) {
+ usleep_range(1000, 1200);
+ if (++timeout > delay_in_ms)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int alloc_skb_array(struct qtnf_pcie_bus_priv *priv)
+{
+ struct sk_buff **vaddr;
+ int len;
+
+ len = priv->tx_bd_num * sizeof(*priv->tx_skb) +
+ priv->rx_bd_num * sizeof(*priv->rx_skb);
+ vaddr = devm_kzalloc(&priv->pdev->dev, len, GFP_KERNEL);
+
+ if (!vaddr)
+ return -ENOMEM;
+
+ priv->tx_skb = vaddr;
+
+ vaddr += priv->tx_bd_num;
+ priv->rx_skb = vaddr;
+
+ return 0;
+}
+
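+/* Allocate one coherent DMA area holding both descriptor rings, then
+ * program the Rx ring base and size into the EP's "TX host queue"
+ * registers (register names follow the endpoint's viewpoint: host Rx
+ * is the endpoint's Tx).
+ */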
+static int alloc_bd_table(struct qtnf_pcie_bus_priv *priv)
+{
+ dma_addr_t paddr;
+ void *vaddr;
+ int len;
+
+ len = priv->tx_bd_num * sizeof(struct qtnf_tx_bd) +
+ priv->rx_bd_num * sizeof(struct qtnf_rx_bd);
+
+ vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
+ if (!vaddr)
+ return -ENOMEM;
+
+ /* tx bd */
+
+ memset(vaddr, 0, len);
+
+ priv->bd_table_vaddr = vaddr;
+ priv->bd_table_paddr = paddr;
+ priv->bd_table_len = len;
+
+ priv->tx_bd_vbase = vaddr;
+ priv->tx_bd_pbase = paddr;
+
+ pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
+
+ priv->tx_bd_reclaim_start = 0;
+ priv->tx_bd_index = 0;
+ priv->tx_queue_len = 0;
+
+ /* rx bd */
+
+ vaddr = ((struct qtnf_tx_bd *)vaddr) + priv->tx_bd_num;
+ paddr += priv->tx_bd_num * sizeof(struct qtnf_tx_bd);
+
+ priv->rx_bd_vbase = vaddr;
+ priv->rx_bd_pbase = paddr;
+
+ writel(QTN_HOST_LO32(paddr),
+ PCIE_HDP_TX_HOST_Q_BASE_L(priv->pcie_reg_base));
+ writel(QTN_HOST_HI32(paddr),
+ PCIE_HDP_TX_HOST_Q_BASE_H(priv->pcie_reg_base));
+ writel(priv->rx_bd_num | (sizeof(struct qtnf_rx_bd)) << 16,
+ PCIE_HDP_TX_HOST_Q_SZ_CTRL(priv->pcie_reg_base));
+
+ priv->hw_txproc_wr_ptr = priv->rx_bd_num - rx_bd_reserved_param;
+
+ writel(priv->hw_txproc_wr_ptr,
+ PCIE_HDP_TX_HOST_Q_WR_PTR(priv->pcie_reg_base));
+
+ pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
+
+ priv->rx_bd_index = 0;
+
+ return 0;
+}
+
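+/* attach a fresh skb to Rx descriptor @rx_bd_index and publish its DMA
+ * address through the HHBM buffer pointer registers
+ */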
+static int skb2rbd_attach(struct qtnf_pcie_bus_priv *priv, u16 rx_bd_index)
+{
+ struct qtnf_rx_bd *rxbd;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+
+ skb = __dev_alloc_skb(SKB_BUF_SIZE + NET_IP_ALIGN,
+ GFP_ATOMIC);
+ if (!skb) {
+ priv->rx_skb[rx_bd_index] = NULL;
+ return -ENOMEM;
+ }
+
+ priv->rx_skb[rx_bd_index] = skb;
+
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ rxbd = &priv->rx_bd_vbase[rx_bd_index];
+
+ paddr = pci_map_single(priv->pdev, skb->data,
+ SKB_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(priv->pdev, paddr)) {
+ pr_err("skb DMA mapping error: %pad\n", &paddr);
+ return -ENOMEM;
+ }
+
+ writel(QTN_HOST_LO32(paddr),
+ PCIE_HDP_HHBM_BUF_PTR(priv->pcie_reg_base));
+ writel(QTN_HOST_HI32(paddr),
+ PCIE_HDP_HHBM_BUF_PTR_H(priv->pcie_reg_base));
+
+ /* keep rx skb paddrs in rx buffer descriptors for cleanup purposes */
+ rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
+ rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr));
+
+ rxbd->info = 0x0;
+
+ return 0;
+}
+
+static int alloc_rx_buffers(struct qtnf_pcie_bus_priv *priv)
+{
+ u16 i;
+ int ret = 0;
+
+ memset(priv->rx_bd_vbase, 0x0,
+ priv->rx_bd_num * sizeof(struct qtnf_rx_bd));
+
+ for (i = 0; i < priv->rx_bd_num; i++) {
+ ret = skb2rbd_attach(priv, i);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/* all rx/tx activity should have ceased before calling this function */
+static void free_xfer_buffers(void *data)
+{
+ struct qtnf_pcie_bus_priv *priv = (struct qtnf_pcie_bus_priv *)data;
+ struct qtnf_rx_bd *rxbd;
+ dma_addr_t paddr;
+ int i;
+
+ /* free rx buffers */
+ for (i = 0; i < priv->rx_bd_num; i++) {
+ if (priv->rx_skb[i]) {
+ rxbd = &priv->rx_bd_vbase[i];
+ paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
+ le32_to_cpu(rxbd->addr));
+ pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+
+ dev_kfree_skb_any(priv->rx_skb[i]);
+ }
+ }
+
+ /* free tx buffers */
+ for (i = 0; i < priv->tx_bd_num; i++) {
+ if (priv->tx_skb[i]) {
+ dev_kfree_skb_any(priv->tx_skb[i]);
+ priv->tx_skb[i] = NULL;
+ }
+ }
+}
+
+static int qtnf_pcie_init_xfer(struct qtnf_pcie_bus_priv *priv)
+{
+ int ret;
+
+ priv->tx_bd_num = tx_bd_size_param;
+ priv->rx_bd_num = rx_bd_size_param;
+
+ ret = alloc_skb_array(priv);
+ if (ret) {
+ pr_err("failed to allocate skb array\n");
+ return ret;
+ }
+
+ ret = alloc_bd_table(priv);
+ if (ret) {
+ pr_err("failed to allocate bd table\n");
+ return ret;
+ }
+
+ ret = alloc_rx_buffers(priv);
+ if (ret) {
+ pr_err("failed to allocate rx buffers\n");
+ return ret;
+ }
+
+ return ret;
+}
+
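+/* Free skbs for Tx descriptors that the endpoint has consumed, as
+ * reported by the HDP RX0 DMA counter; returns the number reclaimed.
+ */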
+static int qtnf_pcie_data_tx_reclaim(struct qtnf_pcie_bus_priv *priv)
+{
+ struct qtnf_tx_bd *txbd;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int last_sent;
+ int count;
+ int i;
+
+ last_sent = readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base))
+ % priv->tx_bd_num;
+ i = priv->tx_bd_reclaim_start;
+ count = 0;
+
+ while (i != last_sent) {
+ skb = priv->tx_skb[i];
+ if (!skb)
+ break;
+
+ txbd = &priv->tx_bd_vbase[i];
+ paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
+ le32_to_cpu(txbd->addr));
+ pci_unmap_single(priv->pdev, paddr, skb->len, PCI_DMA_TODEVICE);
+
+ if (skb->dev) {
+ skb->dev->stats.tx_packets++;
+ skb->dev->stats.tx_bytes += skb->len;
+
+ if (netif_queue_stopped(skb->dev))
+ netif_wake_queue(skb->dev);
+ }
+
+ dev_kfree_skb_any(skb);
+ priv->tx_skb[i] = NULL;
+ priv->tx_queue_len--;
+ count++;
+
+ if (++i >= priv->tx_bd_num)
+ i = 0;
+ }
+
+ priv->tx_bd_reclaim_start = i;
+ priv->tx_reclaim_done += count;
+ priv->tx_reclaim_req++;
+
+ return count;
+}
+
+static bool qtnf_tx_queue_ready(struct qtnf_pcie_bus_priv *priv)
+{
+ if (priv->tx_queue_len >= priv->tx_bd_num - 1) {
+ pr_err_ratelimited("reclaim full Tx queue\n");
+ qtnf_pcie_data_tx_reclaim(priv);
+
+ if (priv->tx_queue_len >= priv->tx_bd_num - 1) {
+ priv->tx_full_count++;
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
+{
+ struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
+ dma_addr_t txbd_paddr, skb_paddr;
+ struct qtnf_tx_bd *txbd;
+ unsigned long flags;
+ int len, i;
+ u32 info;
+ int ret = 0;
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ priv->tx_done_count++;
+
+ if (!qtnf_tx_queue_ready(priv)) {
+ if (skb->dev)
+ netif_stop_queue(skb->dev);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ i = priv->tx_bd_index;
+ priv->tx_skb[i] = skb;
+ len = skb->len;
+
+ skb_paddr = pci_map_single(priv->pdev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(priv->pdev, skb_paddr)) {
+ pr_err("skb DMA mapping error: %pad\n", &skb_paddr);
+ ret = -ENOMEM;
+ goto tx_done;
+ }
+
+ txbd = &priv->tx_bd_vbase[i];
+ txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));
+ txbd->addr_h = cpu_to_le32(QTN_HOST_HI32(skb_paddr));
+
+ info = (len & QTN_PCIE_TX_DESC_LEN_MASK) << QTN_PCIE_TX_DESC_LEN_SHIFT;
+ txbd->info = cpu_to_le32(info);
+
+ /* sync up all descriptor updates before passing them to EP */
+ dma_wmb();
+
+ /* write new TX descriptor to PCIE_RX_FIFO on EP */
+ txbd_paddr = priv->tx_bd_pbase + i * sizeof(struct qtnf_tx_bd);
+ writel(QTN_HOST_LO32(txbd_paddr),
+ PCIE_HDP_HOST_WR_DESC0(priv->pcie_reg_base));
+ writel(QTN_HOST_HI32(txbd_paddr),
+ PCIE_HDP_HOST_WR_DESC0_H(priv->pcie_reg_base));
+
+ if (++i >= priv->tx_bd_num)
+ i = 0;
+
+ priv->tx_bd_index = i;
+ priv->tx_queue_len++;
+
+tx_done:
+ if (ret && skb) {
+ pr_err_ratelimited("drop skb\n");
+ if (skb->dev)
+ skb->dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ }
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb)
+{
+ struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
+
+ return qtnf_shm_ipc_send(&priv->shm_ipc_ep_in, skb->data, skb->len);
+}
+
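+/* IRQ handler: service the SHM IPC endpoints, then defer Rx work to
+ * NAPI and Tx reclaim to the tasklet, masking each source until its
+ * bottom half re-enables it.
+ */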
+static irqreturn_t qtnf_interrupt(int irq, void *data)
+{
+ struct qtnf_bus *bus = (struct qtnf_bus *)data;
+ struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
+ u32 status;
+
+ priv->pcie_irq_count++;
+ status = readl(PCIE_HDP_INT_STATUS(priv->pcie_reg_base));
+
+ qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
+ qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);
+
+ if (!(status & priv->pcie_irq_mask))
+ goto irq_done;
+
+ if (status & PCIE_HDP_INT_RX_BITS) {
+ priv->pcie_irq_rx_count++;
+ qtnf_dis_rxdone_irq(priv);
+ napi_schedule(&bus->mux_napi);
+ }
+
+ if (status & PCIE_HDP_INT_TX_BITS) {
+ priv->pcie_irq_tx_count++;
+ qtnf_dis_txdone_irq(priv);
+ tasklet_hi_schedule(&priv->reclaim_tq);
+ }
+
+irq_done:
+ /* H/W workaround: clean all bits, not only enabled */
+ qtnf_non_posted_write(~0U, PCIE_HDP_INT_STATUS(priv->pcie_reg_base));
+
+ if (!priv->msi_enabled)
+ qtnf_deassert_intx(priv);
+
+ return IRQ_HANDLED;
+}
+
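+/* advance the cached Rx ring write pointer, wrapping at rx_bd_num */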
+static inline void hw_txproc_wr_ptr_inc(struct qtnf_pcie_bus_priv *priv)
+{
+ u32 index;
+
+ index = priv->hw_txproc_wr_ptr;
+
+ if (++index >= priv->rx_bd_num)
+ index = 0;
+
+ priv->hw_txproc_wr_ptr = index;
+}
+
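+/* NAPI poll: pass completed Rx buffers up the stack, re-arm each slot
+ * with a new skb, and publish the updated write pointer to the EP.
+ */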
+static int qtnf_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
+ struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
+ struct net_device *ndev = NULL;
+ struct sk_buff *skb = NULL;
+ int processed = 0;
+ struct qtnf_rx_bd *rxbd;
+ dma_addr_t skb_paddr;
+ u32 descw;
+ u16 index;
+ int ret;
+
+ index = priv->rx_bd_index;
+ rxbd = &priv->rx_bd_vbase[index];
+
+ descw = le32_to_cpu(rxbd->info);
+
+ while ((descw & QTN_TXDONE_MASK) && (processed < budget)) {
+ skb = priv->rx_skb[index];
+
+ if (likely(skb)) {
+ skb_put(skb, QTN_GET_LEN(descw));
+
+ skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
+ le32_to_cpu(rxbd->addr));
+ pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+
+ ndev = qtnf_classify_skb(bus, skb);
+ if (likely(ndev)) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += skb->len;
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb(skb);
+ } else {
+ pr_debug("drop untagged skb\n");
+ bus->mux_dev.stats.rx_dropped++;
+ dev_kfree_skb_any(skb);
+ }
+
+ processed++;
+ } else {
+ pr_err("missing rx_skb[%d]\n", index);
+ }
+
+ /* attached rx buffer is passed upstream: map a new one */
+ ret = skb2rbd_attach(priv, index);
+ if (likely(!ret)) {
+ if (++index >= priv->rx_bd_num)
+ index = 0;
+
+ priv->rx_bd_index = index;
+ hw_txproc_wr_ptr_inc(priv);
+
+ rxbd = &priv->rx_bd_vbase[index];
+ descw = le32_to_cpu(rxbd->info);
+ } else {
+ pr_err("failed to allocate new rx_skb[%d]\n", index);
+ break;
+ }
+
+ writel(priv->hw_txproc_wr_ptr,
+ PCIE_HDP_TX_HOST_Q_WR_PTR(priv->pcie_reg_base));
+ }
+
+ if (processed < budget) {
+ napi_complete(napi);
+ qtnf_en_rxdone_irq(priv);
+ }
+
+ return processed;
+}
+
+static void
+qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev)
+{
+ struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
+
+ tasklet_hi_schedule(&priv->reclaim_tq);
+}
+
+static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
+{
+ struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
+
+ qtnf_enable_hdp_irqs(priv);
+ napi_enable(&bus->mux_napi);
+}
+
+static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
+{
+ struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
+
+ napi_disable(&bus->mux_napi);
+ qtnf_disable_hdp_irqs(priv);
+}
+
+static const struct qtnf_bus_ops qtnf_pcie_bus_ops = {
+ /* control path methods */
+ .control_tx = qtnf_pcie_control_tx,
+
+ /* data path methods */
+ .data_tx = qtnf_pcie_data_tx,
+ .data_tx_timeout = qtnf_pcie_data_tx_timeout,
+ .data_rx_start = qtnf_pcie_data_rx_start,
+ .data_rx_stop = qtnf_pcie_data_rx_stop,
+};
+
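+/* Frame one firmware chunk with struct qtnf_pcie_fw_hdr and push it
+ * through the regular data Tx path. Returns the chunk length on
+ * success, or 0 so that the caller retries the same block.
+ */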
+static int qtnf_ep_fw_send(struct qtnf_pcie_bus_priv *priv, u32 size,
+ int blk, const u8 *pblk, const u8 *fw)
+{
+ struct pci_dev *pdev = priv->pdev;
+ struct qtnf_bus *bus = pci_get_drvdata(pdev);
+
+ struct qtnf_pcie_fw_hdr *hdr;
+ u8 *pdata;
+
+ int hds = sizeof(*hdr);
+ struct sk_buff *skb = NULL;
+ int len = 0;
+ int ret;
+
+ skb = __dev_alloc_skb(QTN_PCIE_FW_BUFSZ, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ skb->len = QTN_PCIE_FW_BUFSZ;
+ skb->dev = NULL;
+
+ hdr = (struct qtnf_pcie_fw_hdr *)skb->data;
+ memcpy(hdr->boardflg, QTN_PCIE_BOARDFLG, strlen(QTN_PCIE_BOARDFLG));
+ hdr->fwsize = cpu_to_le32(size);
+ hdr->seqnum = cpu_to_le32(blk);
+
+ if (blk)
+ hdr->type = cpu_to_le32(QTN_FW_DSUB);
+ else
+ hdr->type = cpu_to_le32(QTN_FW_DBEGIN);
+
+ pdata = skb->data + hds;
+
+ len = QTN_PCIE_FW_BUFSZ - hds;
+ if (pblk >= (fw + size - len)) {
+ len = fw + size - pblk;
+ hdr->type = cpu_to_le32(QTN_FW_DEND);
+ }
+
+ hdr->pktlen = cpu_to_le32(len);
+ memcpy(pdata, pblk, len);
+ hdr->crc = cpu_to_le32(~crc32(0, pdata, len));
+
+ ret = qtnf_pcie_data_tx(bus, skb);
+
+ return (ret == NETDEV_TX_OK) ? len : 0;
+}
+
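+/* Upload firmware in header-framed blocks. After every
+ * QTN_PCIE_FW_DLMASK + 1 blocks (and after the final one) run a SYNC
+ * handshake with the EP, rewinding the window if the EP requests retry.
+ */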
+static int
+qtnf_ep_fw_load(struct qtnf_pcie_bus_priv *priv, const u8 *fw, u32 fw_size)
+{
+ int blk_size = QTN_PCIE_FW_BUFSZ - sizeof(struct qtnf_pcie_fw_hdr);
+ int blk_count = fw_size / blk_size + ((fw_size % blk_size) ? 1 : 0);
+ const u8 *pblk = fw;
+ int threshold = 0;
+ int blk = 0;
+ int len;
+
+ pr_debug("FW upload started: fw_addr=0x%p size=%d\n", fw, fw_size);
+
+ while (blk < blk_count) {
+ if (++threshold > 10000) {
+ pr_err("FW upload failed: too many retries\n");
+ return -ETIMEDOUT;
+ }
+
+ len = qtnf_ep_fw_send(priv, fw_size, blk, pblk, fw);
+ if (len <= 0)
+ continue;
+
+ if (!((blk + 1) & QTN_PCIE_FW_DLMASK) ||
+ (blk == (blk_count - 1))) {
+ qtnf_set_state(&priv->bda->bda_rc_state,
+ QTN_RC_FW_SYNC);
+ if (qtnf_poll_state(&priv->bda->bda_ep_state,
+ QTN_EP_FW_SYNC,
+ QTN_FW_DL_TIMEOUT_MS)) {
+ pr_err("FW upload failed: SYNC timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ qtnf_clear_state(&priv->bda->bda_ep_state,
+ QTN_EP_FW_SYNC);
+
+ if (qtnf_is_state(&priv->bda->bda_ep_state,
+ QTN_EP_FW_RETRY)) {
+ if (blk == (blk_count - 1)) {
+ int last_round =
+ blk_count & QTN_PCIE_FW_DLMASK;
+ blk -= last_round;
+ pblk -= ((last_round - 1) *
+ blk_size + len);
+ } else {
+ blk -= QTN_PCIE_FW_DLMASK;
+ pblk -= QTN_PCIE_FW_DLMASK * blk_size;
+ }
+
+ qtnf_clear_state(&priv->bda->bda_ep_state,
+ QTN_EP_FW_RETRY);
+
+ pr_warn("FW upload retry: block #%d\n", blk);
+ continue;
+ }
+
+ qtnf_pcie_data_tx_reclaim(priv);
+ }
+
+ pblk += len;
+ blk++;
+ }
+
+ pr_debug("FW upload completed: totally sent %d blocks\n", blk);
+ return 0;
+}
+
+static void qtnf_firmware_load(const struct firmware *fw, void *context)
+{
+ struct qtnf_pcie_bus_priv *priv = (void *)context;
+ struct pci_dev *pdev = priv->pdev;
+ struct qtnf_bus *bus = pci_get_drvdata(pdev);
+ int ret;
+
+ if (!fw) {
+ pr_err("failed to get firmware %s\n", bus->fwname);
+ goto fw_load_err;
+ }
+
+ ret = qtnf_ep_fw_load(priv, fw->data, fw->size);
+ if (ret) {
+ pr_err("FW upload error\n");
+ goto fw_load_err;
+ }
+
+ if (qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_DONE,
+ QTN_FW_DL_TIMEOUT_MS)) {
+ pr_err("FW bringup timed out\n");
+ goto fw_load_err;
+ }
+
+ bus->fw_state = QTNF_FW_STATE_FW_DNLD_DONE;
+ pr_info("firmware is up and running\n");
+
+fw_load_err:
+
+ if (fw)
+ release_firmware(fw);
+
+ complete(&bus->request_firmware_complete);
+}
+
+static int qtnf_bringup_fw(struct qtnf_bus *bus)
+{
+ struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
+ struct pci_dev *pdev = priv->pdev;
+ int ret;
+ u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK;
+
+ if (flashboot)
+ state |= QTN_RC_FW_FLASHBOOT;
+
+ qtnf_set_state(&priv->bda->bda_rc_state, state);
+
+ if (qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_LOADRDY,
+ QTN_FW_DL_TIMEOUT_MS)) {
+ pr_err("card is not ready\n");
+ return -ETIMEDOUT;
+ }
+
+ qtnf_clear_state(&priv->bda->bda_ep_state, QTN_EP_FW_LOADRDY);
+
+ if (flashboot) {
+ pr_info("Booting FW from flash\n");
+
+ if (!qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_DONE,
+ QTN_FW_DL_TIMEOUT_MS))
+ bus->fw_state = QTNF_FW_STATE_FW_DNLD_DONE;
+
+ return 0;
+ }
+
+ pr_info("starting firmware upload: %s\n", bus->fwname);
+
+ ret = request_firmware_nowait(THIS_MODULE, 1, bus->fwname, &pdev->dev,
+ GFP_KERNEL, priv, qtnf_firmware_load);
+ if (ret < 0)
+ pr_err("request_firmware_nowait error %d\n", ret);
+ else
+ ret = 1;
+
+ return ret;
+}
+
+static void qtnf_reclaim_tasklet_fn(unsigned long data)
+{
+ struct qtnf_pcie_bus_priv *priv = (void *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+ qtnf_pcie_data_tx_reclaim(priv);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ qtnf_en_txdone_irq(priv);
+}
+
+static int qtnf_dbg_mps_show(struct seq_file *s, void *data)
+{
+ struct qtnf_bus *bus = dev_get_drvdata(s->private);
+ struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
+
+ seq_printf(s, "%d\n", priv->mps);
+
+ return 0;
+}
+
+static int qtnf_dbg_msi_show(struct seq_file *s, void *data)
+{
+ struct qtnf_bus *bus = dev_get_drvdata(s->private);
+ struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
+
+ seq_printf(s, "%u\n", priv->msi_enabled);
+
+ return 0;
+}
+
+static int qtnf_dbg_irq_stats(struct seq_file *s, void *data)
+{
+ struct qtnf_bus *bus = dev_get_drvdata(s->private);
+ struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
+
+ seq_printf(s, "pcie_irq_count(%u)\n", priv->pcie_irq_count);
+ seq_printf(s, "pcie_irq_tx_count(%u)\n", priv->pcie_irq_tx_count);
+ seq_printf(s, "pcie_irq_rx_count(%u)\n", priv->pcie_irq_rx_count);
+
+ return 0;
+}
+
+static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data)
+{
+ struct qtnf_bus *bus = dev_get_drvdata(s->private);
+ struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
+
+ seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count);
+ seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
+ seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
+ seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
+ seq_printf(s, "tx_bd_reclaim_start(%u)\n", priv->tx_bd_reclaim_start);
+ seq_printf(s, "tx_bd_index(%u)\n", priv->tx_bd_index);
+ seq_printf(s, "rx_bd_index(%u)\n", priv->rx_bd_index);
+ seq_printf(s, "tx_queue_len(%u)\n", priv->tx_queue_len);
+
+ return 0;
+}
+
+static int qtnf_dbg_shm_stats(struct seq_file *s, void *data)
+{
+ struct qtnf_bus *bus = dev_get_drvdata(s->private);
+ struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
+
+ seq_printf(s, "shm_ipc_ep_in.tx_packet_count(%zu)\n",
+ priv->shm_ipc_ep_in.tx_packet_count);
+ seq_printf(s, "shm_ipc_ep_in.rx_packet_count(%zu)\n",
+ priv->shm_ipc_ep_in.rx_packet_count);
+ seq_printf(s, "shm_ipc_ep_out.tx_packet_count(%zu)\n",
+ priv->shm_ipc_ep_out.tx_timeout_count);
+ seq_printf(s, "shm_ipc_ep_out.rx_packet_count(%zu)\n",
+ priv->shm_ipc_ep_out.rx_packet_count);
+
+ return 0;
+}
+
+static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct qtnf_pcie_bus_priv *pcie_priv;
+ struct qtnf_bus *bus;
+ int ret;
+
+ bus = devm_kzalloc(&pdev->dev,
+ sizeof(*bus) + sizeof(*pcie_priv), GFP_KERNEL);
+ if (!bus) {
+ ret = -ENOMEM;
+ goto err_init;
+ }
+
+ pcie_priv = get_bus_priv(bus);
+
+ pci_set_drvdata(pdev, bus);
+ bus->bus_ops = &qtnf_pcie_bus_ops;
+ bus->dev = &pdev->dev;
+ bus->fw_state = QTNF_FW_STATE_RESET;
+ pcie_priv->pdev = pdev;
+
+ strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME);
+ init_completion(&bus->request_firmware_complete);
+ mutex_init(&bus->bus_lock);
+ spin_lock_init(&pcie_priv->irq_lock);
+ spin_lock_init(&pcie_priv->tx_lock);
+
+ /* init stats */
+ pcie_priv->tx_full_count = 0;
+ pcie_priv->tx_done_count = 0;
+ pcie_priv->pcie_irq_count = 0;
+ pcie_priv->pcie_irq_rx_count = 0;
+ pcie_priv->pcie_irq_tx_count = 0;
+ pcie_priv->tx_reclaim_done = 0;
+ pcie_priv->tx_reclaim_req = 0;
+
+ pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PEARL_PCIE");
+ if (!pcie_priv->workqueue) {
+ pr_err("failed to alloc bus workqueue\n");
+ ret = -ENODEV;
+ goto err_priv;
+ }
+
+ if (!pci_is_pcie(pdev)) {
+ pr_err("device %s is not PCI Express\n", pci_name(pdev));
+ ret = -EIO;
+ goto err_base;
+ }
+
+ qtnf_tune_pcie_mps(pcie_priv);
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ pr_err("failed to init PCI device %x\n", pdev->device);
+ goto err_base;
+ } else {
+ pr_debug("successful init of PCI device %x\n", pdev->device);
+ }
+
+ pcim_pin_device(pdev);
+ pci_set_master(pdev);
+
+ ret = qtnf_pcie_init_irq(pcie_priv);
+ if (ret < 0) {
+ pr_err("irq init failed\n");
+ goto err_base;
+ }
+
+ ret = qtnf_pcie_init_memory(pcie_priv);
+ if (ret < 0) {
+ pr_err("PCIE memory init failed\n");
+ goto err_base;
+ }
+
+ ret = qtnf_pcie_init_shm_ipc(pcie_priv);
+ if (ret < 0) {
+ pr_err("PCIE SHM IPC init failed\n");
+ goto err_base;
+ }
+
+ ret = qtnf_pcie_init_dma_mask(pcie_priv, DMA_BIT_MASK(32));
+ if (ret) {
+ pr_err("PCIE DMA mask init failed\n");
+ goto err_base;
+ }
+
+ ret = devm_add_action(&pdev->dev, free_xfer_buffers, (void *)pcie_priv);
+ if (ret) {
+ pr_err("custom release callback init failed\n");
+ goto err_base;
+ }
+
+ ret = qtnf_pcie_init_xfer(pcie_priv);
+ if (ret) {
+ pr_err("PCIE xfer init failed\n");
+ goto err_base;
+ }
+
+ /* init default irq settings */
+ qtnf_init_hdp_irqs(pcie_priv);
+
+ /* start with disabled irqs */
+ qtnf_disable_hdp_irqs(pcie_priv);
+
+ ret = devm_request_irq(&pdev->dev, pdev->irq, &qtnf_interrupt, 0,
+ "qtnf_pcie_irq", (void *)bus);
+ if (ret) {
+ pr_err("failed to request pcie irq %d\n", pdev->irq);
+ goto err_base;
+ }
+
+ tasklet_init(&pcie_priv->reclaim_tq, qtnf_reclaim_tasklet_fn,
+ (unsigned long)pcie_priv);
+ init_dummy_netdev(&bus->mux_dev);
+ netif_napi_add(&bus->mux_dev, &bus->mux_napi,
+ qtnf_rx_poll, 10);
+
+ ret = qtnf_bringup_fw(bus);
+ if (ret < 0)
+ goto err_bringup_fw;
+ else if (ret)
+ wait_for_completion(&bus->request_firmware_complete);
+
+ if (bus->fw_state != QTNF_FW_STATE_FW_DNLD_DONE) {
+ pr_err("failed to start FW\n");
+ goto err_bringup_fw;
+ }
+
+ if (qtnf_poll_state(&pcie_priv->bda->bda_ep_state, QTN_EP_FW_QLINK_DONE,
+ QTN_FW_QLINK_TIMEOUT_MS)) {
+ pr_err("FW runtime failure\n");
+ goto err_bringup_fw;
+ }
+
+ ret = qtnf_core_attach(bus);
+ if (ret) {
+ pr_err("failed to attach core\n");
+ goto err_bringup_fw;
+ }
+
+ qtnf_debugfs_init(bus, DRV_NAME);
+ qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show);
+ qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show);
+ qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats);
+ qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
+ qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats);
+
+ return 0;
+
+err_bringup_fw:
+ netif_napi_del(&bus->mux_napi);
+
+err_base:
+ flush_workqueue(pcie_priv->workqueue);
+ destroy_workqueue(pcie_priv->workqueue);
+
+err_priv:
+ pci_set_drvdata(pdev, NULL);
+
+err_init:
+ return ret;
+}
+
+static void qtnf_pcie_remove(struct pci_dev *pdev)
+{
+ struct qtnf_pcie_bus_priv *priv;
+ struct qtnf_bus *bus;
+
+ bus = pci_get_drvdata(pdev);
+ if (!bus)
+ return;
+
+ priv = get_bus_priv(bus);
+
+ qtnf_core_detach(bus);
+ netif_napi_del(&bus->mux_napi);
+
+ flush_workqueue(priv->workqueue);
+ destroy_workqueue(priv->workqueue);
+ tasklet_kill(&priv->reclaim_tq);
+
+ qtnf_debugfs_remove(bus);
+
+ qtnf_pcie_free_shm_ipc(priv);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int qtnf_pcie_suspend(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
+static int qtnf_pcie_resume(struct device *dev)
+{
+ return 0;
+}
+
+/* Power Management Hooks */
+static SIMPLE_DEV_PM_OPS(qtnf_pcie_pm_ops, qtnf_pcie_suspend,
+ qtnf_pcie_resume);
+#endif
+
+static const struct pci_device_id qtnf_pcie_devid_table[] = {
+ {
+ PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QTN_PEARL,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ },
+ { },
+};
+
+MODULE_DEVICE_TABLE(pci, qtnf_pcie_devid_table);
+
+static struct pci_driver qtnf_pcie_drv_data = {
+ .name = DRV_NAME,
+ .id_table = qtnf_pcie_devid_table,
+ .probe = qtnf_pcie_probe,
+ .remove = qtnf_pcie_remove,
+#ifdef CONFIG_PM_SLEEP
+ .driver = {
+ .pm = &qtnf_pcie_pm_ops,
+ },
+#endif
+};
+
+static int __init qtnf_pcie_register(void)
+{
+ pr_info("register Quantenna QSR10g FullMAC PCIE driver\n");
+ return pci_register_driver(&qtnf_pcie_drv_data);
+}
+
+static void __exit qtnf_pcie_exit(void)
+{
+ pr_info("unregister Quantenna QSR10g FullMAC PCIE driver\n");
+ pci_unregister_driver(&qtnf_pcie_drv_data);
+}
+
+module_init(qtnf_pcie_register);
+module_exit(qtnf_pcie_exit);
+
+MODULE_AUTHOR("Quantenna Communications");
+MODULE_DESCRIPTION("Quantenna QSR10g PCIe bus driver for 802.11 wireless LAN.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h
new file mode 100644
index 0000000..2a897db
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _QTN_FMAC_PCIE_H_
+#define _QTN_FMAC_PCIE_H_
+
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
+#include "pcie_regs_pearl.h"
+#include "pcie_ipc.h"
+#include "shm_ipc.h"
+
+struct bus;
+
+struct qtnf_pcie_bus_priv {
+ struct pci_dev *pdev;
+
+ /* lock for irq configuration changes */
+ spinlock_t irq_lock;
+
+ /* lock for tx operations */
+ spinlock_t tx_lock;
+ u8 msi_enabled;
+ int mps;
+
+ struct workqueue_struct *workqueue;
+ struct tasklet_struct reclaim_tq;
+
+ void __iomem *sysctl_bar;
+ void __iomem *epmem_bar;
+ void __iomem *dmareg_bar;
+
+ struct qtnf_shm_ipc shm_ipc_ep_in;
+ struct qtnf_shm_ipc shm_ipc_ep_out;
+
+ struct qtnf_pcie_bda __iomem *bda;
+ void __iomem *pcie_reg_base;
+
+ u16 tx_bd_num;
+ u16 rx_bd_num;
+
+ struct sk_buff **tx_skb;
+ struct sk_buff **rx_skb;
+
+ struct qtnf_tx_bd *tx_bd_vbase;
+ dma_addr_t tx_bd_pbase;
+
+ struct qtnf_rx_bd *rx_bd_vbase;
+ dma_addr_t rx_bd_pbase;
+
+ dma_addr_t bd_table_paddr;
+ void *bd_table_vaddr;
+ u32 bd_table_len;
+
+ u32 hw_txproc_wr_ptr;
+
+ u16 tx_bd_reclaim_start;
+ u16 tx_bd_index;
+ u32 tx_queue_len;
+
+ u16 rx_bd_index;
+
+ u32 pcie_irq_mask;
+
+ /* diagnostics stats */
+ u32 pcie_irq_count;
+ u32 pcie_irq_rx_count;
+ u32 pcie_irq_tx_count;
+ u32 tx_full_count;
+ u32 tx_done_count;
+ u32 tx_reclaim_done;
+ u32 tx_reclaim_req;
+};
+
+#endif /* _QTN_FMAC_PCIE_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h
new file mode 100644
index 0000000..e00d508
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _QTN_FMAC_PCIE_IPC_H_
+#define _QTN_FMAC_PCIE_IPC_H_
+
+#include <linux/types.h>
+
+#include "shm_ipc_defs.h"
+
+/* bitmap for EP status and flags: updated by EP, read by RC */
+#define QTN_EP_HAS_UBOOT BIT(0)
+#define QTN_EP_HAS_FIRMWARE BIT(1)
+#define QTN_EP_REQ_UBOOT BIT(2)
+#define QTN_EP_REQ_FIRMWARE BIT(3)
+#define QTN_EP_ERROR_UBOOT BIT(4)
+#define QTN_EP_ERROR_FIRMWARE BIT(5)
+
+#define QTN_EP_FW_LOADRDY BIT(8)
+#define QTN_EP_FW_SYNC BIT(9)
+#define QTN_EP_FW_RETRY BIT(10)
+#define QTN_EP_FW_QLINK_DONE BIT(15)
+#define QTN_EP_FW_DONE BIT(16)
+
+/* bitmap for RC status and flags: updated by RC, read by EP */
+#define QTN_RC_PCIE_LINK BIT(0)
+#define QTN_RC_NET_LINK BIT(1)
+#define QTN_RC_FW_FLASHBOOT BIT(5)
+#define QTN_RC_FW_QLINK BIT(7)
+#define QTN_RC_FW_LOADRDY BIT(8)
+#define QTN_RC_FW_SYNC BIT(9)
+
+/* state transition timeouts */
+#define QTN_FW_DL_TIMEOUT_MS 3000
+#define QTN_FW_QLINK_TIMEOUT_MS 30000
+
+#define PCIE_HDP_INT_RX_BITS (0 \
+ | PCIE_HDP_INT_EP_TXDMA \
+ | PCIE_HDP_INT_EP_TXEMPTY \
+ )
+
+#define PCIE_HDP_INT_TX_BITS (0 \
+ | PCIE_HDP_INT_EP_RXDMA \
+ )
+
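+/* split/join 64-bit DMA addresses into the 32-bit halves used by
+ * hardware descriptors
+ */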
+#if BITS_PER_LONG == 64
+#define QTN_HOST_HI32(a) ((u32)(((u64)a) >> 32))
+#define QTN_HOST_LO32(a) ((u32)(((u64)a) & 0xffffffffUL))
+#define QTN_HOST_ADDR(h, l) ((((u64)h) << 32) | ((u64)l))
+#elif BITS_PER_LONG == 32
+#define QTN_HOST_HI32(a) 0
+#define QTN_HOST_LO32(a) ((u32)(((u32)a) & 0xffffffffUL))
+#define QTN_HOST_ADDR(h, l) ((u32)l)
+#else
+#error Unexpected BITS_PER_LONG value
+#endif
+
+#define QTN_SYSCTL_BAR 0
+#define QTN_SHMEM_BAR 2
+#define QTN_DMA_BAR 3
+
+#define QTN_PCIE_BDA_VERSION 0x1002
+
+#define PCIE_BDA_NAMELEN 32
+#define PCIE_HHBM_MAX_SIZE 512
+
+#define SKB_BUF_SIZE 2048
+
+#define QTN_PCIE_BOARDFLG "PCIEQTN"
+#define QTN_PCIE_FW_DLMASK 0xF
+#define QTN_PCIE_FW_BUFSZ 2048
+
+#define QTN_ENET_ADDR_LENGTH 6
+
+#define QTN_TXDONE_MASK ((u32)0x80000000)
+#define QTN_GET_LEN(x) ((x) & 0xFFFF)
+
+#define QTN_PCIE_TX_DESC_LEN_MASK 0xFFFF
+#define QTN_PCIE_TX_DESC_LEN_SHIFT 0
+#define QTN_PCIE_TX_DESC_PORT_MASK 0xF
+#define QTN_PCIE_TX_DESC_PORT_SHIFT 16
+#define QTN_PCIE_TX_DESC_TQE_BIT BIT(24)
+
+#define QTN_EP_LHOST_TQE_PORT 4
+
+enum qtnf_pcie_bda_ipc_flags {
+ QTN_PCIE_IPC_FLAG_HBM_MAGIC = BIT(0),
+ QTN_PCIE_IPC_FLAG_SHM_PIO = BIT(1),
+};
+
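+/* Board data area shared with the endpoint through the shmem BAR; all
+ * multi-byte fields are little-endian.
+ */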
+struct qtnf_pcie_bda {
+ __le16 bda_len;
+ __le16 bda_version;
+ __le32 bda_pci_endian;
+ __le32 bda_ep_state;
+ __le32 bda_rc_state;
+ __le32 bda_dma_mask;
+ __le32 bda_msi_addr;
+ __le32 bda_flashsz;
+ u8 bda_boardname[PCIE_BDA_NAMELEN];
+ __le32 bda_rc_msi_enabled;
+ __le32 bda_hhbm_list[PCIE_HHBM_MAX_SIZE];
+ __le32 bda_dsbw_start_index;
+ __le32 bda_dsbw_end_index;
+ __le32 bda_dsbw_total_bytes;
+ __le32 bda_rc_tx_bd_base;
+ __le32 bda_rc_tx_bd_num;
+ u8 bda_pcie_mac[QTN_ENET_ADDR_LENGTH];
+ struct qtnf_shm_ipc_region bda_shm_reg1 __aligned(4096); /* host TX */
+ struct qtnf_shm_ipc_region bda_shm_reg2 __aligned(4096); /* host RX */
+} __packed;
+
+struct qtnf_tx_bd {
+ __le32 addr;
+ __le32 addr_h;
+ __le32 info;
+ __le32 info_h;
+} __packed;
+
+struct qtnf_rx_bd {
+ __le32 addr;
+ __le32 addr_h;
+ __le32 info;
+ __le32 info_h;
+ __le32 next_ptr;
+ __le32 next_ptr_h;
+} __packed;
+
+enum qtnf_fw_loadtype {
+ QTN_FW_DBEGIN,
+ QTN_FW_DSUB,
+ QTN_FW_DEND,
+ QTN_FW_CTRL
+};
+
+struct qtnf_pcie_fw_hdr {
+ u8 boardflg[8];
+ __le32 fwsize;
+ __le32 seqnum;
+ __le32 type;
+ __le32 pktlen;
+ __le32 crc;
+} __packed;
+
+#endif /* _QTN_FMAC_PCIE_IPC_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h
new file mode 100644
index 0000000..78715b8
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2015 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __PEARL_PCIE_H
+#define __PEARL_PCIE_H
+
+#define PCIE_GEN2_BASE (0xe9000000)
+#define PCIE_GEN3_BASE (0xe7000000)
+
+#define PEARL_CUR_PCIE_BASE (PCIE_GEN2_BASE)
+#define PCIE_HDP_OFFSET (0x2000)
+
+#define PCIE_HDP_CTRL(base) ((base) + 0x2c00)
+#define PCIE_HDP_AXI_CTRL(base) ((base) + 0x2c04)
+#define PCIE_HDP_HOST_WR_DESC0(base) ((base) + 0x2c10)
+#define PCIE_HDP_HOST_WR_DESC0_H(base) ((base) + 0x2c14)
+#define PCIE_HDP_HOST_WR_DESC1(base) ((base) + 0x2c18)
+#define PCIE_HDP_HOST_WR_DESC1_H(base) ((base) + 0x2c1c)
+#define PCIE_HDP_HOST_WR_DESC2(base) ((base) + 0x2c20)
+#define PCIE_HDP_HOST_WR_DESC2_H(base) ((base) + 0x2c24)
+#define PCIE_HDP_HOST_WR_DESC3(base) ((base) + 0x2c28)
+#define PCIE_HDP_HOST_WR_DESC4_H(base) ((base) + 0x2c2c)
+#define PCIE_HDP_RX_INT_CTRL(base) ((base) + 0x2c30)
+#define PCIE_HDP_TX_INT_CTRL(base) ((base) + 0x2c34)
+#define PCIE_HDP_INT_STATUS(base) ((base) + 0x2c38)
+#define PCIE_HDP_INT_EN(base) ((base) + 0x2c3c)
+#define PCIE_HDP_RX_DESC0_PTR(base) ((base) + 0x2c40)
+#define PCIE_HDP_RX_DESC0_NOE(base) ((base) + 0x2c44)
+#define PCIE_HDP_RX_DESC1_PTR(base) ((base) + 0x2c48)
+#define PCIE_HDP_RX_DESC1_NOE(base) ((base) + 0x2c4c)
+#define PCIE_HDP_RX_DESC2_PTR(base) ((base) + 0x2c50)
+#define PCIE_HDP_RX_DESC2_NOE(base) ((base) + 0x2c54)
+#define PCIE_HDP_RX_DESC3_PTR(base) ((base) + 0x2c58)
+#define PCIE_HDP_RX_DESC3_NOE(base) ((base) + 0x2c5c)
+
+#define PCIE_HDP_TX0_BASE_ADDR(base) ((base) + 0x2c60)
+#define PCIE_HDP_TX1_BASE_ADDR(base) ((base) + 0x2c64)
+#define PCIE_HDP_TX0_Q_CTRL(base) ((base) + 0x2c70)
+#define PCIE_HDP_TX1_Q_CTRL(base) ((base) + 0x2c74)
+#define PCIE_HDP_CFG0(base) ((base) + 0x2c80)
+#define PCIE_HDP_CFG1(base) ((base) + 0x2c84)
+#define PCIE_HDP_CFG2(base) ((base) + 0x2c88)
+#define PCIE_HDP_CFG3(base) ((base) + 0x2c8c)
+#define PCIE_HDP_CFG4(base) ((base) + 0x2c90)
+#define PCIE_HDP_CFG5(base) ((base) + 0x2c94)
+#define PCIE_HDP_CFG6(base) ((base) + 0x2c98)
+#define PCIE_HDP_CFG7(base) ((base) + 0x2c9c)
+#define PCIE_HDP_CFG8(base) ((base) + 0x2ca0)
+#define PCIE_HDP_CFG9(base) ((base) + 0x2ca4)
+#define PCIE_HDP_CFG10(base) ((base) + 0x2ca8)
+#define PCIE_HDP_CFG11(base) ((base) + 0x2cac)
+#define PCIE_INT(base) ((base) + 0x2cb0)
+#define PCIE_INT_MASK(base) ((base) + 0x2cb4)
+#define PCIE_MSI_MASK(base) ((base) + 0x2cb8)
+#define PCIE_MSI_PNDG(base) ((base) + 0x2cbc)
+#define PCIE_PRI_CFG(base) ((base) + 0x2cc0)
+#define PCIE_PHY_CR(base) ((base) + 0x2cc4)
+#define PCIE_HDP_CTAG_CTRL(base) ((base) + 0x2cf4)
+#define PCIE_HDP_HHBM_BUF_PTR(base) ((base) + 0x2d00)
+#define PCIE_HDP_HHBM_BUF_PTR_H(base) ((base) + 0x2d04)
+#define PCIE_HDP_HHBM_BUF_FIFO_NOE(base) ((base) + 0x2d04)
+#define PCIE_HDP_RX0DMA_CNT(base) ((base) + 0x2d10)
+#define PCIE_HDP_RX1DMA_CNT(base) ((base) + 0x2d14)
+#define PCIE_HDP_RX2DMA_CNT(base) ((base) + 0x2d18)
+#define PCIE_HDP_RX3DMA_CNT(base) ((base) + 0x2d1c)
+#define PCIE_HDP_TX0DMA_CNT(base) ((base) + 0x2d20)
+#define PCIE_HDP_TX1DMA_CNT(base) ((base) + 0x2d24)
+#define PCIE_HDP_RXDMA_CTRL(base) ((base) + 0x2d28)
+#define PCIE_HDP_TX_HOST_Q_SZ_CTRL(base) ((base) + 0x2d2c)
+#define PCIE_HDP_TX_HOST_Q_BASE_L(base) ((base) + 0x2d30)
+#define PCIE_HDP_TX_HOST_Q_BASE_H(base) ((base) + 0x2d34)
+#define PCIE_HDP_TX_HOST_Q_WR_PTR(base) ((base) + 0x2d38)
+#define PCIE_HDP_TX_HOST_Q_RD_PTR(base) ((base) + 0x2d3c)
+#define PCIE_HDP_TX_HOST_Q_STS(base) ((base) + 0x2d40)
+
+/* Host HBM pool registers */
+#define PCIE_HHBM_CSR_REG(base) ((base) + 0x2e00)
+#define PCIE_HHBM_Q_BASE_REG(base) ((base) + 0x2e04)
+#define PCIE_HHBM_Q_LIMIT_REG(base) ((base) + 0x2e08)
+#define PCIE_HHBM_Q_WR_REG(base) ((base) + 0x2e0c)
+#define PCIE_HHBM_Q_RD_REG(base) ((base) + 0x2e10)
+#define PCIE_HHBM_POOL_DATA_0_H(base) ((base) + 0x2e90)
+#define PCIE_HHBM_CONFIG(base) ((base) + 0x2f9c)
+#define PCIE_HHBM_POOL_REQ_0(base) ((base) + 0x2f10)
+#define PCIE_HHBM_POOL_DATA_0(base) ((base) + 0x2f40)
+#define PCIE_HHBM_WATERMARK_MASKED_INT(base) ((base) + 0x2f68)
+#define PCIE_HHBM_WATERMARK_INT(base) ((base) + 0x2f6c)
+#define PCIE_HHBM_POOL_WATERMARK(base) ((base) + 0x2f70)
+#define PCIE_HHBM_POOL_OVERFLOW_CNT(base) ((base) + 0x2f90)
+#define PCIE_HHBM_POOL_UNDERFLOW_CNT(base) ((base) + 0x2f94)
+#define HBM_INT_STATUS(base) ((base) + 0x2f9c)
+#define PCIE_HHBM_POOL_CNFIG(base) ((base) + 0x2f9c)
+
+/* host HBM bit field definition */
+#define HHBM_CONFIG_SOFT_RESET (BIT(8))
+#define HHBM_WR_REQ (BIT(0))
+#define HHBM_RD_REQ (BIT(1))
+#define HHBM_DONE (BIT(31))
+
+/* offsets for dual PCIE */
+#define PCIE_PORT_LINK_CTL(base) ((base) + 0x0710)
+#define PCIE_GEN2_CTL(base) ((base) + 0x080C)
+#define PCIE_GEN3_OFF(base) ((base) + 0x0890)
+#define PCIE_ATU_CTRL1(base) ((base) + 0x0904)
+#define PCIE_ATU_CTRL2(base) ((base) + 0x0908)
+#define PCIE_ATU_BASE_LOW(base) ((base) + 0x090C)
+#define PCIE_ATU_BASE_HIGH(base) ((base) + 0x0910)
+#define PCIE_ATU_BASE_LIMIT(base) ((base) + 0x0914)
+#define PCIE_ATU_TGT_LOW(base) ((base) + 0x0918)
+#define PCIE_ATU_TGT_HIGH(base) ((base) + 0x091C)
+#define PCIE_DMA_WR_ENABLE(base) ((base) + 0x097C)
+#define PCIE_DMA_WR_CHWTLOW(base) ((base) + 0x0988)
+#define PCIE_DMA_WR_CHWTHIG(base) ((base) + 0x098C)
+#define PCIE_DMA_WR_INTSTS(base) ((base) + 0x09BC)
+#define PCIE_DMA_WR_INTMASK(base) ((base) + 0x09C4)
+#define PCIE_DMA_WR_INTCLER(base) ((base) + 0x09C8)
+#define PCIE_DMA_WR_DONE_IMWR_ADDR_L(base) ((base) + 0x09D0)
+#define PCIE_DMA_WR_DONE_IMWR_ADDR_H(base) ((base) + 0x09D4)
+#define PCIE_DMA_WR_ABORT_IMWR_ADDR_L(base) ((base) + 0x09D8)
+#define PCIE_DMA_WR_ABORT_IMWR_ADDR_H(base) ((base) + 0x09DC)
+#define PCIE_DMA_WR_IMWR_DATA(base) ((base) + 0x09E0)
+#define PCIE_DMA_WR_LL_ERR_EN(base) ((base) + 0x0A00)
+#define PCIE_DMA_WR_DOORBELL(base) ((base) + 0x0980)
+#define PCIE_DMA_RD_ENABLE(base) ((base) + 0x099C)
+#define PCIE_DMA_RD_DOORBELL(base) ((base) + 0x09A0)
+#define PCIE_DMA_RD_CHWTLOW(base) ((base) + 0x09A8)
+#define PCIE_DMA_RD_CHWTHIG(base) ((base) + 0x09AC)
+#define PCIE_DMA_RD_INTSTS(base) ((base) + 0x0A10)
+#define PCIE_DMA_RD_INTMASK(base) ((base) + 0x0A18)
+#define PCIE_DMA_RD_INTCLER(base) ((base) + 0x0A1C)
+#define PCIE_DMA_RD_ERR_STS_L(base) ((base) + 0x0A24)
+#define PCIE_DMA_RD_ERR_STS_H(base) ((base) + 0x0A28)
+#define PCIE_DMA_RD_LL_ERR_EN(base) ((base) + 0x0A34)
+#define PCIE_DMA_RD_DONE_IMWR_ADDR_L(base) ((base) + 0x0A3C)
+#define PCIE_DMA_RD_DONE_IMWR_ADDR_H(base) ((base) + 0x0A40)
+#define PCIE_DMA_RD_ABORT_IMWR_ADDR_L(base) ((base) + 0x0A44)
+#define PCIE_DMA_RD_ABORT_IMWR_ADDR_H(base) ((base) + 0x0A48)
+#define PCIE_DMA_RD_IMWR_DATA(base) ((base) + 0x0A4C)
+#define PCIE_DMA_CHNL_CONTEXT(base) ((base) + 0x0A6C)
+#define PCIE_DMA_CHNL_CNTRL(base) ((base) + 0x0A70)
+#define PCIE_DMA_XFR_SIZE(base) ((base) + 0x0A78)
+#define PCIE_DMA_SAR_LOW(base) ((base) + 0x0A7C)
+#define PCIE_DMA_SAR_HIGH(base) ((base) + 0x0A80)
+#define PCIE_DMA_DAR_LOW(base) ((base) + 0x0A84)
+#define PCIE_DMA_DAR_HIGH(base) ((base) + 0x0A88)
+#define PCIE_DMA_LLPTR_LOW(base) ((base) + 0x0A8C)
+#define PCIE_DMA_LLPTR_HIGH(base) ((base) + 0x0A90)
+#define PCIE_DMA_WRLL_ERR_ENB(base) ((base) + 0x0A00)
+#define PCIE_DMA_RDLL_ERR_ENB(base) ((base) + 0x0A34)
+#define PCIE_DMABD_CHNL_CNTRL(base) ((base) + 0x8000)
+#define PCIE_DMABD_XFR_SIZE(base) ((base) + 0x8004)
+#define PCIE_DMABD_SAR_LOW(base) ((base) + 0x8008)
+#define PCIE_DMABD_SAR_HIGH(base) ((base) + 0x800c)
+#define PCIE_DMABD_DAR_LOW(base) ((base) + 0x8010)
+#define PCIE_DMABD_DAR_HIGH(base) ((base) + 0x8014)
+#define PCIE_DMABD_LLPTR_LOW(base) ((base) + 0x8018)
+#define PCIE_DMABD_LLPTR_HIGH(base) ((base) + 0x801c)
+#define PCIE_WRDMA0_CHNL_CNTRL(base) ((base) + 0x8000)
+#define PCIE_WRDMA0_XFR_SIZE(base) ((base) + 0x8004)
+#define PCIE_WRDMA0_SAR_LOW(base) ((base) + 0x8008)
+#define PCIE_WRDMA0_SAR_HIGH(base) ((base) + 0x800c)
+#define PCIE_WRDMA0_DAR_LOW(base) ((base) + 0x8010)
+#define PCIE_WRDMA0_DAR_HIGH(base) ((base) + 0x8014)
+#define PCIE_WRDMA0_LLPTR_LOW(base) ((base) + 0x8018)
+#define PCIE_WRDMA0_LLPTR_HIGH(base) ((base) + 0x801c)
+#define PCIE_WRDMA1_CHNL_CNTRL(base) ((base) + 0x8020)
+#define PCIE_WRDMA1_XFR_SIZE(base) ((base) + 0x8024)
+#define PCIE_WRDMA1_SAR_LOW(base) ((base) + 0x8028)
+#define PCIE_WRDMA1_SAR_HIGH(base) ((base) + 0x802c)
+#define PCIE_WRDMA1_DAR_LOW(base) ((base) + 0x8030)
+#define PCIE_WRDMA1_DAR_HIGH(base) ((base) + 0x8034)
+#define PCIE_WRDMA1_LLPTR_LOW(base) ((base) + 0x8038)
+#define PCIE_WRDMA1_LLPTR_HIGH(base) ((base) + 0x803c)
+#define PCIE_RDDMA0_CHNL_CNTRL(base) ((base) + 0x8040)
+#define PCIE_RDDMA0_XFR_SIZE(base) ((base) + 0x8044)
+#define PCIE_RDDMA0_SAR_LOW(base) ((base) + 0x8048)
+#define PCIE_RDDMA0_SAR_HIGH(base) ((base) + 0x804c)
+#define PCIE_RDDMA0_DAR_LOW(base) ((base) + 0x8050)
+#define PCIE_RDDMA0_DAR_HIGH(base) ((base) + 0x8054)
+#define PCIE_RDDMA0_LLPTR_LOW(base) ((base) + 0x8058)
+#define PCIE_RDDMA0_LLPTR_HIGH(base) ((base) + 0x805c)
+#define PCIE_RDDMA1_CHNL_CNTRL(base) ((base) + 0x8060)
+#define PCIE_RDDMA1_XFR_SIZE(base) ((base) + 0x8064)
+#define PCIE_RDDMA1_SAR_LOW(base) ((base) + 0x8068)
+#define PCIE_RDDMA1_SAR_HIGH(base) ((base) + 0x806c)
+#define PCIE_RDDMA1_DAR_LOW(base) ((base) + 0x8070)
+#define PCIE_RDDMA1_DAR_HIGH(base) ((base) + 0x8074)
+#define PCIE_RDDMA1_LLPTR_LOW(base) ((base) + 0x8078)
+#define PCIE_RDDMA1_LLPTR_HIGH(base) ((base) + 0x807c)
+
+#define PCIE_ID(base) ((base) + 0x0000)
+#define PCIE_CMD(base) ((base) + 0x0004)
+#define PCIE_BAR(base, n) ((base) + 0x0010 + ((n) << 2))
+#define PCIE_CAP_PTR(base) ((base) + 0x0034)
+#define PCIE_MSI_LBAR(base) ((base) + 0x0054)
+#define PCIE_MSI_CTRL(base) ((base) + 0x0050)
+#define PCIE_MSI_ADDR_L(base) ((base) + 0x0054)
+#define PCIE_MSI_ADDR_H(base) ((base) + 0x0058)
+#define PCIE_MSI_DATA(base) ((base) + 0x005C)
+#define PCIE_MSI_MASK_BIT(base) ((base) + 0x0060)
+#define PCIE_MSI_PEND_BIT(base) ((base) + 0x0064)
+#define PCIE_DEVCAP(base) ((base) + 0x0074)
+#define PCIE_DEVCTLSTS(base) ((base) + 0x0078)
+
+#define PCIE_CMDSTS(base) ((base) + 0x0004)
+#define PCIE_LINK_STAT(base) ((base) + 0x80)
+#define PCIE_LINK_CTL2(base) ((base) + 0xa0)
+#define PCIE_ASPM_L1_CTRL(base) ((base) + 0x70c)
+#define PCIE_ASPM_LINK_CTRL(base) (PCIE_LINK_STAT(base))
+#define PCIE_ASPM_L1_SUBSTATE_TIMING(base) ((base) + 0xB44)
+#define PCIE_L1SUB_CTRL1(base) ((base) + 0x150)
+#define PCIE_PMCSR(base) ((base) + 0x44)
+#define PCIE_CFG_SPACE_LIMIT(base) ((base) + 0x100)
+
+/* PCIe link defines */
+#define PEARL_PCIE_LINKUP (0x7)
+#define PEARL_PCIE_DATA_LINK (BIT(0))
+#define PEARL_PCIE_PHY_LINK (BIT(1))
+#define PEARL_PCIE_LINK_RST (BIT(3))
+#define PEARL_PCIE_FATAL_ERR (BIT(5))
+#define PEARL_PCIE_NONFATAL_ERR (BIT(6))
+
+/* PCIe Lane defines */
+#define PCIE_G2_LANE_X1 ((BIT(0)) << 16)
+#define PCIE_G2_LANE_X2 ((BIT(0) | BIT(1)) << 16)
+
+/* PCIe DLL link enable */
+#define PCIE_DLL_LINK_EN ((BIT(0)) << 5)
+
+#define PCIE_LINK_GEN1 (BIT(0))
+#define PCIE_LINK_GEN2 (BIT(1))
+#define PCIE_LINK_GEN3 (BIT(2))
+#define PCIE_LINK_MODE(x) (((x) >> 16) & 0x7)
+
+#define MSI_EN (BIT(0))
+#define MSI_64_EN (BIT(7))
+#define PCIE_MSI_ADDR_OFFSET(a) ((a) & 0xFFFF)
+#define PCIE_MSI_ADDR_ALIGN(a) ((a) & (~0xFFFF))
+
+#define PCIE_BAR_MASK(base, n) ((base) + 0x1010 + ((n) << 2))
+#define PCIE_MAX_BAR (6)
+
+#define PCIE_ATU_VIEW(base) ((base) + 0x0900)
+#define PCIE_ATU_CTL1(base) ((base) + 0x0904)
+#define PCIE_ATU_CTL2(base) ((base) + 0x0908)
+#define PCIE_ATU_LBAR(base) ((base) + 0x090c)
+#define PCIE_ATU_UBAR(base) ((base) + 0x0910)
+#define PCIE_ATU_LAR(base) ((base) + 0x0914)
+#define PCIE_ATU_LTAR(base) ((base) + 0x0918)
+#define PCIE_ATU_UTAR(base) ((base) + 0x091c)
+
+#define PCIE_MSI_ADDR_LOWER(base) ((base) + 0x0820)
+#define PCIE_MSI_ADDR_UPPER(base) ((base) + 0x0824)
+#define PCIE_MSI_ENABLE(base) ((base) + 0x0828)
+#define PCIE_MSI_MASK_RC(base) ((base) + 0x082c)
+#define PCIE_MSI_STATUS(base) ((base) + 0x0830)
+#define PEARL_PCIE_MSI_REGION (0xce000000)
+#define PEARL_PCIE_MSI_DATA (0)
+#define PCIE_MSI_GPIO(base) ((base) + 0x0888)
+
+#define PCIE_HDP_HOST_QUEUE_FULL (BIT(17))
+#define USE_BAR_MATCH_MODE
+#define PCIE_ATU_OB_REGION (BIT(0))
+#define PCIE_ATU_EN_REGION (BIT(31))
+#define PCIE_ATU_EN_MATCH (BIT(30))
+#define PCIE_BASE_REGION (0xb0000000)
+#define PCIE_MEM_MAP_SIZE (512 * 1024)
+
+#define PCIE_OB_REG_REGION (0xcf000000)
+#define PCIE_CONFIG_REGION (0xcf000000)
+#define PCIE_CONFIG_SIZE (4096)
+#define PCIE_CONFIG_CH (1)
+
+/* inbound mapping */
+#define PCIE_IB_BAR0 (0x00000000) /* ddr */
+#define PCIE_IB_BAR0_CH (0)
+#define PCIE_IB_BAR3 (0xe0000000) /* sys_reg */
+#define PCIE_IB_BAR3_CH (1)
+
+/* outbound mapping */
+#define PCIE_MEM_CH (0)
+#define PCIE_REG_CH (1)
+#define PCIE_MEM_REGION (0xc0000000)
+#define PCIE_MEM_SIZE (0x000fffff)
+#define PCIE_MEM_TAR (0x80000000)
+
+#define PCIE_MSI_REGION (0xce000000)
+#define PCIE_MSI_SIZE (KBYTE(4) - 1)
+#define PCIE_MSI_CH (1)
+
+/* size of config region */
+#define PCIE_CFG_SIZE (0x0000ffff)
+
+#define PCIE_ATU_DIR_IB (BIT(31))
+#define PCIE_ATU_DIR_OB (0)
+#define PCIE_ATU_DIR_CFG (2)
+#define PCIE_ATU_DIR_MATCH_IB (BIT(31) | BIT(30))
+
+#define PCIE_DMA_WR_0 (0)
+#define PCIE_DMA_WR_1 (1)
+#define PCIE_DMA_RD_0 (2)
+#define PCIE_DMA_RD_1 (3)
+
+#define PCIE_DMA_CHNL_CNTRL_CB (BIT(0))
+#define PCIE_DMA_CHNL_CNTRL_TCB (BIT(1))
+#define PCIE_DMA_CHNL_CNTRL_LLP (BIT(2))
+#define PCIE_DMA_CHNL_CNTRL_LIE (BIT(3))
+#define PCIE_DMA_CHNL_CNTRL_RIE (BIT(4))
+#define PCIE_DMA_CHNL_CNTRL_CSS (BIT(8))
+#define PCIE_DMA_CHNL_CNTRL_LLE (BIT(9))
+#define PCIE_DMA_CHNL_CNTRL_TLP (BIT(26))
+
+#define PCIE_DMA_CHNL_CONTEXT_RD (BIT(31))
+#define PCIE_DMA_CHNL_CONTEXT_WR (0)
+
+/* PCIe HDP interrupt status definition */
+#define PCIE_HDP_INT_EP_RXDMA (BIT(0))
+#define PCIE_HDP_INT_HBM_UF (BIT(1))
+#define PCIE_HDP_INT_RX_LEN_ERR (BIT(2))
+#define PCIE_HDP_INT_RX_HDR_LEN_ERR (BIT(3))
+#define PCIE_HDP_INT_EP_TXDMA (BIT(12))
+#define PCIE_HDP_INT_EP_TXEMPTY (BIT(15))
+#define PCIE_HDP_INT_IPC (BIT(29))
+
+/* PCIe interrupt status definition */
+#define PCIE_INT_MSI (BIT(24))
+#define PCIE_INT_INTX (BIT(23))
+
+/* PCIe legacy INTx */
+#define PEARL_PCIE_CFG0_OFFSET (0x6C)
+#define PEARL_ASSERT_INTX (BIT(9))
+
+/* SYS CTL regs */
+#define QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET (0x001C)
+
+#define QTN_PEARL_IPC_IRQ_WORD(irq) (BIT(irq) | BIT(irq + 16))
+#define QTN_PEARL_LHOST_IPC_IRQ (6)
+
+#endif /* __PEARL_PCIE_H */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
new file mode 100644
index 0000000..6eafc15
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -0,0 +1,901 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _QTN_QLINK_H_
+#define _QTN_QLINK_H_
+
+#include <linux/ieee80211.h>
+
+#define QLINK_PROTO_VER 3
+
+#define QLINK_MACID_RSVD 0xFF
+#define QLINK_VIFID_RSVD 0xFF
+
+/* Common QLINK protocol messages definitions.
+ */
+
+/**
+ * enum qlink_msg_type - QLINK message types
+ *
+ * Used to distinguish between message types of QLINK protocol.
+ *
+ * @QLINK_MSG_TYPE_CMD: Message is carrying data of a command sent from
+ * driver to wireless hardware.
+ * @QLINK_MSG_TYPE_CMDRSP: Message is carrying data of a response to a command.
+ * Sent from wireless HW to driver in reply to previously issued command.
+ * @QLINK_MSG_TYPE_EVENT: Data for an event originated in wireless hardware and
+ * sent asynchronously to driver.
+ */
+enum qlink_msg_type {
+ QLINK_MSG_TYPE_CMD = 1,
+ QLINK_MSG_TYPE_CMDRSP = 2,
+ QLINK_MSG_TYPE_EVENT = 3
+};
+
+/**
+ * struct qlink_msg_header - common QLINK protocol message header
+ *
+ * Portion of QLINK protocol header common for all message types.
+ *
+ * @type: Message type, one of &enum qlink_msg_type.
+ * @len: Total length of message including all headers.
+ */
+struct qlink_msg_header {
+ __le16 type;
+ __le16 len;
+} __packed;
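+
+/* Illustrative sketch, not part of the protocol definition: a receiver can
+ * dispatch on the common header before touching type-specific payload,
+ * assuming 'buf' holds at least sizeof(struct qlink_msg_header) bytes:
+ *
+ *	const struct qlink_msg_header *mhdr = (const void *)buf;
+ *
+ *	switch (le16_to_cpu(mhdr->type)) {
+ *	case QLINK_MSG_TYPE_CMDRSP:
+ *	case QLINK_MSG_TYPE_EVENT:
+ *		...
+ *	}
+ */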
+
+/* Generic definitions of data and information carried in QLINK messages
+ */
+
+enum qlink_hw_capab {
+ QLINK_HW_SUPPORTS_REG_UPDATE = BIT(0),
+};
+
+enum qlink_phy_mode {
+ QLINK_PHYMODE_BGN = BIT(0),
+ QLINK_PHYMODE_AN = BIT(1),
+ QLINK_PHYMODE_AC = BIT(2),
+};
+
+enum qlink_iface_type {
+ QLINK_IFTYPE_AP = 1,
+ QLINK_IFTYPE_STATION = 2,
+ QLINK_IFTYPE_ADHOC = 3,
+ QLINK_IFTYPE_MONITOR = 4,
+ QLINK_IFTYPE_WDS = 5,
+};
+
+/**
+ * struct qlink_intf_info - information on virtual interface.
+ *
+ * Data describing a single virtual interface.
+ *
+ * @if_type: Mode of interface operation, one of &enum qlink_iface_type
+ * @flags: interface flags bitmap.
+ * @mac_addr: MAC address of virtual interface.
+ */
+struct qlink_intf_info {
+ __le16 if_type;
+ __le16 flags;
+ u8 mac_addr[ETH_ALEN];
+ u8 rsvd[2];
+} __packed;
+
+enum qlink_sta_flags {
+ QLINK_STA_FLAG_INVALID = 0,
+ QLINK_STA_FLAG_AUTHORIZED = BIT(0),
+ QLINK_STA_FLAG_SHORT_PREAMBLE = BIT(1),
+ QLINK_STA_FLAG_WME = BIT(2),
+ QLINK_STA_FLAG_MFP = BIT(3),
+ QLINK_STA_FLAG_AUTHENTICATED = BIT(4),
+ QLINK_STA_FLAG_TDLS_PEER = BIT(5),
+ QLINK_STA_FLAG_ASSOCIATED = BIT(6),
+};
+
+enum qlink_channel_width {
+ QLINK_CHAN_WIDTH_5 = BIT(0),
+ QLINK_CHAN_WIDTH_10 = BIT(1),
+ QLINK_CHAN_WIDTH_20_NOHT = BIT(2),
+ QLINK_CHAN_WIDTH_20 = BIT(3),
+ QLINK_CHAN_WIDTH_40 = BIT(4),
+ QLINK_CHAN_WIDTH_80 = BIT(5),
+ QLINK_CHAN_WIDTH_80P80 = BIT(6),
+ QLINK_CHAN_WIDTH_160 = BIT(7),
+};
+
+/* QLINK Command messages related definitions
+ */
+
+/**
+ * enum qlink_cmd_type - list of supported commands
+ *
+ * Commands are QLINK messages of type @QLINK_MSG_TYPE_CMD, sent by driver to
+ * wireless network device for processing. Device is expected to send back a
+ * reply message of type &QLINK_MSG_TYPE_CMDRSP, containing at least the
+ * command execution status (one of &enum qlink_cmd_result). Reply message
+ * may also contain data payload specific to the command type.
+ *
+ * @QLINK_CMD_CHANS_INFO_GET: for the specified MAC and specified band, get
+ * number of operational channels and information on each of the channel.
+ * This command is generic to a specified MAC; the interface index must be
+ * set to QLINK_VIFID_RSVD in the command header.
+ */
+enum qlink_cmd_type {
+ QLINK_CMD_FW_INIT = 0x0001,
+ QLINK_CMD_FW_DEINIT = 0x0002,
+ QLINK_CMD_REGISTER_MGMT = 0x0003,
+ QLINK_CMD_SEND_MGMT_FRAME = 0x0004,
+ QLINK_CMD_MGMT_SET_APPIE = 0x0005,
+ QLINK_CMD_PHY_PARAMS_GET = 0x0011,
+ QLINK_CMD_PHY_PARAMS_SET = 0x0012,
+ QLINK_CMD_GET_HW_INFO = 0x0013,
+ QLINK_CMD_MAC_INFO = 0x0014,
+ QLINK_CMD_ADD_INTF = 0x0015,
+ QLINK_CMD_DEL_INTF = 0x0016,
+ QLINK_CMD_CHANGE_INTF = 0x0017,
+ QLINK_CMD_UPDOWN_INTF = 0x0018,
+ QLINK_CMD_REG_REGION = 0x0019,
+ QLINK_CMD_CHANS_INFO_GET = 0x001A,
+ QLINK_CMD_CONFIG_AP = 0x0020,
+ QLINK_CMD_START_AP = 0x0021,
+ QLINK_CMD_STOP_AP = 0x0022,
+ QLINK_CMD_GET_STA_INFO = 0x0030,
+ QLINK_CMD_ADD_KEY = 0x0040,
+ QLINK_CMD_DEL_KEY = 0x0041,
+ QLINK_CMD_SET_DEFAULT_KEY = 0x0042,
+ QLINK_CMD_SET_DEFAULT_MGMT_KEY = 0x0043,
+ QLINK_CMD_CHANGE_STA = 0x0051,
+ QLINK_CMD_DEL_STA = 0x0052,
+ QLINK_CMD_SCAN = 0x0053,
+ QLINK_CMD_CONNECT = 0x0060,
+ QLINK_CMD_DISCONNECT = 0x0061,
+};
+
+/**
+ * struct qlink_cmd - QLINK command message header
+ *
+ * Header used for QLINK messages of QLINK_MSG_TYPE_CMD type.
+ *
+ * @mhdr: Common QLINK message header.
+ * @cmd_id: command id, one of &enum qlink_cmd_type.
+ * @seq_num: sequence number of command message, used for matching with
+ * response message.
+ * @macid: index of physical radio device the command is destined to or
+ * QLINK_MACID_RSVD if not applicable.
+ * @vifid: index of virtual wireless interface on specified @macid the command
+ * is destined to or QLINK_VIFID_RSVD if not applicable.
+ */
+struct qlink_cmd {
+ struct qlink_msg_header mhdr;
+ __le16 cmd_id;
+ __le16 seq_num;
+ u8 rsvd[2];
+ u8 macid;
+ u8 vifid;
+} __packed;
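+
+/* Illustrative sketch, not part of the protocol definition: a host-side
+ * sender would typically fill the header of a freshly allocated command,
+ * where 'cmd' points to at least sizeof(struct qlink_cmd) bytes:
+ *
+ *	cmd->mhdr.type = cpu_to_le16(QLINK_MSG_TYPE_CMD);
+ *	cmd->cmd_id = cpu_to_le16(QLINK_CMD_GET_HW_INFO);
+ *	cmd->macid = QLINK_MACID_RSVD;
+ *	cmd->vifid = QLINK_VIFID_RSVD;
+ */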
+
+/**
+ * struct qlink_cmd_manage_intf - interface management command
+ *
+ * Data for interface management commands QLINK_CMD_ADD_INTF, QLINK_CMD_DEL_INTF
+ * and QLINK_CMD_CHANGE_INTF.
+ *
+ * @intf_info: interface description.
+ */
+struct qlink_cmd_manage_intf {
+ struct qlink_cmd chdr;
+ struct qlink_intf_info intf_info;
+} __packed;
+
+enum qlink_mgmt_frame_type {
+ QLINK_MGMT_FRAME_ASSOC_REQ = 0x00,
+ QLINK_MGMT_FRAME_ASSOC_RESP = 0x01,
+ QLINK_MGMT_FRAME_REASSOC_REQ = 0x02,
+ QLINK_MGMT_FRAME_REASSOC_RESP = 0x03,
+ QLINK_MGMT_FRAME_PROBE_REQ = 0x04,
+ QLINK_MGMT_FRAME_PROBE_RESP = 0x05,
+ QLINK_MGMT_FRAME_BEACON = 0x06,
+ QLINK_MGMT_FRAME_ATIM = 0x07,
+ QLINK_MGMT_FRAME_DISASSOC = 0x08,
+ QLINK_MGMT_FRAME_AUTH = 0x09,
+ QLINK_MGMT_FRAME_DEAUTH = 0x0A,
+ QLINK_MGMT_FRAME_ACTION = 0x0B,
+
+ QLINK_MGMT_FRAME_TYPE_COUNT
+};
+
+/**
+ * struct qlink_cmd_mgmt_frame_register - data for QLINK_CMD_REGISTER_MGMT
+ *
+ * @frame_type: MGMT frame type the registration request describes, one of
+ * &enum qlink_mgmt_frame_type.
+ * @do_register: 0 - unregister, otherwise register for reception of specified
+ * MGMT frame type.
+ */
+struct qlink_cmd_mgmt_frame_register {
+ struct qlink_cmd chdr;
+ __le16 frame_type;
+ u8 do_register;
+} __packed;
+
+enum qlink_mgmt_frame_tx_flags {
+ QLINK_MGMT_FRAME_TX_FLAG_NONE = 0,
+ QLINK_MGMT_FRAME_TX_FLAG_OFFCHAN = BIT(0),
+ QLINK_MGMT_FRAME_TX_FLAG_NO_CCK = BIT(1),
+ QLINK_MGMT_FRAME_TX_FLAG_ACK_NOWAIT = BIT(2),
+};
+
+/**
+ * struct qlink_cmd_mgmt_frame_tx - data for QLINK_CMD_SEND_MGMT_FRAME command
+ *
+ * @cookie: opaque request identifier.
+ * @freq: Frequency to use for frame transmission.
+ * @flags: Transmission flags, one of &enum qlink_mgmt_frame_tx_flags.
+ * @frame_data: frame to transmit.
+ */
+struct qlink_cmd_mgmt_frame_tx {
+ struct qlink_cmd chdr;
+ __le32 cookie;
+ __le16 freq;
+ __le16 flags;
+ u8 frame_data[0];
+} __packed;
+
+/**
+ * struct qlink_cmd_mgmt_append_ie - data for QLINK_CMD_MGMT_SET_APPIE command
+ *
+ * @type: type of MGMT frame to append requested IEs to, one of
+ * &enum qlink_mgmt_frame_type.
+ * @flags: for future use.
+ * @ie_data: IEs data to append.
+ */
+struct qlink_cmd_mgmt_append_ie {
+ struct qlink_cmd chdr;
+ u8 type;
+ u8 flags;
+ u8 ie_data[0];
+} __packed;
+
+/**
+ * struct qlink_cmd_get_sta_info - data for QLINK_CMD_GET_STA_INFO command
+ *
+ * @sta_addr: MAC address of the STA for which statistics are requested.
+ */
+struct qlink_cmd_get_sta_info {
+ struct qlink_cmd chdr;
+ u8 sta_addr[ETH_ALEN];
+} __packed;
+
+/**
+ * struct qlink_cmd_add_key - data for QLINK_CMD_ADD_KEY command.
+ *
+ * @key_index: index of the key being installed.
+ * @pairwise: whether to use pairwise key.
+ * @addr: MAC address of a STA key is being installed to.
+ * @cipher: cipher suite.
+ * @key_data: key data itself.
+ */
+struct qlink_cmd_add_key {
+ struct qlink_cmd chdr;
+ u8 key_index;
+ u8 pairwise;
+ u8 addr[ETH_ALEN];
+ __le32 cipher;
+ u8 key_data[0];
+} __packed;
+
+/**
+ * struct qlink_cmd_del_key - data for QLINK_CMD_DEL_KEY command
+ *
+ * @key_index: index of the key being removed.
+ * @pairwise: whether to use pairwise key.
+ * @addr: MAC address of a STA for which a key is removed.
+ */
+struct qlink_cmd_del_key {
+ struct qlink_cmd chdr;
+ u8 key_index;
+ u8 pairwise;
+ u8 addr[ETH_ALEN];
+} __packed;
+
+/**
+ * struct qlink_cmd_set_def_key - data for QLINK_CMD_SET_DEFAULT_KEY command
+ *
+ * @key_index: index of the key to be set as default one.
+ * @unicast: key is unicast.
+ * @multicast: key is multicast.
+ */
+struct qlink_cmd_set_def_key {
+ struct qlink_cmd chdr;
+ u8 key_index;
+ u8 unicast;
+ u8 multicast;
+} __packed;
+
+/**
+ * struct qlink_cmd_set_def_mgmt_key - data for QLINK_CMD_SET_DEFAULT_MGMT_KEY
+ *
+ * @key_index: index of the key to be set as default MGMT key.
+ */
+struct qlink_cmd_set_def_mgmt_key {
+ struct qlink_cmd chdr;
+ u8 key_index;
+} __packed;
+
+/**
+ * struct qlink_cmd_change_sta - data for QLINK_CMD_CHANGE_STA command
+ *
+ * @sta_flags_mask: STA flags mask, bitmap of &enum qlink_sta_flags
+ * @sta_flags_set: STA flags values, bitmap of &enum qlink_sta_flags
+ * @sta_addr: address of the STA for which parameters are set.
+ */
+struct qlink_cmd_change_sta {
+ struct qlink_cmd chdr;
+ __le32 sta_flags_mask;
+ __le32 sta_flags_set;
+ u8 sta_addr[ETH_ALEN];
+} __packed;
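+
+/* Illustrative sketch: to authorize a peer STA without touching any other
+ * flag, set the same bit in both the mask and the value:
+ *
+ *	cmd->sta_flags_mask = cpu_to_le32(QLINK_STA_FLAG_AUTHORIZED);
+ *	cmd->sta_flags_set = cpu_to_le32(QLINK_STA_FLAG_AUTHORIZED);
+ */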
+
+/**
+ * struct qlink_cmd_del_sta - data for QLINK_CMD_DEL_STA command.
+ *
+ * See &struct station_del_parameters
+ */
+struct qlink_cmd_del_sta {
+ struct qlink_cmd chdr;
+ __le16 reason_code;
+ u8 subtype;
+ u8 sta_addr[ETH_ALEN];
+} __packed;
+
+enum qlink_sta_connect_flags {
+ QLINK_STA_CONNECT_DISABLE_HT = BIT(0),
+ QLINK_STA_CONNECT_DISABLE_VHT = BIT(1),
+ QLINK_STA_CONNECT_USE_RRM = BIT(2),
+};
+
+/**
+ * struct qlink_cmd_connect - data for QLINK_CMD_CONNECT command
+ *
+ * @flags: for future use.
+ * @freq: center frequency of the channel which should be used to connect.
+ * @bg_scan_period: period of background scan.
+ * @bssid: BSSID of the BSS to connect to.
+ * @payload: variable portion of connection request.
+ */
+struct qlink_cmd_connect {
+ struct qlink_cmd chdr;
+ __le32 flags;
+ __le16 freq;
+ __le16 bg_scan_period;
+ u8 bssid[ETH_ALEN];
+ u8 payload[0];
+} __packed;
+
+/**
+ * struct qlink_cmd_disconnect - data for QLINK_CMD_DISCONNECT command
+ *
+ * @reason: code of the reason of disconnect, see &enum ieee80211_reasoncode.
+ */
+struct qlink_cmd_disconnect {
+ struct qlink_cmd chdr;
+ __le16 reason;
+} __packed;
+
+/**
+ * struct qlink_cmd_updown - data for QLINK_CMD_UPDOWN_INTF command
+ *
+ * @if_up: bring specified interface DOWN (if_up==0) or UP (otherwise).
+ * Interface is specified in common command header @chdr.
+ */
+struct qlink_cmd_updown {
+ struct qlink_cmd chdr;
+ u8 if_up;
+} __packed;
+
+/**
+ * enum qlink_band - a list of frequency bands
+ *
+ * @QLINK_BAND_2GHZ: 2.4GHz band
+ * @QLINK_BAND_5GHZ: 5GHz band
+ * @QLINK_BAND_60GHZ: 60GHz band
+ */
+enum qlink_band {
+ QLINK_BAND_2GHZ = BIT(0),
+ QLINK_BAND_5GHZ = BIT(1),
+ QLINK_BAND_60GHZ = BIT(2),
+};
+
+/**
+ * struct qlink_cmd_chans_info_get - data for QLINK_CMD_CHANS_INFO_GET command
+ *
+ * @band: a PHY band for which channel info is needed, one of &enum qlink_band
+ */
+struct qlink_cmd_chans_info_get {
+ struct qlink_cmd chdr;
+ u8 band;
+} __packed;
+
+/* QLINK Command Responses messages related definitions
+ */
+
+enum qlink_cmd_result {
+ QLINK_CMD_RESULT_OK = 0,
+ QLINK_CMD_RESULT_INVALID,
+ QLINK_CMD_RESULT_ENOTSUPP,
+ QLINK_CMD_RESULT_ENOTFOUND,
+};
+
+/**
+ * struct qlink_resp - QLINK command response message header
+ *
+ * Header used for QLINK messages of QLINK_MSG_TYPE_CMDRSP type.
+ *
+ * @mhdr: see &struct qlink_msg_header.
+ * @cmd_id: command ID the response corresponds to, one of &enum qlink_cmd_type.
+ * @seq_num: sequence number of command message, used for matching with
+ * response message.
+ * @result: result of the command execution, one of &enum qlink_cmd_result.
+ * @macid: index of physical radio device the response is sent from or
+ * QLINK_MACID_RSVD if not applicable.
+ * @vifid: index of virtual wireless interface on specified @macid the response
+ * is sent from or QLINK_VIFID_RSVD if not applicable.
+ */
+struct qlink_resp {
+ struct qlink_msg_header mhdr;
+ __le16 cmd_id;
+ __le16 seq_num;
+ __le16 result;
+ u8 macid;
+ u8 vifid;
+} __packed;
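+
+/* Illustrative sketch: a response is matched to its originating command by
+ * sequence number ('sent_seq_num' stands for the locally recorded value):
+ *
+ *	if (le16_to_cpu(resp->seq_num) != sent_seq_num)
+ *		treat the message as an unexpected response;
+ */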
+
+/**
+ * struct qlink_resp_get_mac_info - response for QLINK_CMD_MAC_INFO command
+ *
+ * Data describing specific physical device providing wireless MAC
+ * functionality.
+ *
+ * @dev_mac: MAC address of physical WMAC device (used for first BSS on
+ * specified WMAC).
+ * @num_tx_chain: Number of transmit chains used by WMAC.
+ * @num_rx_chain: Number of receive chains used by WMAC.
+ * @vht_cap: VHT capabilities.
+ * @ht_cap: HT capabilities.
+ * @bands_cap: wireless bands WMAC can operate in, bitmap of &enum qlink_band.
+ * @phymode_cap: PHY modes WMAC can operate in, bitmap of &enum qlink_phy_mode.
+ * @max_ap_assoc_sta: Maximum number of associations supported by WMAC.
+ * @radar_detect_widths: bitmask of channel bandwidths for which WMAC can
+ * detect radar.
+ * @var_info: variable-length WMAC info data.
+ */
+struct qlink_resp_get_mac_info {
+ struct qlink_resp rhdr;
+ u8 dev_mac[ETH_ALEN];
+ u8 num_tx_chain;
+ u8 num_rx_chain;
+ struct ieee80211_vht_cap vht_cap;
+ struct ieee80211_ht_cap ht_cap;
+ u8 bands_cap;
+ u8 phymode_cap;
+ __le16 max_ap_assoc_sta;
+ __le16 radar_detect_widths;
+ u8 var_info[0];
+} __packed;
+
+/**
+ * struct qlink_resp_get_hw_info - response for QLINK_CMD_GET_HW_INFO command
+ *
+ * Description of wireless hardware capabilities and features.
+ *
+ * @fw_ver: wireless hardware firmware version.
+ * @hw_capab: Bitmap of capabilities supported by firmware.
+ * @ql_proto_ver: Version of QLINK protocol used by firmware.
+ * @alpha2_code: two-letter country code the firmware is configured to.
+ * @num_mac: Number of separate physical radio devices provided by hardware.
+ * @mac_bitmap: Bitmap of MAC IDs that are active and can be used in firmware.
+ * @total_tx_chain: total number of transmit chains used by device.
+ * @total_rx_chain: total number of receive chains.
+ */
+struct qlink_resp_get_hw_info {
+ struct qlink_resp rhdr;
+ __le32 fw_ver;
+ __le32 hw_capab;
+ __le16 ql_proto_ver;
+ u8 alpha2_code[2];
+ u8 num_mac;
+ u8 mac_bitmap;
+ u8 total_tx_chain;
+ u8 total_rx_chain;
+} __packed;
+
+/**
+ * struct qlink_resp_manage_intf - response for interface management commands
+ *
+ * Response data for QLINK_CMD_ADD_INTF and QLINK_CMD_CHANGE_INTF commands.
+ *
+ * @rhdr: Common Command Response message header.
+ * @intf_info: interface description.
+ */
+struct qlink_resp_manage_intf {
+ struct qlink_resp rhdr;
+ struct qlink_intf_info intf_info;
+} __packed;
+
+/**
+ * struct qlink_resp_get_sta_info - response for QLINK_CMD_GET_STA_INFO command
+ *
+ * Response data containing statistics for specified STA.
+ *
+ * @sta_addr: MAC address of STA the response carries statistics for.
+ * @info: statistics for specified STA.
+ */
+struct qlink_resp_get_sta_info {
+ struct qlink_resp rhdr;
+ u8 sta_addr[ETH_ALEN];
+ u8 info[0];
+} __packed;
+
+/**
+ * struct qlink_resp_get_chan_info - response for QLINK_CMD_CHANS_INFO_GET cmd
+ *
+ * @band: frequency band the channels belong to, one of &enum qlink_band.
+ * @num_chans: total number of channel info entries contained in the reply.
+ * @info: variable-length channels info.
+ */
+struct qlink_resp_get_chan_info {
+ struct qlink_resp rhdr;
+ u8 band;
+ u8 num_chans;
+ u8 rsvd[2];
+ u8 info[0];
+} __packed;
+
+/**
+ * struct qlink_resp_phy_params - response for QLINK_CMD_PHY_PARAMS_GET command
+ *
+ * @info: variable-length array of PHY params.
+ */
+struct qlink_resp_phy_params {
+ struct qlink_resp rhdr;
+ u8 info[0];
+} __packed;
+
+/* QLINK Events messages related definitions
+ */
+
+enum qlink_event_type {
+ QLINK_EVENT_STA_ASSOCIATED = 0x0021,
+ QLINK_EVENT_STA_DEAUTH = 0x0022,
+ QLINK_EVENT_MGMT_RECEIVED = 0x0023,
+ QLINK_EVENT_SCAN_RESULTS = 0x0024,
+ QLINK_EVENT_SCAN_COMPLETE = 0x0025,
+ QLINK_EVENT_BSS_JOIN = 0x0026,
+ QLINK_EVENT_BSS_LEAVE = 0x0027,
+};
+
+/**
+ * struct qlink_event - QLINK event message header
+ *
+ * Header used for QLINK messages of QLINK_MSG_TYPE_EVENT type.
+ *
+ * @mhdr: Common QLINK message header.
+ * @event_id: Specifies specific event ID, one of &enum qlink_event_type.
+ * @macid: index of physical radio device the event was generated on or
+ * QLINK_MACID_RSVD if not applicable.
+ * @vifid: index of virtual wireless interface on specified @macid the event
+ * was generated on or QLINK_VIFID_RSVD if not applicable.
+ */
+struct qlink_event {
+ struct qlink_msg_header mhdr;
+ __le16 event_id;
+ u8 macid;
+ u8 vifid;
+} __packed;
+
+/**
+ * struct qlink_event_sta_assoc - data for QLINK_EVENT_STA_ASSOCIATED event
+ *
+ * @sta_addr: Address of a STA for which new association event was generated
+ * @frame_control: control bits from 802.11 ASSOC_REQUEST header.
+ * @ies: IEs from association request.
+ */
+struct qlink_event_sta_assoc {
+ struct qlink_event ehdr;
+ u8 sta_addr[ETH_ALEN];
+ __le16 frame_control;
+ u8 ies[0];
+} __packed;
+
+/**
+ * struct qlink_event_sta_deauth - data for QLINK_EVENT_STA_DEAUTH event
+ *
+ * @sta_addr: Address of a deauthenticated STA.
+ * @reason: reason for deauthentication.
+ */
+struct qlink_event_sta_deauth {
+ struct qlink_event ehdr;
+ u8 sta_addr[ETH_ALEN];
+ __le16 reason;
+} __packed;
+
+/**
+ * struct qlink_event_bss_join - data for QLINK_EVENT_BSS_JOIN event
+ *
+ * @bssid: BSSID of the BSS the interface tried to join.
+ * @status: status of joining attempt, see &enum ieee80211_statuscode.
+ */
+struct qlink_event_bss_join {
+ struct qlink_event ehdr;
+ u8 bssid[ETH_ALEN];
+ __le16 status;
+} __packed;
+
+/**
+ * struct qlink_event_bss_leave - data for QLINK_EVENT_BSS_LEAVE event
+ *
+ * @reason: reason for disconnecting from the BSS.
+ */
+struct qlink_event_bss_leave {
+ struct qlink_event ehdr;
+ __le16 reason;
+} __packed;
+
+enum qlink_rxmgmt_flags {
+ QLINK_RXMGMT_FLAG_ANSWERED = BIT(0),
+};
+
+/**
+ * struct qlink_event_rxmgmt - data for QLINK_EVENT_MGMT_RECEIVED event
+ *
+ * @freq: Frequency on which the frame was received in MHz.
+ * @sig_dbm: signal strength in dBm.
+ * @flags: bitmap of &enum qlink_rxmgmt_flags.
+ * @frame_data: data of Rx'd frame itself.
+ */
+struct qlink_event_rxmgmt {
+ struct qlink_event ehdr;
+ __le32 freq;
+ __le32 sig_dbm;
+ __le32 flags;
+ u8 frame_data[0];
+} __packed;
+
+enum qlink_frame_type {
+ QLINK_BSS_FTYPE_UNKNOWN,
+ QLINK_BSS_FTYPE_BEACON,
+ QLINK_BSS_FTYPE_PRESP,
+};
+
+/**
+ * struct qlink_event_scan_result - data for QLINK_EVENT_SCAN_RESULTS event
+ *
+ * @tsf: TSF timestamp indicating when scan results were generated.
+ * @freq: center frequency of the channel on which the BSS reported by
+ * this scan result was discovered.
+ * @capab: capabilities field.
+ * @bintval: beacon interval announced by discovered BSS.
+ * @signal: signal strength.
+ * @frame_type: frame type used to get scan result, see &enum qlink_frame_type.
+ * @bssid: BSSID announced by discovered BSS.
+ * @ssid_len: length of SSID announced by BSS.
+ * @ssid: SSID announced by discovered BSS.
+ * @payload: IEs that are announced by discovered BSS in its MGMT frames.
+ */
+struct qlink_event_scan_result {
+ struct qlink_event ehdr;
+ __le64 tsf;
+ __le16 freq;
+ __le16 capab;
+ __le16 bintval;
+ s8 signal;
+ u8 frame_type;
+ u8 bssid[ETH_ALEN];
+ u8 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 payload[0];
+} __packed;
+
+/**
+ * enum qlink_scan_complete_flags - indicates result of scan request.
+ *
+ * @QLINK_SCAN_NONE: Scan request was processed.
+ * @QLINK_SCAN_ABORTED: Scan was aborted.
+ */
+enum qlink_scan_complete_flags {
+ QLINK_SCAN_NONE = 0,
+ QLINK_SCAN_ABORTED = BIT(0),
+};
+
+/**
+ * struct qlink_event_scan_complete - data for QLINK_EVENT_SCAN_COMPLETE event
+ *
+ * @flags: flags indicating the status of pending scan request,
+ * see &enum qlink_scan_complete_flags.
+ */
+struct qlink_event_scan_complete {
+ struct qlink_event ehdr;
+ __le32 flags;
+} __packed;
+
+/* QLINK TLVs (Type-Length Values) definitions
+ */
+
+enum qlink_tlv_id {
+ QTN_TLV_ID_FRAG_THRESH = 0x0201,
+ QTN_TLV_ID_RTS_THRESH = 0x0202,
+ QTN_TLV_ID_SRETRY_LIMIT = 0x0203,
+ QTN_TLV_ID_LRETRY_LIMIT = 0x0204,
+ QTN_TLV_ID_BCN_PERIOD = 0x0205,
+ QTN_TLV_ID_DTIM = 0x0206,
+ QTN_TLV_ID_CHANNEL = 0x020F,
+ QTN_TLV_ID_COVERAGE_CLASS = 0x0213,
+ QTN_TLV_ID_IFACE_LIMIT = 0x0214,
+ QTN_TLV_ID_NUM_IFACE_COMB = 0x0215,
+ QTN_TLV_ID_STA_BASIC_COUNTERS = 0x0300,
+ QTN_TLV_ID_STA_GENERIC_INFO = 0x0301,
+ QTN_TLV_ID_KEY = 0x0302,
+ QTN_TLV_ID_SEQ = 0x0303,
+ QTN_TLV_ID_CRYPTO = 0x0304,
+ QTN_TLV_ID_IE_SET = 0x0305,
+};
+
+struct qlink_tlv_hdr {
+ __le16 type;
+ __le16 len;
+ u8 val[0];
+} __packed;
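+
+/* Illustrative sketch: a TLV area delimited by 'buf'/'buf_len' (both are
+ * assumptions of this example) is walked by stepping over header + payload:
+ *
+ *	const struct qlink_tlv_hdr *tlv = (const void *)buf;
+ *
+ *	while ((const u8 *)tlv + sizeof(*tlv) <= buf + buf_len) {
+ *		u16 len = le16_to_cpu(tlv->len);
+ *
+ *		handle le16_to_cpu(tlv->type) and tlv->val[0..len-1];
+ *		tlv = (const struct qlink_tlv_hdr *)(tlv->val + len);
+ *	}
+ */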
+
+struct qlink_iface_limit {
+ __le16 max_num;
+ __le16 type_mask;
+} __packed;
+
+struct qlink_iface_comb_num {
+ __le16 iface_comb_num;
+} __packed;
+
+struct qlink_sta_stat_basic_counters {
+ __le64 rx_bytes;
+ __le64 tx_bytes;
+ __le64 rx_beacons;
+ __le32 rx_packets;
+ __le32 tx_packets;
+ __le32 rx_dropped;
+ __le32 tx_failed;
+} __packed;
+
+enum qlink_sta_info_rate_flags {
+ QLINK_STA_INFO_RATE_FLAG_INVALID = 0,
+ QLINK_STA_INFO_RATE_FLAG_HT_MCS = BIT(0),
+ QLINK_STA_INFO_RATE_FLAG_VHT_MCS = BIT(1),
+ QLINK_STA_INFO_RATE_FLAG_SHORT_GI = BIT(2),
+ QLINK_STA_INFO_RATE_FLAG_60G = BIT(3),
+};
+
+enum qlink_sta_info_rate_bw {
+ QLINK_STA_INFO_RATE_BW_5 = 0,
+ QLINK_STA_INFO_RATE_BW_10 = 1,
+ QLINK_STA_INFO_RATE_BW_20 = 2,
+ QLINK_STA_INFO_RATE_BW_40 = 3,
+ QLINK_STA_INFO_RATE_BW_80 = 4,
+ QLINK_STA_INFO_RATE_BW_160 = 5,
+};
+
+/**
+ * struct qlink_sta_info_rate - STA rate statistics
+ *
+ * @rate: data rate in Mbps.
+ * @flags: bitmap of &enum qlink_sta_info_rate_flags.
+ * @mcs: 802.11-defined MCS index.
+ * @nss: Number of Spatial Streams.
+ * @bw: bandwidth, one of &enum qlink_sta_info_rate_bw.
+ */
+struct qlink_sta_info_rate {
+ __le16 rate;
+ u8 flags;
+ u8 mcs;
+ u8 nss;
+ u8 bw;
+} __packed;
+
+struct qlink_sta_info_state {
+ __le32 mask;
+ __le32 value;
+} __packed;
+
+#define QLINK_RSSI_OFFSET 120
+
+struct qlink_sta_info_generic {
+ struct qlink_sta_info_state state;
+ __le32 connected_time;
+ __le32 inactive_time;
+ struct qlink_sta_info_rate rx_rate;
+ struct qlink_sta_info_rate tx_rate;
+ u8 rssi;
+ u8 rssi_avg;
+} __packed;
+
+struct qlink_tlv_frag_rts_thr {
+ struct qlink_tlv_hdr hdr;
+ __le16 thr;
+} __packed;
+
+struct qlink_tlv_rlimit {
+ struct qlink_tlv_hdr hdr;
+ u8 rlimit;
+} __packed;
+
+struct qlink_tlv_cclass {
+ struct qlink_tlv_hdr hdr;
+ u8 cclass;
+} __packed;
+
+enum qlink_dfs_state {
+ QLINK_DFS_USABLE,
+ QLINK_DFS_UNAVAILABLE,
+ QLINK_DFS_AVAILABLE,
+};
+
+enum qlink_channel_flags {
+ QLINK_CHAN_DISABLED = BIT(0),
+ QLINK_CHAN_NO_IR = BIT(1),
+ QLINK_CHAN_RADAR = BIT(3),
+ QLINK_CHAN_NO_HT40PLUS = BIT(4),
+ QLINK_CHAN_NO_HT40MINUS = BIT(5),
+ QLINK_CHAN_NO_OFDM = BIT(6),
+ QLINK_CHAN_NO_80MHZ = BIT(7),
+ QLINK_CHAN_NO_160MHZ = BIT(8),
+ QLINK_CHAN_INDOOR_ONLY = BIT(9),
+ QLINK_CHAN_IR_CONCURRENT = BIT(10),
+ QLINK_CHAN_NO_20MHZ = BIT(11),
+ QLINK_CHAN_NO_10MHZ = BIT(12),
+};
+
+struct qlink_tlv_channel {
+ struct qlink_tlv_hdr hdr;
+ __le16 hw_value;
+ __le16 center_freq;
+ __le32 flags;
+ u8 band;
+ u8 max_antenna_gain;
+ u8 max_power;
+ u8 max_reg_power;
+ __le32 dfs_cac_ms;
+ u8 dfs_state;
+ u8 beacon_found;
+ u8 rsvd[2];
+} __packed;
+
+#define QLINK_MAX_NR_CIPHER_SUITES 5
+#define QLINK_MAX_NR_AKM_SUITES 2
+
+struct qlink_auth_encr {
+ __le32 wpa_versions;
+ __le32 cipher_group;
+ __le32 n_ciphers_pairwise;
+ __le32 ciphers_pairwise[QLINK_MAX_NR_CIPHER_SUITES];
+ __le32 n_akm_suites;
+ __le32 akm_suites[QLINK_MAX_NR_AKM_SUITES];
+ __le16 control_port_ethertype;
+ u8 auth_type;
+ u8 privacy;
+ u8 mfp;
+ u8 control_port;
+ u8 control_port_no_encrypt;
+} __packed;
+
+#endif /* _QTN_QLINK_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
new file mode 100644
index 0000000..49ae652
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/nl80211.h>
+
+#include "qlink_util.h"
+
+u16 qlink_iface_type_mask_to_nl(u16 qlink_mask)
+{
+ u16 result = 0;
+
+ if (qlink_mask & QLINK_IFTYPE_AP)
+ result |= BIT(NL80211_IFTYPE_AP);
+
+ if (qlink_mask & QLINK_IFTYPE_STATION)
+ result |= BIT(NL80211_IFTYPE_STATION);
+
+ if (qlink_mask & QLINK_IFTYPE_ADHOC)
+ result |= BIT(NL80211_IFTYPE_ADHOC);
+
+ if (qlink_mask & QLINK_IFTYPE_MONITOR)
+ result |= BIT(NL80211_IFTYPE_MONITOR);
+
+ if (qlink_mask & QLINK_IFTYPE_WDS)
+ result |= BIT(NL80211_IFTYPE_WDS);
+
+ return result;
+}
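+
+/* Usage sketch (illustrative): converting a firmware-provided interface
+ * type mask, e.g. from a &struct qlink_iface_limit entry, for cfg80211:
+ *
+ *	u16 nl_types = qlink_iface_type_mask_to_nl(le16_to_cpu(limit->type_mask));
+ */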
+
+u8 qlink_chan_width_mask_to_nl(u16 qlink_mask)
+{
+ u8 result = 0;
+
+ if (qlink_mask & QLINK_CHAN_WIDTH_5)
+ result |= BIT(NL80211_CHAN_WIDTH_5);
+
+ if (qlink_mask & QLINK_CHAN_WIDTH_10)
+ result |= BIT(NL80211_CHAN_WIDTH_10);
+
+ if (qlink_mask & QLINK_CHAN_WIDTH_20_NOHT)
+ result |= BIT(NL80211_CHAN_WIDTH_20_NOHT);
+
+ if (qlink_mask & QLINK_CHAN_WIDTH_20)
+ result |= BIT(NL80211_CHAN_WIDTH_20);
+
+ if (qlink_mask & QLINK_CHAN_WIDTH_40)
+ result |= BIT(NL80211_CHAN_WIDTH_40);
+
+ if (qlink_mask & QLINK_CHAN_WIDTH_80)
+ result |= BIT(NL80211_CHAN_WIDTH_80);
+
+ if (qlink_mask & QLINK_CHAN_WIDTH_80P80)
+ result |= BIT(NL80211_CHAN_WIDTH_80P80);
+
+ if (qlink_mask & QLINK_CHAN_WIDTH_160)
+ result |= BIT(NL80211_CHAN_WIDTH_160);
+
+ return result;
+}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
new file mode 100644
index 0000000..90d7d09
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _QTN_FMAC_QLINK_UTIL_H_
+#define _QTN_FMAC_QLINK_UTIL_H_
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#include "qlink.h"
+
+static inline void qtnf_cmd_skb_put_action(struct sk_buff *skb, u16 action)
+{
+ __le16 *buf_ptr;
+
+ buf_ptr = skb_put(skb, sizeof(action));
+ *buf_ptr = cpu_to_le16(action);
+}
+
+static inline void
+qtnf_cmd_skb_put_buffer(struct sk_buff *skb, const u8 *buf_src, size_t len)
+{
+ skb_put_data(skb, buf_src, len);
+}
+
+static inline void qtnf_cmd_skb_put_tlv_arr(struct sk_buff *skb,
+ u16 tlv_id, const u8 arr[],
+ size_t arr_len)
+{
+ struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + arr_len);
+
+ hdr->type = cpu_to_le16(tlv_id);
+ hdr->len = cpu_to_le16(arr_len);
+ memcpy(hdr->val, arr, arr_len);
+}
+
+static inline void qtnf_cmd_skb_put_tlv_u8(struct sk_buff *skb, u16 tlv_id,
+ u8 value)
+{
+ struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + sizeof(value));
+
+ hdr->type = cpu_to_le16(tlv_id);
+ hdr->len = cpu_to_le16(sizeof(value));
+ *hdr->val = value;
+}
+
+static inline void qtnf_cmd_skb_put_tlv_u16(struct sk_buff *skb,
+ u16 tlv_id, u16 value)
+{
+ struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + sizeof(value));
+ __le16 tmp = cpu_to_le16(value);
+
+ hdr->type = cpu_to_le16(tlv_id);
+ hdr->len = cpu_to_le16(sizeof(value));
+ memcpy(hdr->val, &tmp, sizeof(tmp));
+}
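+
+/* Usage sketch (illustrative; 'rts_thr' and 'dtim_period' are example
+ * variables): serializing PHY parameters into a command skb that was
+ * allocated with enough tailroom:
+ *
+ *	qtnf_cmd_skb_put_tlv_u16(cmd_skb, QTN_TLV_ID_RTS_THRESH, rts_thr);
+ *	qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_DTIM, dtim_period);
+ */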
+
+u16 qlink_iface_type_mask_to_nl(u16 qlink_mask);
+u8 qlink_chan_width_mask_to_nl(u16 qlink_mask);
+
+#endif /* _QTN_FMAC_QLINK_UTIL_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h
new file mode 100644
index 0000000..c4ad40d
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _QTN_HW_IDS_H_
+#define _QTN_HW_IDS_H_
+
+#include <linux/pci_ids.h>
+
+#define PCIE_VENDOR_ID_QUANTENNA (0x1bb5)
+
+/* PCIE Device IDs */
+
+#define PCIE_DEVICE_ID_QTN_PEARL (0x0008)
+
+/* FW names */
+
+#define QTN_PCI_PEARL_FW_NAME "qtn/fmac_qsr10g.img"
+
+#endif /* _QTN_HW_IDS_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c
new file mode 100644
index 0000000..aa106dd
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+
+#include "shm_ipc.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "qtnfmac shm_ipc: %s: " fmt, __func__
+
+static bool qtnf_shm_ipc_has_new_data(struct qtnf_shm_ipc *ipc)
+{
+ const u32 flags = readl(&ipc->shm_region->headroom.hdr.flags);
+
+ return (flags & QTNF_SHM_IPC_NEW_DATA);
+}
+
+static void qtnf_shm_handle_new_data(struct qtnf_shm_ipc *ipc)
+{
+ size_t size;
+ bool rx_buff_ok = true;
+ struct qtnf_shm_ipc_region_header __iomem *shm_reg_hdr;
+
+ shm_reg_hdr = &ipc->shm_region->headroom.hdr;
+
+ size = readw(&shm_reg_hdr->data_len);
+
+ if (unlikely(size == 0 || size > QTN_IPC_MAX_DATA_SZ)) {
+ pr_err("wrong rx packet size: %zu\n", size);
+ rx_buff_ok = false;
+ } else {
+ memcpy_fromio(ipc->rx_data, ipc->shm_region->data, size);
+ }
+
+ writel(QTNF_SHM_IPC_ACK, &shm_reg_hdr->flags);
+ readl(&shm_reg_hdr->flags); /* flush PCIe write */
+
+ ipc->interrupt.fn(ipc->interrupt.arg);
+
+ if (likely(rx_buff_ok)) {
+ ipc->rx_packet_count++;
+ ipc->rx_callback.fn(ipc->rx_callback.arg, ipc->rx_data, size);
+ }
+}
+
+static void qtnf_shm_ipc_irq_work(struct work_struct *work)
+{
+ struct qtnf_shm_ipc *ipc = container_of(work, struct qtnf_shm_ipc,
+ irq_work);
+
+ while (qtnf_shm_ipc_has_new_data(ipc))
+ qtnf_shm_handle_new_data(ipc);
+}
+
+static void qtnf_shm_ipc_irq_inbound_handler(struct qtnf_shm_ipc *ipc)
+{
+ u32 flags;
+
+ flags = readl(&ipc->shm_region->headroom.hdr.flags);
+
+ if (flags & QTNF_SHM_IPC_NEW_DATA)
+ queue_work(ipc->workqueue, &ipc->irq_work);
+}
+
+static void qtnf_shm_ipc_irq_outbound_handler(struct qtnf_shm_ipc *ipc)
+{
+ u32 flags;
+
+ if (!READ_ONCE(ipc->waiting_for_ack))
+ return;
+
+ flags = readl(&ipc->shm_region->headroom.hdr.flags);
+
+ if (flags & QTNF_SHM_IPC_ACK) {
+ WRITE_ONCE(ipc->waiting_for_ack, 0);
+ complete(&ipc->tx_completion);
+ }
+}
+
+int qtnf_shm_ipc_init(struct qtnf_shm_ipc *ipc,
+ enum qtnf_shm_ipc_direction direction,
+ struct qtnf_shm_ipc_region __iomem *shm_region,
+ struct workqueue_struct *workqueue,
+ const struct qtnf_shm_ipc_int *interrupt,
+ const struct qtnf_shm_ipc_rx_callback *rx_callback)
+{
+ BUILD_BUG_ON(offsetof(struct qtnf_shm_ipc_region, data) !=
+ QTN_IPC_REG_HDR_SZ);
+ BUILD_BUG_ON(sizeof(struct qtnf_shm_ipc_region) > QTN_IPC_REG_SZ);
+
+ ipc->shm_region = shm_region;
+ ipc->direction = direction;
+ ipc->interrupt = *interrupt;
+ ipc->rx_callback = *rx_callback;
+ ipc->tx_packet_count = 0;
+ ipc->rx_packet_count = 0;
+ ipc->workqueue = workqueue;
+ ipc->waiting_for_ack = 0;
+ ipc->tx_timeout_count = 0;
+
+ switch (direction) {
+ case QTNF_SHM_IPC_OUTBOUND:
+ ipc->irq_handler = qtnf_shm_ipc_irq_outbound_handler;
+ break;
+ case QTNF_SHM_IPC_INBOUND:
+ ipc->irq_handler = qtnf_shm_ipc_irq_inbound_handler;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ INIT_WORK(&ipc->irq_work, qtnf_shm_ipc_irq_work);
+ init_completion(&ipc->tx_completion);
+
+ return 0;
+}
+
+void qtnf_shm_ipc_free(struct qtnf_shm_ipc *ipc)
+{
+ complete_all(&ipc->tx_completion);
+}
+
+int qtnf_shm_ipc_send(struct qtnf_shm_ipc *ipc, const u8 *buf, size_t size)
+{
+ int ret = 0;
+ struct qtnf_shm_ipc_region_header __iomem *shm_reg_hdr;
+
+ shm_reg_hdr = &ipc->shm_region->headroom.hdr;
+
+ if (unlikely(size > QTN_IPC_MAX_DATA_SZ))
+ return -E2BIG;
+
+ ipc->tx_packet_count++;
+
+ writew(size, &shm_reg_hdr->data_len);
+ memcpy_toio(ipc->shm_region->data, buf, size);
+
+ /* sync previous writes before proceeding */
+ dma_wmb();
+
+ WRITE_ONCE(ipc->waiting_for_ack, 1);
+
+ /* sync previous memory write before announcing new data ready */
+ wmb();
+
+ writel(QTNF_SHM_IPC_NEW_DATA, &shm_reg_hdr->flags);
+ readl(&shm_reg_hdr->flags); /* flush PCIe write */
+
+ ipc->interrupt.fn(ipc->interrupt.arg);
+
+ if (!wait_for_completion_timeout(&ipc->tx_completion,
+ QTN_SHM_IPC_ACK_TIMEOUT)) {
+ ret = -ETIMEDOUT;
+ ipc->tx_timeout_count++;
+ pr_err("TX ACK timeout\n");
+ }
+
+ /* now we're not waiting for ACK even in case of timeout */
+ WRITE_ONCE(ipc->waiting_for_ack, 0);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h
new file mode 100644
index 0000000..453dd64
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _QTN_FMAC_SHM_IPC_H_
+#define _QTN_FMAC_SHM_IPC_H_
+
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+#include "shm_ipc_defs.h"
+
+#define QTN_SHM_IPC_ACK_TIMEOUT (2 * HZ)
+
+struct qtnf_shm_ipc_int {
+ void (*fn)(void *arg);
+ void *arg;
+};
+
+struct qtnf_shm_ipc_rx_callback {
+ void (*fn)(void *arg, const u8 *buf, size_t len);
+ void *arg;
+};
+
+enum qtnf_shm_ipc_direction {
+ QTNF_SHM_IPC_OUTBOUND = BIT(0),
+ QTNF_SHM_IPC_INBOUND = BIT(1),
+};
+
+struct qtnf_shm_ipc {
+ struct qtnf_shm_ipc_region __iomem *shm_region;
+ enum qtnf_shm_ipc_direction direction;
+ size_t tx_packet_count;
+ size_t rx_packet_count;
+
+ size_t tx_timeout_count;
+
+ u8 waiting_for_ack;
+
+ u8 rx_data[QTN_IPC_MAX_DATA_SZ] __aligned(sizeof(u32));
+
+ struct qtnf_shm_ipc_int interrupt;
+ struct qtnf_shm_ipc_rx_callback rx_callback;
+
+ void (*irq_handler)(struct qtnf_shm_ipc *ipc);
+
+ struct workqueue_struct *workqueue;
+ struct work_struct irq_work;
+ struct completion tx_completion;
+};
+
+int qtnf_shm_ipc_init(struct qtnf_shm_ipc *ipc,
+ enum qtnf_shm_ipc_direction direction,
+ struct qtnf_shm_ipc_region __iomem *shm_region,
+ struct workqueue_struct *workqueue,
+ const struct qtnf_shm_ipc_int *interrupt,
+ const struct qtnf_shm_ipc_rx_callback *rx_callback);
+void qtnf_shm_ipc_free(struct qtnf_shm_ipc *ipc);
+int qtnf_shm_ipc_send(struct qtnf_shm_ipc *ipc, const u8 *buf, size_t size);
+
+static inline void qtnf_shm_ipc_irq_handler(struct qtnf_shm_ipc *ipc)
+{
+ ipc->irq_handler(ipc);
+}
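+
+/* Usage sketch (illustrative; the callback names are assumptions):
+ *
+ *	struct qtnf_shm_ipc_int irq = { .fn = ring_doorbell, .arg = priv };
+ *	struct qtnf_shm_ipc_rx_callback rx = { .fn = handle_rx, .arg = priv };
+ *
+ *	qtnf_shm_ipc_init(&ipc, QTNF_SHM_IPC_OUTBOUND, region, wq, &irq, &rx);
+ *	qtnf_shm_ipc_send(&ipc, buf, len);
+ */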
+
+#endif /* _QTN_FMAC_SHM_IPC_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h
new file mode 100644
index 0000000..95a5f89
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _QTN_FMAC_SHM_IPC_DEFS_H_
+#define _QTN_FMAC_SHM_IPC_DEFS_H_
+
+#include <linux/types.h>
+
+#define QTN_IPC_REG_HDR_SZ (32)
+#define QTN_IPC_REG_SZ (4096)
+#define QTN_IPC_MAX_DATA_SZ (QTN_IPC_REG_SZ - QTN_IPC_REG_HDR_SZ)
+
+enum qtnf_shm_ipc_region_flags {
+ QTNF_SHM_IPC_NEW_DATA = BIT(0),
+ QTNF_SHM_IPC_ACK = BIT(1),
+};
+
+struct qtnf_shm_ipc_region_header {
+ __le32 flags;
+ __le16 data_len;
+} __packed;
+
+union qtnf_shm_ipc_region_headroom {
+ struct qtnf_shm_ipc_region_header hdr;
+ u8 headroom[QTN_IPC_REG_HDR_SZ];
+} __packed;
+
+struct qtnf_shm_ipc_region {
+ union qtnf_shm_ipc_region_headroom headroom;
+ u8 data[QTN_IPC_MAX_DATA_SZ];
+} __packed;
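+
+/* Layout note (illustrative): with QTN_IPC_REG_SZ = 4096 and
+ * QTN_IPC_REG_HDR_SZ = 32, a single transfer carries at most
+ * QTN_IPC_MAX_DATA_SZ = 4064 bytes of payload.
+ */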
+
+#endif /* _QTN_FMAC_SHM_IPC_DEFS_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/trans.c b/drivers/net/wireless/quantenna/qtnfmac/trans.c
new file mode 100644
index 0000000..ccddfeb
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/trans.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+
+#include "core.h"
+#include "commands.h"
+#include "event.h"
+#include "bus.h"
+
+#define QTNF_DEF_SYNC_CMD_TIMEOUT (5 * HZ)
+
+int qtnf_trans_send_cmd_with_resp(struct qtnf_bus *bus, struct sk_buff *cmd_skb,
+ struct sk_buff **response_skb)
+{
+ struct qtnf_cmd_ctl_node *ctl_node = &bus->trans.curr_cmd;
+ struct qlink_cmd *cmd = (void *)cmd_skb->data;
+ int ret = 0;
+ long status;
+ bool resp_not_handled = true;
+ struct sk_buff *resp_skb = NULL;
+
+ if (unlikely(!response_skb))
+ return -EFAULT;
+
+ spin_lock(&ctl_node->resp_lock);
+ ctl_node->seq_num++;
+ cmd->seq_num = cpu_to_le16(ctl_node->seq_num);
+ WARN(ctl_node->resp_skb, "qtnfmac: response skb not empty\n");
+ ctl_node->waiting_for_resp = true;
+ spin_unlock(&ctl_node->resp_lock);
+
+ ret = qtnf_bus_control_tx(bus, cmd_skb);
+ dev_kfree_skb(cmd_skb);
+
+ if (unlikely(ret))
+ goto out;
+
+ status = wait_for_completion_interruptible_timeout(
+ &ctl_node->cmd_resp_completion,
+ QTNF_DEF_SYNC_CMD_TIMEOUT);
+
+ spin_lock(&ctl_node->resp_lock);
+ resp_not_handled = ctl_node->waiting_for_resp;
+ resp_skb = ctl_node->resp_skb;
+ ctl_node->resp_skb = NULL;
+ ctl_node->waiting_for_resp = false;
+ spin_unlock(&ctl_node->resp_lock);
+
+ if (unlikely(status <= 0)) {
+ if (status == 0) {
+ ret = -ETIMEDOUT;
+ pr_err("response timeout\n");
+ } else {
+ ret = -EINTR;
+ pr_debug("interrupted\n");
+ }
+ }
+
+ if (unlikely(!resp_skb || resp_not_handled)) {
+ if (!ret)
+ ret = -EFAULT;
+
+ goto out;
+ }
+
+ ret = 0;
+ *response_skb = resp_skb;
+
+out:
+ if (unlikely(resp_skb && resp_not_handled))
+ dev_kfree_skb(resp_skb);
+
+ return ret;
+}
+
+static void qtnf_trans_signal_cmdresp(struct qtnf_bus *bus, struct sk_buff *skb)
+{
+ struct qtnf_cmd_ctl_node *ctl_node = &bus->trans.curr_cmd;
+ const struct qlink_resp *resp = (const struct qlink_resp *)skb->data;
+ const u16 recvd_seq_num = le16_to_cpu(resp->seq_num);
+
+ spin_lock(&ctl_node->resp_lock);
+
+ if (unlikely(!ctl_node->waiting_for_resp)) {
+ pr_err("unexpected response\n");
+ goto out_err;
+ }
+
+ if (unlikely(recvd_seq_num != ctl_node->seq_num)) {
+ pr_err("seq num mismatch\n");
+ goto out_err;
+ }
+
+ ctl_node->resp_skb = skb;
+ ctl_node->waiting_for_resp = false;
+
+ spin_unlock(&ctl_node->resp_lock);
+
+ complete(&ctl_node->cmd_resp_completion);
+ return;
+
+out_err:
+ spin_unlock(&ctl_node->resp_lock);
+ dev_kfree_skb(skb);
+}
+
+static int qtnf_trans_event_enqueue(struct qtnf_bus *bus, struct sk_buff *skb)
+{
+ struct qtnf_qlink_transport *trans = &bus->trans;
+
+ if (likely(skb_queue_len(&trans->event_queue) <
+ trans->event_queue_max_len)) {
+ skb_queue_tail(&trans->event_queue, skb);
+ queue_work(bus->workqueue, &bus->event_work);
+ } else {
+ pr_warn("event dropped due to queue overflow\n");
+ dev_kfree_skb(skb);
+ return -1;
+ }
+
+ return 0;
+}
+
+void qtnf_trans_init(struct qtnf_bus *bus)
+{
+ struct qtnf_qlink_transport *trans = &bus->trans;
+
+ init_completion(&trans->curr_cmd.cmd_resp_completion);
+ spin_lock_init(&trans->curr_cmd.resp_lock);
+
+ spin_lock(&trans->curr_cmd.resp_lock);
+ trans->curr_cmd.seq_num = 0;
+ trans->curr_cmd.waiting_for_resp = false;
+ trans->curr_cmd.resp_skb = NULL;
+ spin_unlock(&trans->curr_cmd.resp_lock);
+
+ /* Init event handling related fields */
+ skb_queue_head_init(&trans->event_queue);
+ trans->event_queue_max_len = QTNF_MAX_EVENT_QUEUE_LEN;
+}
+
+static void qtnf_trans_free_events(struct qtnf_bus *bus)
+{
+ struct sk_buff_head *event_queue = &bus->trans.event_queue;
+ struct sk_buff *current_event_skb = skb_dequeue(event_queue);
+
+ while (current_event_skb) {
+ dev_kfree_skb_any(current_event_skb);
+ current_event_skb = skb_dequeue(event_queue);
+ }
+}
+
+void qtnf_trans_free(struct qtnf_bus *bus)
+{
+ if (!bus) {
+ pr_err("invalid bus pointer\n");
+ return;
+ }
+
+ qtnf_trans_free_events(bus);
+}
+
+int qtnf_trans_handle_rx_ctl_packet(struct qtnf_bus *bus, struct sk_buff *skb)
+{
+ const struct qlink_msg_header *header = (void *)skb->data;
+ int ret = -1;
+
+ if (unlikely(skb->len < sizeof(*header))) {
+ pr_warn("packet is too small: %u\n", skb->len);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ if (unlikely(skb->len != le16_to_cpu(header->len))) {
+ pr_warn("cmd reply length mismatch: %u != %u\n",
+ skb->len, le16_to_cpu(header->len));
+ dev_kfree_skb(skb);
+ return -EFAULT;
+ }
+
+ switch (le16_to_cpu(header->type)) {
+ case QLINK_MSG_TYPE_CMDRSP:
+ if (unlikely(skb->len < sizeof(struct qlink_cmd))) {
+ pr_warn("cmd reply too short: %u\n", skb->len);
+ dev_kfree_skb(skb);
+ break;
+ }
+
+ qtnf_trans_signal_cmdresp(bus, skb);
+ break;
+ case QLINK_MSG_TYPE_EVENT:
+ if (unlikely(skb->len < sizeof(struct qlink_event))) {
+ pr_warn("event too short: %u\n", skb->len);
+ dev_kfree_skb(skb);
+ break;
+ }
+
+ ret = qtnf_trans_event_enqueue(bus, skb);
+ break;
+ default:
+ pr_warn("unknown packet type: %x\n", le16_to_cpu(header->type));
+ dev_kfree_skb(skb);
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qtnf_trans_handle_rx_ctl_packet);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/trans.h b/drivers/net/wireless/quantenna/qtnfmac/trans.h
new file mode 100644
index 0000000..9a473e0
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/trans.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _QTN_FMAC_TRANS_H_
+#define _QTN_FMAC_TRANS_H_
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/mutex.h>
+
+#include "qlink.h"
+
+#define QTNF_CMD_FLAG_RESP_REQ BIT(0)
+
+#define QTNF_MAX_CMD_BUF_SIZE 2048
+#define QTNF_DEF_CMD_HROOM 4
+
+struct qtnf_bus;
+
+struct qtnf_cmd_ctl_node {
+ struct completion cmd_resp_completion;
+ struct sk_buff *resp_skb;
+ u16 seq_num;
+ bool waiting_for_resp;
+ spinlock_t resp_lock; /* lock for resp_skb & waiting_for_resp changes */
+};
+
+struct qtnf_qlink_transport {
+ struct qtnf_cmd_ctl_node curr_cmd;
+ struct sk_buff_head event_queue;
+ size_t event_queue_max_len;
+};
+
+void qtnf_trans_init(struct qtnf_bus *bus);
+void qtnf_trans_free(struct qtnf_bus *bus);
+
+int qtnf_trans_send_next_cmd(struct qtnf_bus *bus);
+int qtnf_trans_handle_rx_ctl_packet(struct qtnf_bus *bus, struct sk_buff *skb);
+int qtnf_trans_send_cmd_with_resp(struct qtnf_bus *bus,
+ struct sk_buff *cmd_skb,
+ struct sk_buff **response_skb);
+
+#endif /* _QTN_FMAC_TRANS_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.c b/drivers/net/wireless/quantenna/qtnfmac/util.c
new file mode 100644
index 0000000..ed38e87
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/util.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2015-2016 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "util.h"
+
+void qtnf_sta_list_init(struct qtnf_sta_list *list)
+{
+ if (unlikely(!list))
+ return;
+
+ INIT_LIST_HEAD(&list->head);
+ atomic_set(&list->size, 0);
+}
+
+struct qtnf_sta_node *qtnf_sta_list_lookup(struct qtnf_sta_list *list,
+ const u8 *mac)
+{
+ struct qtnf_sta_node *node;
+
+ if (unlikely(!mac))
+ return NULL;
+
+ list_for_each_entry(node, &list->head, list) {
+ if (ether_addr_equal(node->mac_addr, mac))
+ return node;
+ }
+
+ return NULL;
+}
+
+struct qtnf_sta_node *qtnf_sta_list_lookup_index(struct qtnf_sta_list *list,
+ size_t index)
+{
+ struct qtnf_sta_node *node;
+
+ if (qtnf_sta_list_size(list) <= index)
+ return NULL;
+
+ list_for_each_entry(node, &list->head, list) {
+ if (index-- == 0)
+ return node;
+ }
+
+ return NULL;
+}
+
+struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_sta_list *list,
+ const u8 *mac)
+{
+ struct qtnf_sta_node *node;
+
+ if (unlikely(!mac))
+ return NULL;
+
+ node = qtnf_sta_list_lookup(list, mac);
+
+ if (node)
+ goto done;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (unlikely(!node))
+ goto done;
+
+ ether_addr_copy(node->mac_addr, mac);
+ list_add_tail(&node->list, &list->head);
+ atomic_inc(&list->size);
+
+done:
+ return node;
+}
+
+bool qtnf_sta_list_del(struct qtnf_sta_list *list, const u8 *mac)
+{
+ struct qtnf_sta_node *node;
+ bool ret = false;
+
+ node = qtnf_sta_list_lookup(list, mac);
+
+ if (node) {
+ list_del(&node->list);
+ atomic_dec(&list->size);
+ kfree(node);
+ ret = true;
+ }
+
+ return ret;
+}
+
+void qtnf_sta_list_free(struct qtnf_sta_list *list)
+{
+ struct qtnf_sta_node *node, *tmp;
+
+ atomic_set(&list->size, 0);
+
+ list_for_each_entry_safe(node, tmp, &list->head, list) {
+ list_del(&node->list);
+ kfree(node);
+ }
+
+ INIT_LIST_HEAD(&list->head);
+}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.h b/drivers/net/wireless/quantenna/qtnfmac/util.h
new file mode 100644
index 0000000..0359eae
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/util.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2015 Quantenna Communications
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef QTNFMAC_UTIL_H
+#define QTNFMAC_UTIL_H
+
+#include <linux/kernel.h>
+#include "core.h"
+
+void qtnf_sta_list_init(struct qtnf_sta_list *list);
+
+struct qtnf_sta_node *qtnf_sta_list_lookup(struct qtnf_sta_list *list,
+ const u8 *mac);
+struct qtnf_sta_node *qtnf_sta_list_lookup_index(struct qtnf_sta_list *list,
+ size_t index);
+struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_sta_list *list,
+ const u8 *mac);
+bool qtnf_sta_list_del(struct qtnf_sta_list *list, const u8 *mac);
+
+void qtnf_sta_list_free(struct qtnf_sta_list *list);
+
+static inline size_t qtnf_sta_list_size(const struct qtnf_sta_list *list)
+{
+ return atomic_read(&list->size);
+}
+
+static inline bool qtnf_sta_list_empty(const struct qtnf_sta_list *list)
+{
+ return list_empty(&list->head);
+}
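+
+/* Usage sketch (illustrative; 'mac' is any 6-byte address):
+ *
+ *	qtnf_sta_list_init(&list);
+ *	qtnf_sta_list_add(&list, mac);
+ *	if (qtnf_sta_list_lookup(&list, mac))
+ *		qtnf_sta_list_del(&list, mac);
+ *	qtnf_sta_list_free(&list);
+ */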
+
+#endif /* QTNFMAC_UTIL_H */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
index 1987443..0bc8b02 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
@@ -77,10 +77,11 @@
mutex_unlock(&rt2x00dev->csr_mutex);
}
-static void rt2400pci_bbp_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int word, u8 *value)
+static u8 rt2400pci_bbp_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int word)
{
u32 reg;
+ u8 value;
mutex_lock(&rt2x00dev->csr_mutex);
@@ -103,9 +104,11 @@
WAIT_FOR_BBP(rt2x00dev, &reg);
}
- *value = rt2x00_get_field32(reg, BBPCSR_VALUE);
+ value = rt2x00_get_field32(reg, BBPCSR_VALUE);
mutex_unlock(&rt2x00dev->csr_mutex);
+
+ return value;
}
static void rt2400pci_rf_write(struct rt2x00_dev *rt2x00dev,
@@ -138,7 +141,7 @@
struct rt2x00_dev *rt2x00dev = eeprom->data;
u32 reg;
- rt2x00mmio_register_read(rt2x00dev, CSR21, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR21);
eeprom->reg_data_in = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_IN);
eeprom->reg_data_out = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_OUT);
@@ -202,7 +205,7 @@
{
u32 reg;
- rt2x00mmio_register_read(rt2x00dev, GPIOCSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, GPIOCSR);
return rt2x00_get_field32(reg, GPIOCSR_VAL0);
}
@@ -215,7 +218,7 @@
unsigned int enabled = brightness != LED_OFF;
u32 reg;
- rt2x00mmio_register_read(led->rt2x00dev, LEDCSR, &reg);
+ reg = rt2x00mmio_register_read(led->rt2x00dev, LEDCSR);
if (led->type == LED_TYPE_RADIO || led->type == LED_TYPE_ASSOC)
rt2x00_set_field32(&reg, LEDCSR_LINK, enabled);
@@ -233,7 +236,7 @@
container_of(led_cdev, struct rt2x00_led, led_dev);
u32 reg;
- rt2x00mmio_register_read(led->rt2x00dev, LEDCSR, &reg);
+ reg = rt2x00mmio_register_read(led->rt2x00dev, LEDCSR);
rt2x00_set_field32(&reg, LEDCSR_ON_PERIOD, *delay_on);
rt2x00_set_field32(&reg, LEDCSR_OFF_PERIOD, *delay_off);
rt2x00mmio_register_write(led->rt2x00dev, LEDCSR, reg);
@@ -266,7 +269,7 @@
* Note that the version error will always be dropped
* since there is no filter for it at this time.
*/
- rt2x00mmio_register_read(rt2x00dev, RXCSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, RXCSR0);
rt2x00_set_field32(&reg, RXCSR0_DROP_CRC,
!(filter_flags & FIF_FCSFAIL));
rt2x00_set_field32(&reg, RXCSR0_DROP_PHYSICAL,
@@ -295,14 +298,14 @@
* Enable beacon config
*/
bcn_preload = PREAMBLE + GET_DURATION(IEEE80211_HEADER, 20);
- rt2x00mmio_register_read(rt2x00dev, BCNCSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, BCNCSR1);
rt2x00_set_field32(&reg, BCNCSR1_PRELOAD, bcn_preload);
rt2x00mmio_register_write(rt2x00dev, BCNCSR1, reg);
/*
* Enable synchronisation.
*/
- rt2x00mmio_register_read(rt2x00dev, CSR14, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR14);
rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync);
rt2x00mmio_register_write(rt2x00dev, CSR14, reg);
}
@@ -330,35 +333,35 @@
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
preamble_mask = erp->short_preamble << 3;
- rt2x00mmio_register_read(rt2x00dev, TXCSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR1);
rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, 0x1ff);
rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, 0x13a);
rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER);
rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1);
rt2x00mmio_register_write(rt2x00dev, TXCSR1, reg);
- rt2x00mmio_register_read(rt2x00dev, ARCSR2, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, ARCSR2);
rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00);
rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04);
rt2x00_set_field32(&reg, ARCSR2_LENGTH,
GET_DURATION(ACK_SIZE, 10));
rt2x00mmio_register_write(rt2x00dev, ARCSR2, reg);
- rt2x00mmio_register_read(rt2x00dev, ARCSR3, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, ARCSR3);
rt2x00_set_field32(&reg, ARCSR3_SIGNAL, 0x01 | preamble_mask);
rt2x00_set_field32(&reg, ARCSR3_SERVICE, 0x04);
rt2x00_set_field32(&reg, ARCSR2_LENGTH,
GET_DURATION(ACK_SIZE, 20));
rt2x00mmio_register_write(rt2x00dev, ARCSR3, reg);
- rt2x00mmio_register_read(rt2x00dev, ARCSR4, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, ARCSR4);
rt2x00_set_field32(&reg, ARCSR4_SIGNAL, 0x02 | preamble_mask);
rt2x00_set_field32(&reg, ARCSR4_SERVICE, 0x04);
rt2x00_set_field32(&reg, ARCSR2_LENGTH,
GET_DURATION(ACK_SIZE, 55));
rt2x00mmio_register_write(rt2x00dev, ARCSR4, reg);
- rt2x00mmio_register_read(rt2x00dev, ARCSR5, ®);
+ reg = rt2x00mmio_register_read(rt2x00dev, ARCSR5);
rt2x00_set_field32(®, ARCSR5_SIGNAL, 0x03 | preamble_mask);
rt2x00_set_field32(®, ARCSR5_SERVICE, 0x84);
rt2x00_set_field32(®, ARCSR2_LENGTH,
@@ -370,23 +373,23 @@
rt2x00mmio_register_write(rt2x00dev, ARCSR1, erp->basic_rates);
if (changed & BSS_CHANGED_ERP_SLOT) {
- rt2x00mmio_register_read(rt2x00dev, CSR11, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR11);
rt2x00_set_field32(&reg, CSR11_SLOT_TIME, erp->slot_time);
rt2x00mmio_register_write(rt2x00dev, CSR11, reg);
- rt2x00mmio_register_read(rt2x00dev, CSR18, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR18);
rt2x00_set_field32(&reg, CSR18_SIFS, erp->sifs);
rt2x00_set_field32(&reg, CSR18_PIFS, erp->pifs);
rt2x00mmio_register_write(rt2x00dev, CSR18, reg);
- rt2x00mmio_register_read(rt2x00dev, CSR19, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR19);
rt2x00_set_field32(&reg, CSR19_DIFS, erp->difs);
rt2x00_set_field32(&reg, CSR19_EIFS, erp->eifs);
rt2x00mmio_register_write(rt2x00dev, CSR19, reg);
}
if (changed & BSS_CHANGED_BEACON_INT) {
- rt2x00mmio_register_read(rt2x00dev, CSR12, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR12);
rt2x00_set_field32(&reg, CSR12_BEACON_INTERVAL,
erp->beacon_int * 16);
rt2x00_set_field32(&reg, CSR12_CFP_MAX_DURATION,
@@ -408,8 +411,8 @@
BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
ant->tx == ANTENNA_SW_DIVERSITY);
- rt2400pci_bbp_read(rt2x00dev, 4, &r4);
- rt2400pci_bbp_read(rt2x00dev, 1, &r1);
+ r4 = rt2400pci_bbp_read(rt2x00dev, 4);
+ r1 = rt2400pci_bbp_read(rt2x00dev, 1);
/*
* Configure the TX antenna.
@@ -495,7 +498,7 @@
/*
* Clear false CRC during channel switch.
*/
- rt2x00mmio_register_read(rt2x00dev, CNT0, &rf->rf1);
+ rf->rf1 = rt2x00mmio_register_read(rt2x00dev, CNT0);
}
static void rt2400pci_config_txpower(struct rt2x00_dev *rt2x00dev, int txpower)
@@ -508,7 +511,7 @@
{
u32 reg;
- rt2x00mmio_register_read(rt2x00dev, CSR11, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR11);
rt2x00_set_field32(&reg, CSR11_LONG_RETRY,
libconf->conf->long_frame_max_tx_count);
rt2x00_set_field32(&reg, CSR11_SHORT_RETRY,
@@ -525,7 +528,7 @@
u32 reg;
if (state == STATE_SLEEP) {
- rt2x00mmio_register_read(rt2x00dev, CSR20, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR20);
rt2x00_set_field32(&reg, CSR20_DELAY_AFTER_TBCN,
(rt2x00dev->beacon_int - 20) * 16);
rt2x00_set_field32(&reg, CSR20_TBCN_BEFORE_WAKEUP,
@@ -538,7 +541,7 @@
rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1);
rt2x00mmio_register_write(rt2x00dev, CSR20, reg);
} else {
- rt2x00mmio_register_read(rt2x00dev, CSR20, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR20);
rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0);
rt2x00mmio_register_write(rt2x00dev, CSR20, reg);
}
@@ -566,7 +569,7 @@
{
u32 reg;
- rt2x00mmio_register_read(rt2x00dev, CSR11, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR11);
rt2x00_set_field32(&reg, CSR11_CWMIN, cw_min);
rt2x00_set_field32(&reg, CSR11_CWMAX, cw_max);
rt2x00mmio_register_write(rt2x00dev, CSR11, reg);
@@ -584,13 +587,13 @@
/*
* Update FCS error count from register.
*/
- rt2x00mmio_register_read(rt2x00dev, CNT0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CNT0);
qual->rx_failed = rt2x00_get_field32(reg, CNT0_FCS_ERROR);
/*
* Update False CCA count from register.
*/
- rt2400pci_bbp_read(rt2x00dev, 39, &bbp);
+ bbp = rt2400pci_bbp_read(rt2x00dev, 39);
qual->false_cca = bbp;
}
@@ -639,12 +642,12 @@
switch (queue->qid) {
case QID_RX:
- rt2x00mmio_register_read(rt2x00dev, RXCSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, RXCSR0);
rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 0);
rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg);
break;
case QID_BEACON:
- rt2x00mmio_register_read(rt2x00dev, CSR14, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR14);
rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
rt2x00_set_field32(&reg, CSR14_TBCN, 1);
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
@@ -662,17 +665,17 @@
switch (queue->qid) {
case QID_AC_VO:
- rt2x00mmio_register_read(rt2x00dev, TXCSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR0);
rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, 1);
rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg);
break;
case QID_AC_VI:
- rt2x00mmio_register_read(rt2x00dev, TXCSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR0);
rt2x00_set_field32(&reg, TXCSR0_KICK_TX, 1);
rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg);
break;
case QID_ATIM:
- rt2x00mmio_register_read(rt2x00dev, TXCSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR0);
rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, 1);
rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg);
break;
@@ -690,17 +693,17 @@
case QID_AC_VO:
case QID_AC_VI:
case QID_ATIM:
- rt2x00mmio_register_read(rt2x00dev, TXCSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR0);
rt2x00_set_field32(&reg, TXCSR0_ABORT, 1);
rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg);
break;
case QID_RX:
- rt2x00mmio_register_read(rt2x00dev, RXCSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, RXCSR0);
rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 1);
rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg);
break;
case QID_BEACON:
- rt2x00mmio_register_read(rt2x00dev, CSR14, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR14);
rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0);
rt2x00_set_field32(&reg, CSR14_TBCN, 0);
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
@@ -725,11 +728,11 @@
u32 word;
if (entry->queue->qid == QID_RX) {
- rt2x00_desc_read(entry_priv->desc, 0, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 0);
return rt2x00_get_field32(word, RXD_W0_OWNER_NIC);
} else {
- rt2x00_desc_read(entry_priv->desc, 0, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 0);
return (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
rt2x00_get_field32(word, TXD_W0_VALID));
@@ -743,19 +746,19 @@
u32 word;
if (entry->queue->qid == QID_RX) {
- rt2x00_desc_read(entry_priv->desc, 2, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 2);
rt2x00_set_field32(&word, RXD_W2_BUFFER_LENGTH, entry->skb->len);
rt2x00_desc_write(entry_priv->desc, 2, word);
- rt2x00_desc_read(entry_priv->desc, 1, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 1);
rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
rt2x00_desc_write(entry_priv->desc, 1, word);
- rt2x00_desc_read(entry_priv->desc, 0, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 0);
rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1);
rt2x00_desc_write(entry_priv->desc, 0, word);
} else {
- rt2x00_desc_read(entry_priv->desc, 0, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 0);
rt2x00_set_field32(&word, TXD_W0_VALID, 0);
rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0);
rt2x00_desc_write(entry_priv->desc, 0, word);
@@ -770,7 +773,7 @@
/*
* Initialize registers.
*/
- rt2x00mmio_register_read(rt2x00dev, TXCSR2, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR2);
rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size);
rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit);
rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit);
@@ -778,36 +781,36 @@
rt2x00mmio_register_write(rt2x00dev, TXCSR2, reg);
entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
- rt2x00mmio_register_read(rt2x00dev, TXCSR3, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR3);
rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER,
entry_priv->desc_dma);
rt2x00mmio_register_write(rt2x00dev, TXCSR3, reg);
entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
- rt2x00mmio_register_read(rt2x00dev, TXCSR5, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR5);
rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER,
entry_priv->desc_dma);
rt2x00mmio_register_write(rt2x00dev, TXCSR5, reg);
entry_priv = rt2x00dev->atim->entries[0].priv_data;
- rt2x00mmio_register_read(rt2x00dev, TXCSR4, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR4);
rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
entry_priv->desc_dma);
rt2x00mmio_register_write(rt2x00dev, TXCSR4, reg);
entry_priv = rt2x00dev->bcn->entries[0].priv_data;
- rt2x00mmio_register_read(rt2x00dev, TXCSR6, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR6);
rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
entry_priv->desc_dma);
rt2x00mmio_register_write(rt2x00dev, TXCSR6, reg);
- rt2x00mmio_register_read(rt2x00dev, RXCSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, RXCSR1);
rt2x00_set_field32(&reg, RXCSR1_RXD_SIZE, rt2x00dev->rx->desc_size);
rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->limit);
rt2x00mmio_register_write(rt2x00dev, RXCSR1, reg);
entry_priv = rt2x00dev->rx->entries[0].priv_data;
- rt2x00mmio_register_read(rt2x00dev, RXCSR2, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, RXCSR2);
rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER,
entry_priv->desc_dma);
rt2x00mmio_register_write(rt2x00dev, RXCSR2, reg);
@@ -824,18 +827,18 @@
rt2x00mmio_register_write(rt2x00dev, PSCSR2, 0x00023f20);
rt2x00mmio_register_write(rt2x00dev, PSCSR3, 0x00000002);
- rt2x00mmio_register_read(rt2x00dev, TIMECSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TIMECSR);
rt2x00_set_field32(&reg, TIMECSR_US_COUNT, 33);
rt2x00_set_field32(&reg, TIMECSR_US_64_COUNT, 63);
rt2x00_set_field32(&reg, TIMECSR_BEACON_EXPECT, 0);
rt2x00mmio_register_write(rt2x00dev, TIMECSR, reg);
- rt2x00mmio_register_read(rt2x00dev, CSR9, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR9);
rt2x00_set_field32(&reg, CSR9_MAX_FRAME_UNIT,
(rt2x00dev->rx->data_size / 128));
rt2x00mmio_register_write(rt2x00dev, CSR9, reg);
- rt2x00mmio_register_read(rt2x00dev, CSR14, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR14);
rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0);
rt2x00_set_field32(&reg, CSR14_TSF_SYNC, 0);
rt2x00_set_field32(&reg, CSR14_TBCN, 0);
@@ -848,14 +851,14 @@
rt2x00mmio_register_write(rt2x00dev, CNT3, 0x3f080000);
- rt2x00mmio_register_read(rt2x00dev, ARCSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, ARCSR0);
rt2x00_set_field32(&reg, ARCSR0_AR_BBP_DATA0, 133);
rt2x00_set_field32(&reg, ARCSR0_AR_BBP_ID0, 134);
rt2x00_set_field32(&reg, ARCSR0_AR_BBP_DATA1, 136);
rt2x00_set_field32(&reg, ARCSR0_AR_BBP_ID1, 135);
rt2x00mmio_register_write(rt2x00dev, ARCSR0, reg);
- rt2x00mmio_register_read(rt2x00dev, RXCSR3, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, RXCSR3);
rt2x00_set_field32(&reg, RXCSR3_BBP_ID0, 3); /* Tx power.*/
rt2x00_set_field32(&reg, RXCSR3_BBP_ID0_VALID, 1);
rt2x00_set_field32(&reg, RXCSR3_BBP_ID1, 32); /* Signal */
@@ -872,24 +875,24 @@
rt2x00mmio_register_write(rt2x00dev, MACCSR0, 0x00217223);
rt2x00mmio_register_write(rt2x00dev, MACCSR1, 0x00235518);
- rt2x00mmio_register_read(rt2x00dev, MACCSR2, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MACCSR2);
rt2x00_set_field32(&reg, MACCSR2_DELAY, 64);
rt2x00mmio_register_write(rt2x00dev, MACCSR2, reg);
- rt2x00mmio_register_read(rt2x00dev, RALINKCSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, RALINKCSR);
rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA0, 17);
rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID0, 154);
rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA1, 0);
rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID1, 154);
rt2x00mmio_register_write(rt2x00dev, RALINKCSR, reg);
- rt2x00mmio_register_read(rt2x00dev, CSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR1);
rt2x00_set_field32(&reg, CSR1_SOFT_RESET, 1);
rt2x00_set_field32(&reg, CSR1_BBP_RESET, 0);
rt2x00_set_field32(&reg, CSR1_HOST_READY, 0);
rt2x00mmio_register_write(rt2x00dev, CSR1, reg);
- rt2x00mmio_register_read(rt2x00dev, CSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR1);
rt2x00_set_field32(&reg, CSR1_SOFT_RESET, 0);
rt2x00_set_field32(&reg, CSR1_HOST_READY, 1);
rt2x00mmio_register_write(rt2x00dev, CSR1, reg);
@@ -899,8 +902,8 @@
* These registers are cleared on read,
* so we may pass a useless variable to store the value.
*/
- rt2x00mmio_register_read(rt2x00dev, CNT0, &reg);
- rt2x00mmio_register_read(rt2x00dev, CNT4, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CNT0);
+ reg = rt2x00mmio_register_read(rt2x00dev, CNT4);
return 0;
}
@@ -911,7 +914,7 @@
u8 value;
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2400pci_bbp_read(rt2x00dev, 0, &value);
+ value = rt2400pci_bbp_read(rt2x00dev, 0);
if ((value != 0xff) && (value != 0x00))
return 0;
udelay(REGISTER_BUSY_DELAY);
@@ -947,7 +950,7 @@
rt2400pci_bbp_write(rt2x00dev, 31, 0x00);
for (i = 0; i < EEPROM_BBP_SIZE; i++) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i);
if (eeprom != 0xffff && eeprom != 0x0000) {
reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
@@ -974,7 +977,7 @@
* should clear the register to assure a clean state.
*/
if (state == STATE_RADIO_IRQ_ON) {
- rt2x00mmio_register_read(rt2x00dev, CSR7, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR7);
rt2x00mmio_register_write(rt2x00dev, CSR7, reg);
}
@@ -984,7 +987,7 @@
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
- rt2x00mmio_register_read(rt2x00dev, CSR8, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR8);
rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask);
rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask);
rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, mask);
@@ -1037,7 +1040,7 @@
put_to_sleep = (state != STATE_AWAKE);
- rt2x00mmio_register_read(rt2x00dev, PWRCSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, PWRCSR1);
rt2x00_set_field32(&reg, PWRCSR1_SET_STATE, 1);
rt2x00_set_field32(&reg, PWRCSR1_BBP_DESIRE_STATE, state);
rt2x00_set_field32(&reg, PWRCSR1_RF_DESIRE_STATE, state);
@@ -1050,7 +1053,7 @@
* device has entered the correct state.
*/
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2x00mmio_register_read(rt2x00dev, PWRCSR1, &reg2);
+ reg2 = rt2x00mmio_register_read(rt2x00dev, PWRCSR1);
bbp_state = rt2x00_get_field32(reg2, PWRCSR1_BBP_CURR_STATE);
rf_state = rt2x00_get_field32(reg2, PWRCSR1_RF_CURR_STATE);
if (bbp_state == state && rf_state == state)
@@ -1110,16 +1113,16 @@
/*
* Start writing the descriptor words.
*/
- rt2x00_desc_read(txd, 1, &word);
+ word = rt2x00_desc_read(txd, 1);
rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
rt2x00_desc_write(txd, 1, word);
- rt2x00_desc_read(txd, 2, &word);
+ word = rt2x00_desc_read(txd, 2);
rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH, txdesc->length);
rt2x00_set_field32(&word, TXD_W2_DATABYTE_COUNT, txdesc->length);
rt2x00_desc_write(txd, 2, word);
- rt2x00_desc_read(txd, 3, &word);
+ word = rt2x00_desc_read(txd, 3);
rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->u.plcp.signal);
rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_REGNUM, 5);
rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_BUSY, 1);
@@ -1128,7 +1131,7 @@
rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_BUSY, 1);
rt2x00_desc_write(txd, 3, word);
- rt2x00_desc_read(txd, 4, &word);
+ word = rt2x00_desc_read(txd, 4);
rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW,
txdesc->u.plcp.length_low);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_REGNUM, 8);
@@ -1144,7 +1147,7 @@
* the device, whereby the device may take hold of the TXD before we
* finished updating it.
*/
- rt2x00_desc_read(txd, 0, &word);
+ word = rt2x00_desc_read(txd, 0);
rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1);
rt2x00_set_field32(&word, TXD_W0_VALID, 1);
rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
@@ -1180,7 +1183,7 @@
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
- rt2x00mmio_register_read(rt2x00dev, CSR14, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR14);
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
rt2x00mmio_register_write(rt2x00dev, CSR14, reg);
@@ -1225,10 +1228,10 @@
u32 rx_low;
u32 rx_high;
- rt2x00_desc_read(entry_priv->desc, 0, &word0);
- rt2x00_desc_read(entry_priv->desc, 2, &word2);
- rt2x00_desc_read(entry_priv->desc, 3, &word3);
- rt2x00_desc_read(entry_priv->desc, 4, &word4);
+ word0 = rt2x00_desc_read(entry_priv->desc, 0);
+ word2 = rt2x00_desc_read(entry_priv->desc, 2);
+ word3 = rt2x00_desc_read(entry_priv->desc, 3);
+ word4 = rt2x00_desc_read(entry_priv->desc, 4);
if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
@@ -1282,7 +1285,7 @@
while (!rt2x00queue_empty(queue)) {
entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
entry_priv = entry->priv_data;
- rt2x00_desc_read(entry_priv->desc, 0, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 0);
if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
!rt2x00_get_field32(word, TXD_W0_VALID))
@@ -1320,7 +1323,7 @@
*/
spin_lock_irq(&rt2x00dev->irqmask_lock);
- rt2x00mmio_register_read(rt2x00dev, CSR8, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR8);
rt2x00_set_field32(&reg, irq_field, 0);
rt2x00mmio_register_write(rt2x00dev, CSR8, reg);
@@ -1345,7 +1348,7 @@
if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) {
spin_lock_irq(&rt2x00dev->irqmask_lock);
- rt2x00mmio_register_read(rt2x00dev, CSR8, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR8);
rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
@@ -1381,7 +1384,7 @@
* Get the interrupt sources & saved to local variable.
* Write register value back to clear pending interrupts.
*/
- rt2x00mmio_register_read(rt2x00dev, CSR7, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR7);
rt2x00mmio_register_write(rt2x00dev, CSR7, reg);
if (!reg)
@@ -1419,7 +1422,7 @@
*/
spin_lock(&rt2x00dev->irqmask_lock);
- rt2x00mmio_register_read(rt2x00dev, CSR8, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR8);
reg |= mask;
rt2x00mmio_register_write(rt2x00dev, CSR8, reg);
@@ -1440,7 +1443,7 @@
u16 word;
u8 *mac;
- rt2x00mmio_register_read(rt2x00dev, CSR21, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR21);
eeprom.data = rt2x00dev;
eeprom.register_read = rt2400pci_eepromregister_read;
@@ -1461,7 +1464,7 @@
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
rt2x00lib_set_mac_address(rt2x00dev, mac);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA);
if (word == 0xffff) {
rt2x00_err(rt2x00dev, "Invalid EEPROM data detected\n");
return -EINVAL;
@@ -1479,13 +1482,13 @@
/*
* Read EEPROM word for configuration.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA);
/*
* Identify RF chipset.
*/
value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
- rt2x00mmio_register_read(rt2x00dev, CSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR0);
rt2x00_set_chip(rt2x00dev, RT2460, value,
rt2x00_get_field32(reg, CSR0_REVISION));
@@ -1630,7 +1633,7 @@
* Enable rfkill polling by setting GPIO direction of the
* rfkill switch GPIO pin correctly.
*/
- rt2x00mmio_register_read(rt2x00dev, GPIOCSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, GPIOCSR);
rt2x00_set_field32(&reg, GPIOCSR_DIR0, 1);
rt2x00mmio_register_write(rt2x00dev, GPIOCSR, reg);
@@ -1692,9 +1695,9 @@
u64 tsf;
u32 reg;
- rt2x00mmio_register_read(rt2x00dev, CSR17, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR17);
tsf = (u64) rt2x00_get_field32(reg, CSR17_HIGH_TSFTIMER) << 32;
- rt2x00mmio_register_read(rt2x00dev, CSR16, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR16);
tsf |= rt2x00_get_field32(reg, CSR16_LOW_TSFTIMER);
return tsf;
@@ -1705,7 +1708,7 @@
struct rt2x00_dev *rt2x00dev = hw->priv;
u32 reg;
- rt2x00mmio_register_read(rt2x00dev, CSR15, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR15);
return rt2x00_get_field32(reg, CSR15_BEACON_SENT);
}
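
The hunks above apply one mechanical conversion: register readers that used to fill an out parameter now return the value, so a read composes directly with the read-modify-write sequences that dominate this driver. A minimal standalone sketch of the pattern, with a hypothetical reg_read() pair standing in for rt2x00mmio_register_read() (the names and the fake register are illustrative, not the driver's API):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_csr;	/* stand-in for a memory-mapped register */

/* Old style: the caller passes a pointer that receives the value. */
static void reg_read_old(uint32_t *value)
{
	*value = fake_csr;
}

/* New style, as in the hunks above: the value is returned directly. */
static uint32_t reg_read_new(void)
{
	return fake_csr;
}

int main(void)
{
	uint32_t reg;

	fake_csr = 0x00217223;

	reg_read_old(&reg);		/* before: scratch pointer, two steps */
	printf("old: 0x%08x\n", reg);

	reg = reg_read_new();		/* after: plain assignment */
	printf("new: 0x%08x\n", reg);
	return 0;
}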
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
index 791434d..1ff5434 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
@@ -77,10 +77,11 @@
mutex_unlock(&rt2x00dev->csr_mutex);
}
-static void rt2500pci_bbp_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int word, u8 *value)
+static u8 rt2500pci_bbp_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int word)
{
u32 reg;
+ u8 value;
mutex_lock(&rt2x00dev->csr_mutex);
@@ -103,9 +104,11 @@
WAIT_FOR_BBP(rt2x00dev, &reg);
}
- *value = rt2x00_get_field32(reg, BBPCSR_VALUE);
+ value = rt2x00_get_field32(reg, BBPCSR_VALUE);
mutex_unlock(&rt2x00dev->csr_mutex);
+
+ return value;
}
static void rt2500pci_rf_write(struct rt2x00_dev *rt2x00dev,
@@ -138,7 +141,7 @@
struct rt2x00_dev *rt2x00dev = eeprom->data;
u32 reg;
- rt2x00mmio_register_read(rt2x00dev, CSR21, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR21);
eeprom->reg_data_in = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_IN);
eeprom->reg_data_out = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_OUT);
@@ -202,7 +205,7 @@
{
u32 reg;
- rt2x00mmio_register_read(rt2x00dev, GPIOCSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, GPIOCSR);
return rt2x00_get_field32(reg, GPIOCSR_VAL0);
}
@@ -215,7 +218,7 @@
unsigned int enabled = brightness != LED_OFF;
u32 reg;
- rt2x00mmio_register_read(led->rt2x00dev, LEDCSR, &reg);
+ reg = rt2x00mmio_register_read(led->rt2x00dev, LEDCSR);
if (led->type == LED_TYPE_RADIO || led->type == LED_TYPE_ASSOC)
rt2x00_set_field32(&reg, LEDCSR_LINK, enabled);
@@ -233,7 +236,7 @@
container_of(led_cdev, struct rt2x00_led, led_dev);
u32 reg;
- rt2x00mmio_register_read(led->rt2x00dev, LEDCSR, &reg);
+ reg = rt2x00mmio_register_read(led->rt2x00dev, LEDCSR);
rt2x00_set_field32(&reg, LEDCSR_ON_PERIOD, *delay_on);
rt2x00_set_field32(&reg, LEDCSR_OFF_PERIOD, *delay_off);
rt2x00mmio_register_write(led->rt2x00dev, LEDCSR, reg);
@@ -267,7 +270,7 @@
* and broadcast frames will always be accepted since
* there is no filter for it at this time.
*/
- rt2x00mmio_register_read(rt2x00dev, RXCSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, RXCSR0);
rt2x00_set_field32(&reg, RXCSR0_DROP_CRC,
!(filter_flags & FIF_FCSFAIL));
rt2x00_set_field32(&reg, RXCSR0_DROP_PHYSICAL,
@@ -300,7 +303,7 @@
* Enable beacon config
*/
bcn_preload = PREAMBLE + GET_DURATION(IEEE80211_HEADER, 20);
- rt2x00mmio_register_read(rt2x00dev, BCNCSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, BCNCSR1);
rt2x00_set_field32(&reg, BCNCSR1_PRELOAD, bcn_preload);
rt2x00_set_field32(&reg, BCNCSR1_BEACON_CWMIN, queue->cw_min);
rt2x00mmio_register_write(rt2x00dev, BCNCSR1, reg);
@@ -308,7 +311,7 @@
/*
* Enable synchronisation.
*/
- rt2x00mmio_register_read(rt2x00dev, CSR14, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR14);
rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync);
rt2x00mmio_register_write(rt2x00dev, CSR14, reg);
}
@@ -335,35 +338,35 @@
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
preamble_mask = erp->short_preamble << 3;
- rt2x00mmio_register_read(rt2x00dev, TXCSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR1);
rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, 0x162);
rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, 0xa2);
rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER);
rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1);
rt2x00mmio_register_write(rt2x00dev, TXCSR1, reg);
- rt2x00mmio_register_read(rt2x00dev, ARCSR2, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, ARCSR2);
rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00);
rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04);
rt2x00_set_field32(&reg, ARCSR2_LENGTH,
GET_DURATION(ACK_SIZE, 10));
rt2x00mmio_register_write(rt2x00dev, ARCSR2, reg);
- rt2x00mmio_register_read(rt2x00dev, ARCSR3, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, ARCSR3);
rt2x00_set_field32(&reg, ARCSR3_SIGNAL, 0x01 | preamble_mask);
rt2x00_set_field32(&reg, ARCSR3_SERVICE, 0x04);
rt2x00_set_field32(&reg, ARCSR2_LENGTH,
GET_DURATION(ACK_SIZE, 20));
rt2x00mmio_register_write(rt2x00dev, ARCSR3, reg);
- rt2x00mmio_register_read(rt2x00dev, ARCSR4, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, ARCSR4);
rt2x00_set_field32(&reg, ARCSR4_SIGNAL, 0x02 | preamble_mask);
rt2x00_set_field32(&reg, ARCSR4_SERVICE, 0x04);
rt2x00_set_field32(&reg, ARCSR2_LENGTH,
GET_DURATION(ACK_SIZE, 55));
rt2x00mmio_register_write(rt2x00dev, ARCSR4, reg);
- rt2x00mmio_register_read(rt2x00dev, ARCSR5, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, ARCSR5);
rt2x00_set_field32(&reg, ARCSR5_SIGNAL, 0x03 | preamble_mask);
rt2x00_set_field32(&reg, ARCSR5_SERVICE, 0x84);
rt2x00_set_field32(&reg, ARCSR2_LENGTH,
@@ -375,23 +378,23 @@
rt2x00mmio_register_write(rt2x00dev, ARCSR1, erp->basic_rates);
if (changed & BSS_CHANGED_ERP_SLOT) {
- rt2x00mmio_register_read(rt2x00dev, CSR11, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR11);
rt2x00_set_field32(&reg, CSR11_SLOT_TIME, erp->slot_time);
rt2x00mmio_register_write(rt2x00dev, CSR11, reg);
- rt2x00mmio_register_read(rt2x00dev, CSR18, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR18);
rt2x00_set_field32(&reg, CSR18_SIFS, erp->sifs);
rt2x00_set_field32(&reg, CSR18_PIFS, erp->pifs);
rt2x00mmio_register_write(rt2x00dev, CSR18, reg);
- rt2x00mmio_register_read(rt2x00dev, CSR19, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR19);
rt2x00_set_field32(&reg, CSR19_DIFS, erp->difs);
rt2x00_set_field32(&reg, CSR19_EIFS, erp->eifs);
rt2x00mmio_register_write(rt2x00dev, CSR19, reg);
}
if (changed & BSS_CHANGED_BEACON_INT) {
- rt2x00mmio_register_read(rt2x00dev, CSR12, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR12);
rt2x00_set_field32(&reg, CSR12_BEACON_INTERVAL,
erp->beacon_int * 16);
rt2x00_set_field32(&reg, CSR12_CFP_MAX_DURATION,
@@ -415,9 +418,9 @@
BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
ant->tx == ANTENNA_SW_DIVERSITY);
- rt2x00mmio_register_read(rt2x00dev, BBPCSR1, &reg);
- rt2500pci_bbp_read(rt2x00dev, 14, &r14);
- rt2500pci_bbp_read(rt2x00dev, 2, &r2);
+ reg = rt2x00mmio_register_read(rt2x00dev, BBPCSR1);
+ r14 = rt2500pci_bbp_read(rt2x00dev, 14);
+ r2 = rt2500pci_bbp_read(rt2x00dev, 2);
/*
* Configure the TX antenna.
@@ -538,7 +541,7 @@
/*
* Clear false CRC during channel switch.
*/
- rt2x00mmio_register_read(rt2x00dev, CNT0, &rf->rf1);
+ rf->rf1 = rt2x00mmio_register_read(rt2x00dev, CNT0);
}
static void rt2500pci_config_txpower(struct rt2x00_dev *rt2x00dev,
@@ -546,7 +549,7 @@
{
u32 rf3;
- rt2x00_rf_read(rt2x00dev, 3, &rf3);
+ rf3 = rt2x00_rf_read(rt2x00dev, 3);
rt2x00_set_field32(&rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
rt2500pci_rf_write(rt2x00dev, 3, rf3);
}
@@ -556,7 +559,7 @@
{
u32 reg;
- rt2x00mmio_register_read(rt2x00dev, CSR11, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR11);
rt2x00_set_field32(&reg, CSR11_LONG_RETRY,
libconf->conf->long_frame_max_tx_count);
rt2x00_set_field32(&reg, CSR11_SHORT_RETRY,
@@ -573,7 +576,7 @@
u32 reg;
if (state == STATE_SLEEP) {
- rt2x00mmio_register_read(rt2x00dev, CSR20, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR20);
rt2x00_set_field32(&reg, CSR20_DELAY_AFTER_TBCN,
(rt2x00dev->beacon_int - 20) * 16);
rt2x00_set_field32(&reg, CSR20_TBCN_BEFORE_WAKEUP,
@@ -586,7 +589,7 @@
rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1);
rt2x00mmio_register_write(rt2x00dev, CSR20, reg);
} else {
- rt2x00mmio_register_read(rt2x00dev, CSR20, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR20);
rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0);
rt2x00mmio_register_write(rt2x00dev, CSR20, reg);
}
@@ -622,13 +625,13 @@
/*
* Update FCS error count from register.
*/
- rt2x00mmio_register_read(rt2x00dev, CNT0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CNT0);
qual->rx_failed = rt2x00_get_field32(reg, CNT0_FCS_ERROR);
/*
* Update False CCA count from register.
*/
- rt2x00mmio_register_read(rt2x00dev, CNT3, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CNT3);
qual->false_cca = rt2x00_get_field32(reg, CNT3_FALSE_CCA);
}
@@ -728,12 +731,12 @@
switch (queue->qid) {
case QID_RX:
- rt2x00mmio_register_read(rt2x00dev, RXCSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, RXCSR0);
rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 0);
rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg);
break;
case QID_BEACON:
- rt2x00mmio_register_read(rt2x00dev, CSR14, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR14);
rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
rt2x00_set_field32(&reg, CSR14_TBCN, 1);
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
@@ -751,17 +754,17 @@
switch (queue->qid) {
case QID_AC_VO:
- rt2x00mmio_register_read(rt2x00dev, TXCSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR0);
rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, 1);
rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg);
break;
case QID_AC_VI:
- rt2x00mmio_register_read(rt2x00dev, TXCSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR0);
rt2x00_set_field32(&reg, TXCSR0_KICK_TX, 1);
rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg);
break;
case QID_ATIM:
- rt2x00mmio_register_read(rt2x00dev, TXCSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR0);
rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, 1);
rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg);
break;
@@ -779,17 +782,17 @@
case QID_AC_VO:
case QID_AC_VI:
case QID_ATIM:
- rt2x00mmio_register_read(rt2x00dev, TXCSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR0);
rt2x00_set_field32(&reg, TXCSR0_ABORT, 1);
rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg);
break;
case QID_RX:
- rt2x00mmio_register_read(rt2x00dev, RXCSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, RXCSR0);
rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 1);
rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg);
break;
case QID_BEACON:
- rt2x00mmio_register_read(rt2x00dev, CSR14, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR14);
rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0);
rt2x00_set_field32(&reg, CSR14_TBCN, 0);
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
@@ -814,11 +817,11 @@
u32 word;
if (entry->queue->qid == QID_RX) {
- rt2x00_desc_read(entry_priv->desc, 0, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 0);
return rt2x00_get_field32(word, RXD_W0_OWNER_NIC);
} else {
- rt2x00_desc_read(entry_priv->desc, 0, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 0);
return (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
rt2x00_get_field32(word, TXD_W0_VALID));
@@ -832,15 +835,15 @@
u32 word;
if (entry->queue->qid == QID_RX) {
- rt2x00_desc_read(entry_priv->desc, 1, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 1);
rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
rt2x00_desc_write(entry_priv->desc, 1, word);
- rt2x00_desc_read(entry_priv->desc, 0, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 0);
rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1);
rt2x00_desc_write(entry_priv->desc, 0, word);
} else {
- rt2x00_desc_read(entry_priv->desc, 0, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 0);
rt2x00_set_field32(&word, TXD_W0_VALID, 0);
rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0);
rt2x00_desc_write(entry_priv->desc, 0, word);
@@ -855,7 +858,7 @@
/*
* Initialize registers.
*/
- rt2x00mmio_register_read(rt2x00dev, TXCSR2, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR2);
rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size);
rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit);
rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit);
@@ -863,36 +866,36 @@
rt2x00mmio_register_write(rt2x00dev, TXCSR2, reg);
entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
- rt2x00mmio_register_read(rt2x00dev, TXCSR3, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR3);
rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER,
entry_priv->desc_dma);
rt2x00mmio_register_write(rt2x00dev, TXCSR3, reg);
entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
- rt2x00mmio_register_read(rt2x00dev, TXCSR5, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR5);
rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER,
entry_priv->desc_dma);
rt2x00mmio_register_write(rt2x00dev, TXCSR5, reg);
entry_priv = rt2x00dev->atim->entries[0].priv_data;
- rt2x00mmio_register_read(rt2x00dev, TXCSR4, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR4);
rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
entry_priv->desc_dma);
rt2x00mmio_register_write(rt2x00dev, TXCSR4, reg);
entry_priv = rt2x00dev->bcn->entries[0].priv_data;
- rt2x00mmio_register_read(rt2x00dev, TXCSR6, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR6);
rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
entry_priv->desc_dma);
rt2x00mmio_register_write(rt2x00dev, TXCSR6, reg);
- rt2x00mmio_register_read(rt2x00dev, RXCSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, RXCSR1);
rt2x00_set_field32(&reg, RXCSR1_RXD_SIZE, rt2x00dev->rx->desc_size);
rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->limit);
rt2x00mmio_register_write(rt2x00dev, RXCSR1, reg);
entry_priv = rt2x00dev->rx->entries[0].priv_data;
- rt2x00mmio_register_read(rt2x00dev, RXCSR2, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, RXCSR2);
rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER,
entry_priv->desc_dma);
rt2x00mmio_register_write(rt2x00dev, RXCSR2, reg);
@@ -909,13 +912,13 @@
rt2x00mmio_register_write(rt2x00dev, PSCSR2, 0x00020002);
rt2x00mmio_register_write(rt2x00dev, PSCSR3, 0x00000002);
- rt2x00mmio_register_read(rt2x00dev, TIMECSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TIMECSR);
rt2x00_set_field32(&reg, TIMECSR_US_COUNT, 33);
rt2x00_set_field32(&reg, TIMECSR_US_64_COUNT, 63);
rt2x00_set_field32(&reg, TIMECSR_BEACON_EXPECT, 0);
rt2x00mmio_register_write(rt2x00dev, TIMECSR, reg);
- rt2x00mmio_register_read(rt2x00dev, CSR9, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR9);
rt2x00_set_field32(&reg, CSR9_MAX_FRAME_UNIT,
rt2x00dev->rx->data_size / 128);
rt2x00mmio_register_write(rt2x00dev, CSR9, reg);
@@ -923,11 +926,11 @@
/*
* Always use CWmin and CWmax set in descriptor.
*/
- rt2x00mmio_register_read(rt2x00dev, CSR11, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR11);
rt2x00_set_field32(&reg, CSR11_CW_SELECT, 0);
rt2x00mmio_register_write(rt2x00dev, CSR11, reg);
- rt2x00mmio_register_read(rt2x00dev, CSR14, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR14);
rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0);
rt2x00_set_field32(&reg, CSR14_TSF_SYNC, 0);
rt2x00_set_field32(&reg, CSR14_TBCN, 0);
@@ -940,7 +943,7 @@
rt2x00mmio_register_write(rt2x00dev, CNT3, 0);
- rt2x00mmio_register_read(rt2x00dev, TXCSR8, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR8);
rt2x00_set_field32(&reg, TXCSR8_BBP_ID0, 10);
rt2x00_set_field32(&reg, TXCSR8_BBP_ID0_VALID, 1);
rt2x00_set_field32(&reg, TXCSR8_BBP_ID1, 11);
@@ -951,28 +954,28 @@
rt2x00_set_field32(&reg, TXCSR8_BBP_ID3_VALID, 1);
rt2x00mmio_register_write(rt2x00dev, TXCSR8, reg);
- rt2x00mmio_register_read(rt2x00dev, ARTCSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, ARTCSR0);
rt2x00_set_field32(&reg, ARTCSR0_ACK_CTS_1MBS, 112);
rt2x00_set_field32(&reg, ARTCSR0_ACK_CTS_2MBS, 56);
rt2x00_set_field32(&reg, ARTCSR0_ACK_CTS_5_5MBS, 20);
rt2x00_set_field32(&reg, ARTCSR0_ACK_CTS_11MBS, 10);
rt2x00mmio_register_write(rt2x00dev, ARTCSR0, reg);
- rt2x00mmio_register_read(rt2x00dev, ARTCSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, ARTCSR1);
rt2x00_set_field32(&reg, ARTCSR1_ACK_CTS_6MBS, 45);
rt2x00_set_field32(&reg, ARTCSR1_ACK_CTS_9MBS, 37);
rt2x00_set_field32(&reg, ARTCSR1_ACK_CTS_12MBS, 33);
rt2x00_set_field32(&reg, ARTCSR1_ACK_CTS_18MBS, 29);
rt2x00mmio_register_write(rt2x00dev, ARTCSR1, reg);
- rt2x00mmio_register_read(rt2x00dev, ARTCSR2, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, ARTCSR2);
rt2x00_set_field32(&reg, ARTCSR2_ACK_CTS_24MBS, 29);
rt2x00_set_field32(&reg, ARTCSR2_ACK_CTS_36MBS, 25);
rt2x00_set_field32(&reg, ARTCSR2_ACK_CTS_48MBS, 25);
rt2x00_set_field32(&reg, ARTCSR2_ACK_CTS_54MBS, 25);
rt2x00mmio_register_write(rt2x00dev, ARTCSR2, reg);
- rt2x00mmio_register_read(rt2x00dev, RXCSR3, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, RXCSR3);
rt2x00_set_field32(&reg, RXCSR3_BBP_ID0, 47); /* CCK Signal */
rt2x00_set_field32(&reg, RXCSR3_BBP_ID0_VALID, 1);
rt2x00_set_field32(&reg, RXCSR3_BBP_ID1, 51); /* Rssi */
@@ -983,7 +986,7 @@
rt2x00_set_field32(&reg, RXCSR3_BBP_ID3_VALID, 1);
rt2x00mmio_register_write(rt2x00dev, RXCSR3, reg);
- rt2x00mmio_register_read(rt2x00dev, PCICSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, PCICSR);
rt2x00_set_field32(&reg, PCICSR_BIG_ENDIAN, 0);
rt2x00_set_field32(&reg, PCICSR_RX_TRESHOLD, 0);
rt2x00_set_field32(&reg, PCICSR_TX_TRESHOLD, 3);
@@ -1004,11 +1007,11 @@
rt2x00mmio_register_write(rt2x00dev, MACCSR0, 0x00213223);
rt2x00mmio_register_write(rt2x00dev, MACCSR1, 0x00235518);
- rt2x00mmio_register_read(rt2x00dev, MACCSR2, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MACCSR2);
rt2x00_set_field32(&reg, MACCSR2_DELAY, 64);
rt2x00mmio_register_write(rt2x00dev, MACCSR2, reg);
- rt2x00mmio_register_read(rt2x00dev, RALINKCSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, RALINKCSR);
rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA0, 17);
rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID0, 26);
rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_VALID0, 1);
@@ -1021,13 +1024,13 @@
rt2x00mmio_register_write(rt2x00dev, TXACKCSR0, 0x00000020);
- rt2x00mmio_register_read(rt2x00dev, CSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR1);
rt2x00_set_field32(&reg, CSR1_SOFT_RESET, 1);
rt2x00_set_field32(&reg, CSR1_BBP_RESET, 0);
rt2x00_set_field32(&reg, CSR1_HOST_READY, 0);
rt2x00mmio_register_write(rt2x00dev, CSR1, reg);
- rt2x00mmio_register_read(rt2x00dev, CSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR1);
rt2x00_set_field32(&reg, CSR1_SOFT_RESET, 0);
rt2x00_set_field32(&reg, CSR1_HOST_READY, 1);
rt2x00mmio_register_write(rt2x00dev, CSR1, reg);
@@ -1037,8 +1040,8 @@
* These registers are cleared on read,
* so we may pass a useless variable to store the value.
*/
- rt2x00mmio_register_read(rt2x00dev, CNT0, &reg);
- rt2x00mmio_register_read(rt2x00dev, CNT4, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CNT0);
+ reg = rt2x00mmio_register_read(rt2x00dev, CNT4);
return 0;
}
@@ -1049,7 +1052,7 @@
u8 value;
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2500pci_bbp_read(rt2x00dev, 0, &value);
+ value = rt2500pci_bbp_read(rt2x00dev, 0);
if ((value != 0xff) && (value != 0x00))
return 0;
udelay(REGISTER_BUSY_DELAY);
@@ -1101,7 +1104,7 @@
rt2500pci_bbp_write(rt2x00dev, 62, 0x10);
for (i = 0; i < EEPROM_BBP_SIZE; i++) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i);
if (eeprom != 0xffff && eeprom != 0x0000) {
reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
@@ -1128,7 +1131,7 @@
* should clear the register to assure a clean state.
*/
if (state == STATE_RADIO_IRQ_ON) {
- rt2x00mmio_register_read(rt2x00dev, CSR7, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR7);
rt2x00mmio_register_write(rt2x00dev, CSR7, reg);
}
@@ -1138,7 +1141,7 @@
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
- rt2x00mmio_register_read(rt2x00dev, CSR8, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR8);
rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask);
rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask);
rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, mask);
@@ -1190,7 +1193,7 @@
put_to_sleep = (state != STATE_AWAKE);
- rt2x00mmio_register_read(rt2x00dev, PWRCSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, PWRCSR1);
rt2x00_set_field32(&reg, PWRCSR1_SET_STATE, 1);
rt2x00_set_field32(&reg, PWRCSR1_BBP_DESIRE_STATE, state);
rt2x00_set_field32(&reg, PWRCSR1_RF_DESIRE_STATE, state);
@@ -1203,7 +1206,7 @@
* device has entered the correct state.
*/
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2x00mmio_register_read(rt2x00dev, PWRCSR1, &reg2);
+ reg2 = rt2x00mmio_register_read(rt2x00dev, PWRCSR1);
bbp_state = rt2x00_get_field32(reg2, PWRCSR1_BBP_CURR_STATE);
rf_state = rt2x00_get_field32(reg2, PWRCSR1_RF_CURR_STATE);
if (bbp_state == state && rf_state == state)
@@ -1263,18 +1266,18 @@
/*
* Start writing the descriptor words.
*/
- rt2x00_desc_read(txd, 1, &word);
+ word = rt2x00_desc_read(txd, 1);
rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
rt2x00_desc_write(txd, 1, word);
- rt2x00_desc_read(txd, 2, &word);
+ word = rt2x00_desc_read(txd, 2);
rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER);
rt2x00_set_field32(&word, TXD_W2_AIFS, entry->queue->aifs);
rt2x00_set_field32(&word, TXD_W2_CWMIN, entry->queue->cw_min);
rt2x00_set_field32(&word, TXD_W2_CWMAX, entry->queue->cw_max);
rt2x00_desc_write(txd, 2, word);
- rt2x00_desc_read(txd, 3, &word);
+ word = rt2x00_desc_read(txd, 3);
rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->u.plcp.signal);
rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->u.plcp.service);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW,
@@ -1283,7 +1286,7 @@
txdesc->u.plcp.length_high);
rt2x00_desc_write(txd, 3, word);
- rt2x00_desc_read(txd, 10, &word);
+ word = rt2x00_desc_read(txd, 10);
rt2x00_set_field32(&word, TXD_W10_RTS,
test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags));
rt2x00_desc_write(txd, 10, word);
@@ -1293,7 +1296,7 @@
* the device, whereby the device may take hold of the TXD before we
* finished updating it.
*/
- rt2x00_desc_read(txd, 0, &word);
+ word = rt2x00_desc_read(txd, 0);
rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1);
rt2x00_set_field32(&word, TXD_W0_VALID, 1);
rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
@@ -1332,7 +1335,7 @@
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
- rt2x00mmio_register_read(rt2x00dev, CSR14, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR14);
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
rt2x00mmio_register_write(rt2x00dev, CSR14, reg);
@@ -1368,8 +1371,8 @@
u32 word0;
u32 word2;
- rt2x00_desc_read(entry_priv->desc, 0, &word0);
- rt2x00_desc_read(entry_priv->desc, 2, &word2);
+ word0 = rt2x00_desc_read(entry_priv->desc, 0);
+ word2 = rt2x00_desc_read(entry_priv->desc, 2);
if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
@@ -1410,7 +1413,7 @@
while (!rt2x00queue_empty(queue)) {
entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
entry_priv = entry->priv_data;
- rt2x00_desc_read(entry_priv->desc, 0, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 0);
if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
!rt2x00_get_field32(word, TXD_W0_VALID))
@@ -1448,7 +1451,7 @@
*/
spin_lock_irq(&rt2x00dev->irqmask_lock);
- rt2x00mmio_register_read(rt2x00dev, CSR8, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR8);
rt2x00_set_field32(&reg, irq_field, 0);
rt2x00mmio_register_write(rt2x00dev, CSR8, reg);
@@ -1473,7 +1476,7 @@
if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) {
spin_lock_irq(&rt2x00dev->irqmask_lock);
- rt2x00mmio_register_read(rt2x00dev, CSR8, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR8);
rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
@@ -1509,7 +1512,7 @@
* Get the interrupt sources & saved to local variable.
* Write register value back to clear pending interrupts.
*/
- rt2x00mmio_register_read(rt2x00dev, CSR7, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR7);
rt2x00mmio_register_write(rt2x00dev, CSR7, reg);
if (!reg)
@@ -1547,7 +1550,7 @@
*/
spin_lock(&rt2x00dev->irqmask_lock);
- rt2x00mmio_register_read(rt2x00dev, CSR8, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR8);
reg |= mask;
rt2x00mmio_register_write(rt2x00dev, CSR8, reg);
@@ -1566,7 +1569,7 @@
u16 word;
u8 *mac;
- rt2x00mmio_register_read(rt2x00dev, CSR21, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR21);
eeprom.data = rt2x00dev;
eeprom.register_read = rt2500pci_eepromregister_read;
@@ -1587,7 +1590,7 @@
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
rt2x00lib_set_mac_address(rt2x00dev, mac);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2);
rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT,
@@ -1603,7 +1606,7 @@
rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0);
rt2x00_set_field16(&word, EEPROM_NIC_DYN_BBP_TUNE, 0);
@@ -1612,7 +1615,7 @@
rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_CALIBRATE_OFFSET_RSSI,
DEFAULT_RSSI_OFFSET);
@@ -1633,13 +1636,13 @@
/*
* Read EEPROM word for configuration.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA);
/*
* Identify RF chipset.
*/
value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
- rt2x00mmio_register_read(rt2x00dev, CSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR0);
rt2x00_set_chip(rt2x00dev, RT2560, value,
rt2x00_get_field32(reg, CSR0_REVISION));
@@ -1689,14 +1692,14 @@
/*
* Check if the BBP tuning should be enabled.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC);
if (!rt2x00_get_field16(eeprom, EEPROM_NIC_DYN_BBP_TUNE))
__set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
/*
* Read the RSSI <-> dBm offset information.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET);
rt2x00dev->rssi_offset =
rt2x00_get_field16(eeprom, EEPROM_CALIBRATE_OFFSET_RSSI);
@@ -1955,7 +1958,7 @@
* Enable rfkill polling by setting GPIO direction of the
* rfkill switch GPIO pin correctly.
*/
- rt2x00mmio_register_read(rt2x00dev, GPIOCSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, GPIOCSR);
rt2x00_set_field32(&reg, GPIOCSR_DIR0, 1);
rt2x00mmio_register_write(rt2x00dev, GPIOCSR, reg);
@@ -1991,9 +1994,9 @@
u64 tsf;
u32 reg;
- rt2x00mmio_register_read(rt2x00dev, CSR17, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR17);
tsf = (u64) rt2x00_get_field32(reg, CSR17_HIGH_TSFTIMER) << 32;
- rt2x00mmio_register_read(rt2x00dev, CSR16, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR16);
tsf |= rt2x00_get_field32(reg, CSR16_LOW_TSFTIMER);
return tsf;
@@ -2004,7 +2007,7 @@
struct rt2x00_dev *rt2x00dev = hw->priv;
u32 reg;
- rt2x00mmio_register_read(rt2x00dev, CSR15, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CSR15);
return rt2x00_get_field32(reg, CSR15_BEACON_SENT);
}
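
Both PCI drivers gate BBP access on the same readiness poll: read BBP register 0 up to REGISTER_BUSY_COUNT times until it returns something other than 0x00 or 0xff. A self-contained sketch of that polling idiom under the new return-value API (bbp_read(), the retry budget, and the simulated register are illustrative stand-ins, not the driver's symbols):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BUSY_COUNT 100	/* illustrative retry budget, like REGISTER_BUSY_COUNT */

static uint8_t fake_bbp0;	/* simulated BBP register 0 */
static unsigned int reads;

/* Hypothetical accessor mirroring the patched bbp_read() shape. */
static uint8_t bbp_read(unsigned int word)
{
	(void)word;
	/* Pretend the baseband becomes ready after a few reads. */
	if (++reads >= 3)
		fake_bbp0 = 0x5a;
	return fake_bbp0;
}

/* 0x00 and 0xff both mean "not ready"; anything else is a live BBP. */
static bool bbp_ready(void)
{
	unsigned int i;
	uint8_t value;

	for (i = 0; i < BUSY_COUNT; i++) {
		value = bbp_read(0);
		if (value != 0x00 && value != 0xff)
			return true;
		/* a real driver would udelay() here between attempts */
	}
	return false;
}

int main(void)
{
	printf("bbp ready: %s after %u reads\n",
	       bbp_ready() ? "yes" : "no", reads);
	return 0;
}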
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
index 0d2670a..529e059 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
@@ -55,26 +55,24 @@
* If the csr_mutex is already held then the _lock variants must
* be used instead.
*/
-static void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- u16 *value)
+static u16 rt2500usb_register_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset)
{
__le16 reg;
rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
USB_VENDOR_REQUEST_IN, offset,
&reg, sizeof(reg));
- *value = le16_to_cpu(reg);
+ return le16_to_cpu(reg);
}
-static void rt2500usb_register_read_lock(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- u16 *value)
+static u16 rt2500usb_register_read_lock(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset)
{
__le16 reg;
rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_READ,
USB_VENDOR_REQUEST_IN, offset,
&reg, sizeof(reg), REGISTER_TIMEOUT);
- *value = le16_to_cpu(reg);
+ return le16_to_cpu(reg);
}
static void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev,
@@ -114,7 +112,7 @@
unsigned int i;
for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
- rt2500usb_register_read_lock(rt2x00dev, offset, reg);
+ *reg = rt2500usb_register_read_lock(rt2x00dev, offset);
if (!rt2x00_get_field16(*reg, field))
return 1;
udelay(REGISTER_BUSY_DELAY);
@@ -155,10 +153,11 @@
mutex_unlock(&rt2x00dev->csr_mutex);
}
-static void rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int word, u8 *value)
+static u8 rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int word)
{
u16 reg;
+ u8 value;
mutex_lock(&rt2x00dev->csr_mutex);
@@ -178,12 +177,14 @@
rt2500usb_register_write_lock(rt2x00dev, PHY_CSR7, reg);
if (WAIT_FOR_BBP(rt2x00dev, &reg))
- rt2500usb_register_read_lock(rt2x00dev, PHY_CSR7, &reg);
+ reg = rt2500usb_register_read_lock(rt2x00dev, PHY_CSR7);
}
- *value = rt2x00_get_field16(reg, PHY_CSR7_DATA);
+ value = rt2x00_get_field16(reg, PHY_CSR7_DATA);
mutex_unlock(&rt2x00dev->csr_mutex);
+
+ return value;
}
static void rt2500usb_rf_write(struct rt2x00_dev *rt2x00dev,
@@ -216,14 +217,10 @@
}
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
-static void _rt2500usb_register_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- u32 *value)
+static u32 _rt2500usb_register_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset)
{
- u16 tmp;
-
- rt2500usb_register_read(rt2x00dev, offset, &tmp);
- *value = tmp;
+ return rt2500usb_register_read(rt2x00dev, offset);
}
static void _rt2500usb_register_write(struct rt2x00_dev *rt2x00dev,
@@ -271,7 +268,7 @@
{
u16 reg;
- rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, MAC_CSR19);
return rt2x00_get_field16(reg, MAC_CSR19_VAL7);
}
@@ -284,7 +281,7 @@
unsigned int enabled = brightness != LED_OFF;
u16 reg;
- rt2500usb_register_read(led->rt2x00dev, MAC_CSR20, &reg);
+ reg = rt2500usb_register_read(led->rt2x00dev, MAC_CSR20);
if (led->type == LED_TYPE_RADIO || led->type == LED_TYPE_ASSOC)
rt2x00_set_field16(&reg, MAC_CSR20_LINK, enabled);
@@ -302,7 +299,7 @@
container_of(led_cdev, struct rt2x00_led, led_dev);
u16 reg;
- rt2500usb_register_read(led->rt2x00dev, MAC_CSR21, &reg);
+ reg = rt2500usb_register_read(led->rt2x00dev, MAC_CSR21);
rt2x00_set_field16(&reg, MAC_CSR21_ON_PERIOD, *delay_on);
rt2x00_set_field16(&reg, MAC_CSR21_OFF_PERIOD, *delay_off);
rt2500usb_register_write(led->rt2x00dev, MAC_CSR21, reg);
@@ -356,7 +353,7 @@
*/
mask = TXRX_CSR0_KEY_ID.bit_mask;
- rt2500usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR0);
curr_cipher = rt2x00_get_field16(reg, TXRX_CSR0_ALGORITHM);
reg &= mask;
@@ -395,7 +392,7 @@
* TXRX_CSR0_KEY_ID contains only single-bit fields to indicate
* a particular key is valid.
*/
- rt2500usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR0);
rt2x00_set_field16(&reg, TXRX_CSR0_ALGORITHM, crypto->cipher);
rt2x00_set_field16(&reg, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER);
@@ -421,7 +418,7 @@
* and broadcast frames will always be accepted since
* there is no filter for it at this time.
*/
- rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR2);
rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CRC,
!(filter_flags & FIF_FCSFAIL));
rt2x00_set_field16(&reg, TXRX_CSR2_DROP_PHYSICAL,
@@ -453,7 +450,7 @@
* Enable beacon config
*/
bcn_preload = PREAMBLE + GET_DURATION(IEEE80211_HEADER, 20);
- rt2500usb_register_read(rt2x00dev, TXRX_CSR20, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR20);
rt2x00_set_field16(&reg, TXRX_CSR20_OFFSET, bcn_preload >> 6);
rt2x00_set_field16(&reg, TXRX_CSR20_BCN_EXPECT_WINDOW,
2 * (conf->type != NL80211_IFTYPE_STATION));
@@ -462,11 +459,11 @@
/*
* Enable synchronisation.
*/
- rt2500usb_register_read(rt2x00dev, TXRX_CSR18, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR18);
rt2x00_set_field16(&reg, TXRX_CSR18_OFFSET, 0);
rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg);
- rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR19);
rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, conf->sync);
rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
}
@@ -487,7 +484,7 @@
u16 reg;
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- rt2500usb_register_read(rt2x00dev, TXRX_CSR10, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR10);
rt2x00_set_field16(&reg, TXRX_CSR10_AUTORESPOND_PREAMBLE,
!!erp->short_preamble);
rt2500usb_register_write(rt2x00dev, TXRX_CSR10, reg);
@@ -498,7 +495,7 @@
erp->basic_rates);
if (changed & BSS_CHANGED_BEACON_INT) {
- rt2500usb_register_read(rt2x00dev, TXRX_CSR18, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR18);
rt2x00_set_field16(&reg, TXRX_CSR18_INTERVAL,
erp->beacon_int * 4);
rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg);
@@ -526,10 +523,10 @@
BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
ant->tx == ANTENNA_SW_DIVERSITY);
- rt2500usb_bbp_read(rt2x00dev, 2, &r2);
- rt2500usb_bbp_read(rt2x00dev, 14, &r14);
- rt2500usb_register_read(rt2x00dev, PHY_CSR5, &csr5);
- rt2500usb_register_read(rt2x00dev, PHY_CSR6, &csr6);
+ r2 = rt2500usb_bbp_read(rt2x00dev, 2);
+ r14 = rt2500usb_bbp_read(rt2x00dev, 14);
+ csr5 = rt2500usb_register_read(rt2x00dev, PHY_CSR5);
+ csr6 = rt2500usb_register_read(rt2x00dev, PHY_CSR6);
/*
* Configure the TX antenna.
@@ -629,7 +626,7 @@
{
u32 rf3;
- rt2x00_rf_read(rt2x00dev, 3, &rf3);
+ rf3 = rt2x00_rf_read(rt2x00dev, 3);
rt2x00_set_field32(&rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
rt2500usb_rf_write(rt2x00dev, 3, rf3);
}
@@ -643,7 +640,7 @@
u16 reg;
if (state == STATE_SLEEP) {
- rt2500usb_register_read(rt2x00dev, MAC_CSR18, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, MAC_CSR18);
rt2x00_set_field16(&reg, MAC_CSR18_DELAY_AFTER_BEACON,
rt2x00dev->beacon_int - 20);
rt2x00_set_field16(&reg, MAC_CSR18_BEACONS_BEFORE_WAKEUP,
@@ -656,7 +653,7 @@
rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 1);
rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg);
} else {
- rt2500usb_register_read(rt2x00dev, MAC_CSR18, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, MAC_CSR18);
rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 0);
rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg);
}
@@ -690,13 +687,13 @@
/*
* Update FCS error count from register.
*/
- rt2500usb_register_read(rt2x00dev, STA_CSR0, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, STA_CSR0);
qual->rx_failed = rt2x00_get_field16(reg, STA_CSR0_FCS_ERROR);
/*
* Update False CCA count from register.
*/
- rt2500usb_register_read(rt2x00dev, STA_CSR3, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, STA_CSR3);
qual->false_cca = rt2x00_get_field16(reg, STA_CSR3_FALSE_CCA_ERROR);
}
@@ -706,19 +703,19 @@
u16 eeprom;
u16 value;
- rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24);
value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R24_LOW);
rt2500usb_bbp_write(rt2x00dev, 24, value);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25);
value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R25_LOW);
rt2500usb_bbp_write(rt2x00dev, 25, value);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61);
value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R61_LOW);
rt2500usb_bbp_write(rt2x00dev, 61, value);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC);
value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_VGCUPPER);
rt2500usb_bbp_write(rt2x00dev, 17, value);
@@ -735,12 +732,12 @@
switch (queue->qid) {
case QID_RX:
- rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR2);
rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 0);
rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg);
break;
case QID_BEACON:
- rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR19);
rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1);
@@ -758,12 +755,12 @@
switch (queue->qid) {
case QID_RX:
- rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR2);
rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 1);
rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg);
break;
case QID_BEACON:
- rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR19);
rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 0);
rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 0);
rt2x00_set_field16(®, TXRX_CSR19_BEACON_GEN, 0);
@@ -786,54 +783,54 @@
rt2x00usb_vendor_request_sw(rt2x00dev, USB_SINGLE_WRITE, 0x0308,
0x00f0, REGISTER_TIMEOUT);
- rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR2);
rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 1);
rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg);
rt2500usb_register_write(rt2x00dev, MAC_CSR13, 0x1111);
rt2500usb_register_write(rt2x00dev, MAC_CSR14, 0x1e11);
- rt2500usb_register_read(rt2x00dev, MAC_CSR1, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, MAC_CSR1);
rt2x00_set_field16(&reg, MAC_CSR1_SOFT_RESET, 1);
rt2x00_set_field16(&reg, MAC_CSR1_BBP_RESET, 1);
rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 0);
rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg);
- rt2500usb_register_read(rt2x00dev, MAC_CSR1, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, MAC_CSR1);
rt2x00_set_field16(&reg, MAC_CSR1_SOFT_RESET, 0);
rt2x00_set_field16(&reg, MAC_CSR1_BBP_RESET, 0);
rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 0);
rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg);
- rt2500usb_register_read(rt2x00dev, TXRX_CSR5, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR5);
rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID0, 13);
rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID0_VALID, 1);
rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID1, 12);
rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID1_VALID, 1);
rt2500usb_register_write(rt2x00dev, TXRX_CSR5, reg);
- rt2500usb_register_read(rt2x00dev, TXRX_CSR6, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR6);
rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID0, 10);
rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID0_VALID, 1);
rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID1, 11);
rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID1_VALID, 1);
rt2500usb_register_write(rt2x00dev, TXRX_CSR6, reg);
- rt2500usb_register_read(rt2x00dev, TXRX_CSR7, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR7);
rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID0, 7);
rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID0_VALID, 1);
rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID1, 6);
rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID1_VALID, 1);
rt2500usb_register_write(rt2x00dev, TXRX_CSR7, reg);
- rt2500usb_register_read(rt2x00dev, TXRX_CSR8, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR8);
rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID0, 5);
rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID0_VALID, 1);
rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID1, 0);
rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID1_VALID, 0);
rt2500usb_register_write(rt2x00dev, TXRX_CSR8, reg);
- rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR19);
rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 0);
rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, 0);
rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 0);
@@ -846,14 +843,14 @@
if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE))
return -EBUSY;
- rt2500usb_register_read(rt2x00dev, MAC_CSR1, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, MAC_CSR1);
rt2x00_set_field16(&reg, MAC_CSR1_SOFT_RESET, 0);
rt2x00_set_field16(&reg, MAC_CSR1_BBP_RESET, 0);
rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 1);
rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg);
if (rt2x00_rev(rt2x00dev) >= RT2570_VERSION_C) {
- rt2500usb_register_read(rt2x00dev, PHY_CSR2, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, PHY_CSR2);
rt2x00_set_field16(&reg, PHY_CSR2_LNA, 0);
} else {
reg = 0;
@@ -867,26 +864,26 @@
rt2500usb_register_write(rt2x00dev, MAC_CSR15, 0x01ee);
rt2500usb_register_write(rt2x00dev, MAC_CSR16, 0x0000);
- rt2500usb_register_read(rt2x00dev, MAC_CSR8, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, MAC_CSR8);
rt2x00_set_field16(&reg, MAC_CSR8_MAX_FRAME_UNIT,
rt2x00dev->rx->data_size);
rt2500usb_register_write(rt2x00dev, MAC_CSR8, reg);
- rt2500usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR0);
rt2x00_set_field16(&reg, TXRX_CSR0_ALGORITHM, CIPHER_NONE);
rt2x00_set_field16(&reg, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER);
rt2x00_set_field16(&reg, TXRX_CSR0_KEY_ID, 0);
rt2500usb_register_write(rt2x00dev, TXRX_CSR0, reg);
- rt2500usb_register_read(rt2x00dev, MAC_CSR18, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, MAC_CSR18);
rt2x00_set_field16(&reg, MAC_CSR18_DELAY_AFTER_BEACON, 90);
rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg);
- rt2500usb_register_read(rt2x00dev, PHY_CSR4, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, PHY_CSR4);
rt2x00_set_field16(&reg, PHY_CSR4_LOW_RF_LE, 1);
rt2500usb_register_write(rt2x00dev, PHY_CSR4, reg);
- rt2500usb_register_read(rt2x00dev, TXRX_CSR1, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR1);
rt2x00_set_field16(&reg, TXRX_CSR1_AUTO_SEQUENCE, 1);
rt2500usb_register_write(rt2x00dev, TXRX_CSR1, reg);
@@ -899,7 +896,7 @@
u8 value;
for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
- rt2500usb_bbp_read(rt2x00dev, 0, &value);
+ value = rt2500usb_bbp_read(rt2x00dev, 0);
if ((value != 0xff) && (value != 0x00))
return 0;
udelay(REGISTER_BUSY_DELAY);
@@ -952,7 +949,7 @@
rt2500usb_bbp_write(rt2x00dev, 75, 0xff);
for (i = 0; i < EEPROM_BBP_SIZE; i++) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i);
if (eeprom != 0xffff && eeprom != 0x0000) {
reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
@@ -1018,7 +1015,7 @@
* device has entered the correct state.
*/
for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
- rt2500usb_register_read(rt2x00dev, MAC_CSR17, &reg2);
+ reg2 = rt2500usb_register_read(rt2x00dev, MAC_CSR17);
bbp_state = rt2x00_get_field16(reg2, MAC_CSR17_BBP_CURR_STATE);
rf_state = rt2x00_get_field16(reg2, MAC_CSR17_RF_CURR_STATE);
if (bbp_state == state && rf_state == state)
@@ -1077,7 +1074,7 @@
/*
* Start writing the descriptor words.
*/
- rt2x00_desc_read(txd, 0, &word);
+ word = rt2x00_desc_read(txd, 0);
rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, txdesc->retry_limit);
rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
@@ -1095,14 +1092,14 @@
rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx);
rt2x00_desc_write(txd, 0, word);
- rt2x00_desc_read(txd, 1, &word);
+ word = rt2x00_desc_read(txd, 1);
rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
rt2x00_set_field32(&word, TXD_W1_AIFS, entry->queue->aifs);
rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min);
rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max);
rt2x00_desc_write(txd, 1, word);
- rt2x00_desc_read(txd, 2, &word);
+ word = rt2x00_desc_read(txd, 2);
rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal);
rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW,
@@ -1143,7 +1140,7 @@
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
- rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR19);
rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0);
rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
@@ -1250,8 +1247,8 @@
/*
* It is now safe to read the descriptor on all architectures.
*/
- rt2x00_desc_read(rxd, 0, &word0);
- rt2x00_desc_read(rxd, 1, &word1);
+ word0 = rt2x00_desc_read(rxd, 0);
+ word1 = rt2x00_desc_read(rxd, 1);
if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
@@ -1263,8 +1260,8 @@
rxdesc->cipher_status = RX_CRYPTO_FAIL_KEY;
if (rxdesc->cipher != CIPHER_NONE) {
- _rt2x00_desc_read(rxd, 2, &rxdesc->iv[0]);
- _rt2x00_desc_read(rxd, 3, &rxdesc->iv[1]);
+ rxdesc->iv[0] = _rt2x00_desc_read(rxd, 2);
+ rxdesc->iv[1] = _rt2x00_desc_read(rxd, 3);
rxdesc->dev_flags |= RXDONE_CRYPTO_IV;
/* ICV is located at the end of frame */
@@ -1342,7 +1339,7 @@
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
rt2x00lib_set_mac_address(rt2x00dev, mac);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2);
rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT,
@@ -1358,7 +1355,7 @@
rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0);
rt2x00_set_field16(&word, EEPROM_NIC_DYN_BBP_TUNE, 0);
@@ -1367,7 +1364,7 @@
rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_CALIBRATE_OFFSET_RSSI,
DEFAULT_RSSI_OFFSET);
@@ -1376,7 +1373,7 @@
word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_BBPTUNE_THRESHOLD, 45);
rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE, word);
@@ -1387,10 +1384,10 @@
* Switch lower vgc bound to current BBP R17 value,
* lower the value a bit for better quality.
*/
- rt2500usb_bbp_read(rt2x00dev, 17, &bbp);
+ bbp = rt2500usb_bbp_read(rt2x00dev, 17);
bbp -= 6;
- rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCUPPER, 0x40);
rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp);
@@ -1401,7 +1398,7 @@
rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R17, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R17);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_LOW, 0x48);
rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_HIGH, 0x41);
@@ -1409,7 +1406,7 @@
rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r17: 0x%04x\n", word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_LOW, 0x40);
rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_HIGH, 0x80);
@@ -1417,7 +1414,7 @@
rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r24: 0x%04x\n", word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_LOW, 0x40);
rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_HIGH, 0x50);
@@ -1425,7 +1422,7 @@
rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r25: 0x%04x\n", word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_LOW, 0x60);
rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_HIGH, 0x6d);
@@ -1445,13 +1442,13 @@
/*
* Read EEPROM word for configuration.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA);
/*
* Identify RF chipset.
*/
value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
- rt2500usb_register_read(rt2x00dev, MAC_CSR0, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, MAC_CSR0);
rt2x00_set_chip(rt2x00dev, RT2570, value, reg);
if (((reg & 0xfff0) != 0) || ((reg & 0x0000000f) == 0)) {
@@ -1511,7 +1508,7 @@
/*
* Read the RSSI <-> dBm offset information.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET);
rt2x00dev->rssi_offset =
rt2x00_get_field16(eeprom, EEPROM_CALIBRATE_OFFSET_RSSI);
@@ -1776,7 +1773,7 @@
* Enable rfkill polling by setting GPIO direction of the
* rfkill switch GPIO pin correctly.
*/
- rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
+ reg = rt2500usb_register_read(rt2x00dev, MAC_CSR19);
rt2x00_set_field16(&reg, MAC_CSR19_DIR0, 0);
rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index d11c7b2..6e2e760 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -110,10 +110,10 @@
mutex_unlock(&rt2x00dev->csr_mutex);
}
-static void rt2800_bbp_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int word, u8 *value)
+static u8 rt2800_bbp_read(struct rt2x00_dev *rt2x00dev, const unsigned int word)
{
u32 reg;
+ u8 value;
mutex_lock(&rt2x00dev->csr_mutex);
@@ -137,9 +137,11 @@
WAIT_FOR_BBP(rt2x00dev, &reg);
}
- *value = rt2x00_get_field32(reg, BBP_CSR_CFG_VALUE);
+ value = rt2x00_get_field32(reg, BBP_CSR_CFG_VALUE);
mutex_unlock(&rt2x00dev->csr_mutex);
+
+ return value;
}
static void rt2800_rfcsr_write(struct rt2x00_dev *rt2x00dev,
@@ -203,10 +205,11 @@
rt2800_rfcsr_write_bank(rt2x00dev, 7, reg, value);
}
-static void rt2800_rfcsr_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int word, u8 *value)
+static u8 rt2800_rfcsr_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int word)
{
u32 reg;
+ u8 value;
mutex_lock(&rt2x00dev->csr_mutex);
@@ -232,7 +235,7 @@
WAIT_FOR_RFCSR_MT7620(rt2x00dev, &reg);
}
- *value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA_MT7620);
+ value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA_MT7620);
break;
default:
@@ -247,17 +250,19 @@
WAIT_FOR_RFCSR(rt2x00dev, &reg);
}
- *value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA);
+ value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA);
break;
}
mutex_unlock(&rt2x00dev->csr_mutex);
+
+ return value;
}
-static void rt2800_rfcsr_read_bank(struct rt2x00_dev *rt2x00dev, const u8 bank,
- const unsigned int reg, u8 *value)
+static u8 rt2800_rfcsr_read_bank(struct rt2x00_dev *rt2x00dev, const u8 bank,
+ const unsigned int reg)
{
- rt2800_rfcsr_read(rt2x00dev, (reg | (bank << 6)), value);
+ return rt2800_rfcsr_read(rt2x00dev, (reg | (bank << 6)));
}
static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev,
@@ -405,13 +410,13 @@
return rt2x00_eeprom_addr(rt2x00dev, index);
}
-static void rt2800_eeprom_read(struct rt2x00_dev *rt2x00dev,
- const enum rt2800_eeprom_word word, u16 *data)
+static u16 rt2800_eeprom_read(struct rt2x00_dev *rt2x00dev,
+ const enum rt2800_eeprom_word word)
{
unsigned int index;
index = rt2800_eeprom_word_index(rt2x00dev, word);
- rt2x00_eeprom_read(rt2x00dev, index, data);
+ return rt2x00_eeprom_read(rt2x00dev, index);
}
static void rt2800_eeprom_write(struct rt2x00_dev *rt2x00dev,
@@ -423,15 +428,14 @@
rt2x00_eeprom_write(rt2x00dev, index, data);
}
-static void rt2800_eeprom_read_from_array(struct rt2x00_dev *rt2x00dev,
- const enum rt2800_eeprom_word array,
- unsigned int offset,
- u16 *data)
+static u16 rt2800_eeprom_read_from_array(struct rt2x00_dev *rt2x00dev,
+ const enum rt2800_eeprom_word array,
+ unsigned int offset)
{
unsigned int index;
index = rt2800_eeprom_word_index(rt2x00dev, array);
- rt2x00_eeprom_read(rt2x00dev, index + offset, data);
+ return rt2x00_eeprom_read(rt2x00dev, index + offset);
}
static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
@@ -439,7 +443,7 @@
u32 reg;
int i, count;
- rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+ reg = rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL);
rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
@@ -454,7 +458,7 @@
* Check PLL_LD & XTAL_RDY.
*/
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
+ reg = rt2800_register_read(rt2x00dev, CMB_CTRL);
if (rt2x00_get_field32(reg, PLL_LD) &&
rt2x00_get_field32(reg, XTAL_RDY))
break;
@@ -477,7 +481,7 @@
count = 0;
}
- rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+ reg = rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL);
rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0);
rt2x00_set_field32(&reg, WLAN_CLK_EN, 1);
rt2x00_set_field32(&reg, WLAN_RESET, 1);
@@ -532,7 +536,7 @@
u32 reg;
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
+ reg = rt2800_register_read(rt2x00dev, MAC_CSR0);
if (reg && reg != ~0)
return 0;
msleep(1);
@@ -553,7 +557,7 @@
* before timing out.
*/
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG);
if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
!rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
return 0;
@@ -570,7 +574,7 @@
{
u32 reg;
- rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
@@ -720,7 +724,7 @@
rt2x00_rt(rt2x00dev, RT3572) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392)) {
- rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
+ reg = rt2800_register_read(rt2x00dev, AUX_CTRL);
rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
rt2800_register_write(rt2x00dev, AUX_CTRL, reg);
@@ -739,7 +743,7 @@
* Wait for device to stabilize.
*/
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
+ reg = rt2800_register_read(rt2x00dev, PBF_SYS_CTRL);
if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY))
break;
msleep(1);
@@ -781,7 +785,7 @@
/*
* Initialize TX Info descriptor
*/
- rt2x00_desc_read(txwi, 0, &word);
+ word = rt2x00_desc_read(txwi, 0);
rt2x00_set_field32(&word, TXWI_W0_FRAG,
test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
rt2x00_set_field32(&word, TXWI_W0_MIMO_PS,
@@ -803,7 +807,7 @@
rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
rt2x00_desc_write(txwi, 0, word);
- rt2x00_desc_read(txwi, 1, &word);
+ word = rt2x00_desc_read(txwi, 1);
rt2x00_set_field32(&word, TXWI_W1_ACK,
test_bit(ENTRY_TXD_ACK, &txdesc->flags));
rt2x00_set_field32(&word, TXWI_W1_NSEQ,
@@ -843,16 +847,16 @@
u8 offset2;
if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) {
- rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG);
offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0);
offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1);
- rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2);
offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_OFFSET2);
} else {
- rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A);
offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET0);
offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET1);
- rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2);
offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_OFFSET2);
}
@@ -881,12 +885,12 @@
__le32 *rxwi = (__le32 *) entry->skb->data;
u32 word;
- rt2x00_desc_read(rxwi, 0, &word);
+ word = rt2x00_desc_read(rxwi, 0);
rxdesc->cipher = rt2x00_get_field32(word, RXWI_W0_UDF);
rxdesc->size = rt2x00_get_field32(word, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);
- rt2x00_desc_read(rxwi, 1, &word);
+ word = rt2x00_desc_read(rxwi, 1);
if (rt2x00_get_field32(word, RXWI_W1_SHORT_GI))
rxdesc->enc_flags |= RX_ENC_FLAG_SHORT_GI;
@@ -907,7 +911,7 @@
if (rxdesc->rate_mode == RATE_MODE_CCK)
rxdesc->signal &= ~0x8;
- rt2x00_desc_read(rxwi, 2, &word);
+ word = rt2x00_desc_read(rxwi, 2);
/*
* Convert descriptor AGC value to RSSI value.
@@ -968,7 +972,7 @@
* Obtain the status about this packet.
*/
txdesc.flags = 0;
- rt2x00_desc_read(txwi, 0, &word);
+ word = rt2x00_desc_read(txwi, 0);
mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
ampdu = rt2x00_get_field32(word, TXWI_W0_AMPDU);
@@ -1093,7 +1097,7 @@
/*
* H/W sends up to MAC_BSSID_DW1_BSS_BCN_NUM + 1 consecutive beacons.
*/
- rt2800_register_read(rt2x00dev, MAC_BSSID_DW1, &bssid_dw1);
+ bssid_dw1 = rt2800_register_read(rt2x00dev, MAC_BSSID_DW1);
rt2x00_set_field32(&bssid_dw1, MAC_BSSID_DW1_BSS_BCN_NUM,
bcn_num > 0 ? bcn_num - 1 : 0);
rt2800_register_write(rt2x00dev, MAC_BSSID_DW1, bssid_dw1);
@@ -1112,7 +1116,7 @@
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
- rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, BCN_TIME_CFG);
orig_reg = reg;
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
@@ -1202,7 +1206,7 @@
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
- rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &orig_reg);
+ orig_reg = rt2800_register_read(rt2x00dev, BCN_TIME_CFG);
reg = orig_reg;
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
@@ -1275,10 +1279,10 @@
u32 reg;
if (rt2x00_rt(rt2x00dev, RT3290)) {
- rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+ reg = rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL);
return rt2x00_get_field32(reg, WLAN_GPIO_IN_BIT0);
} else {
- rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+ reg = rt2800_register_read(rt2x00dev, GPIO_CTRL);
return rt2x00_get_field32(reg, GPIO_CTRL_VAL2);
}
}
@@ -1303,7 +1307,7 @@
/* Check for SoC (SOC devices don't support MCU requests) */
if (rt2x00_is_soc(led->rt2x00dev)) {
- rt2800_register_read(led->rt2x00dev, LED_CFG, &reg);
+ reg = rt2800_register_read(led->rt2x00dev, LED_CFG);
/* Set LED Polarity */
rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, polarity);
@@ -1392,7 +1396,7 @@
* The BSS Idx number is split into a main value of 3 bits,
* and an extended field for adding one additional bit to the value.
*/
- rt2800_register_read(rt2x00dev, offset, &reg);
+ reg = rt2800_register_read(rt2x00dev, offset);
rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX, (bssidx & 0x7));
rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX_EXT,
(bssidx & 0x8) >> 3);
@@ -1410,7 +1414,7 @@
offset = MAC_WCID_ATTR_ENTRY(key->hw_key_idx);
if (crypto->cmd == SET_KEY) {
- rt2800_register_read(rt2x00dev, offset, &reg);
+ reg = rt2800_register_read(rt2x00dev, offset);
rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB,
!!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
/*
@@ -1426,7 +1430,7 @@
rt2800_register_write(rt2x00dev, offset, reg);
} else {
/* Delete the cipher without touching the bssidx */
- rt2800_register_read(rt2x00dev, offset, &reg);
+ reg = rt2800_register_read(rt2x00dev, offset);
rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB, 0);
rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER, 0);
rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER_EXT, 0);
@@ -1482,7 +1486,7 @@
offset = SHARED_KEY_MODE_ENTRY(key->hw_key_idx / 8);
- rt2800_register_read(rt2x00dev, offset, &reg);
+ reg = rt2800_register_read(rt2x00dev, offset);
rt2x00_set_field32(&reg, field,
(crypto->cmd == SET_KEY) * crypto->cipher);
rt2800_register_write(rt2x00dev, offset, reg);
@@ -1548,7 +1552,7 @@
max_psdu = min(drv_data->max_psdu, i);
- rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, MAX_LEN_CFG);
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, max_psdu);
rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg);
}
@@ -1640,7 +1644,7 @@
* and broadcast frames will always be accepted since
* there is no filter for it at this time.
*/
- rt2800_register_read(rt2x00dev, RX_FILTER_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, RX_FILTER_CFG);
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CRC_ERROR,
!(filter_flags & FIF_FCSFAIL));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR,
@@ -1684,7 +1688,7 @@
/*
* Enable synchronisation.
*/
- rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, BCN_TIME_CFG);
rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
@@ -1692,14 +1696,14 @@
/*
* Tune beacon queue transmit parameters for AP mode
*/
- rt2800_register_read(rt2x00dev, TBTT_SYNC_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, TBTT_SYNC_CFG);
rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_CWMIN, 0);
rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_AIFSN, 1);
rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_EXP_WIN, 32);
rt2x00_set_field32(&reg, TBTT_SYNC_CFG_TBTT_ADJUST, 0);
rt2800_register_write(rt2x00dev, TBTT_SYNC_CFG, reg);
} else {
- rt2800_register_read(rt2x00dev, TBTT_SYNC_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, TBTT_SYNC_CFG);
rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_CWMIN, 4);
rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_AIFSN, 2);
rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_EXP_WIN, 32);
@@ -1818,22 +1822,22 @@
gf20_mode = gf40_mode = 1;
/* Update HT protection config */
- rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, MM20_PROT_CFG);
rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, mm20_rate);
rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, mm20_mode);
rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg);
- rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, MM40_PROT_CFG);
rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, mm40_rate);
rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, mm40_mode);
rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg);
- rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, GF20_PROT_CFG);
rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, gf20_rate);
rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, gf20_mode);
rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg);
- rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, GF40_PROT_CFG);
rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, gf40_rate);
rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, gf40_mode);
rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg);
@@ -1845,14 +1849,14 @@
u32 reg;
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, AUTO_RSP_CFG);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE,
!!erp->short_preamble);
rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, OFDM_PROT_CFG);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL,
erp->cts_protection ? 2 : 0);
rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
@@ -1865,18 +1869,18 @@
}
if (changed & BSS_CHANGED_ERP_SLOT) {
- rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG);
rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME,
erp->slot_time);
rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
- rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, XIFS_TIME_CFG);
rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs);
rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg);
}
if (changed & BSS_CHANGED_BEACON_INT) {
- rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, BCN_TIME_CFG);
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
erp->beacon_int * 16);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
@@ -1893,7 +1897,7 @@
u16 eeprom;
u8 led_ctrl, led_g_mode, led_r_mode;
- rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
+ reg = rt2800_register_read(rt2x00dev, GPIO_SWITCH);
if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
rt2x00_set_field32(&reg, GPIO_SWITCH_0, 1);
rt2x00_set_field32(&reg, GPIO_SWITCH_1, 1);
@@ -1903,12 +1907,12 @@
}
rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
- rt2800_register_read(rt2x00dev, LED_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, LED_CFG);
led_g_mode = rt2x00_get_field32(reg, LED_CFG_LED_POLAR) ? 3 : 0;
led_r_mode = rt2x00_get_field32(reg, LED_CFG_LED_POLAR) ? 0 : 3;
if (led_g_mode != rt2x00_get_field32(reg, LED_CFG_G_LED_MODE) ||
led_r_mode != rt2x00_get_field32(reg, LED_CFG_R_LED_MODE)) {
- rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ);
led_ctrl = rt2x00_get_field16(eeprom, EEPROM_FREQ_LED_MODE);
if (led_ctrl == 0 || led_ctrl > 0x40) {
rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, led_g_mode);
@@ -1929,14 +1933,14 @@
u8 gpio_bit3 = (ant == ANTENNA_A) ? 0 : 1;
if (rt2x00_is_pci(rt2x00dev)) {
- rt2800_register_read(rt2x00dev, E2PROM_CSR, &reg);
+ reg = rt2800_register_read(rt2x00dev, E2PROM_CSR);
rt2x00_set_field32(&reg, E2PROM_CSR_DATA_CLOCK, eesk_pin);
rt2800_register_write(rt2x00dev, E2PROM_CSR, reg);
} else if (rt2x00_is_usb(rt2x00dev))
rt2800_mcu_request(rt2x00dev, MCU_ANT_SELECT, 0xff,
eesk_pin, 0);
- rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+ reg = rt2800_register_read(rt2x00dev, GPIO_CTRL);
rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0);
rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, gpio_bit3);
rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
@@ -1948,8 +1952,8 @@
u8 r3;
u16 eeprom;
- rt2800_bbp_read(rt2x00dev, 1, &r1);
- rt2800_bbp_read(rt2x00dev, 3, &r3);
+ r1 = rt2800_bbp_read(rt2x00dev, 1);
+ r3 = rt2800_bbp_read(rt2x00dev, 3);
if (rt2x00_rt(rt2x00dev, RT3572) &&
rt2x00_has_cap_bt_coexist(rt2x00dev))
@@ -1983,8 +1987,8 @@
rt2x00_rt(rt2x00dev, RT3090) ||
rt2x00_rt(rt2x00dev, RT3352) ||
rt2x00_rt(rt2x00dev, RT3390)) {
- rt2800_eeprom_read(rt2x00dev,
- EEPROM_NIC_CONF1, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev,
+ EEPROM_NIC_CONF1);
if (rt2x00_get_field16(eeprom,
EEPROM_NIC_CONF1_ANT_DIVERSITY))
rt2800_set_ant_diversity(rt2x00dev,
@@ -2027,28 +2031,28 @@
short lna_gain;
if (libconf->rf.channel <= 14) {
- rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_LNA);
lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_BG);
} else if (libconf->rf.channel <= 64) {
- rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_LNA);
lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0);
} else if (libconf->rf.channel <= 128) {
if (rt2x00_rt(rt2x00dev, RT3593)) {
- rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2);
lna_gain = rt2x00_get_field16(eeprom,
EEPROM_EXT_LNA2_A1);
} else {
- rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2);
lna_gain = rt2x00_get_field16(eeprom,
EEPROM_RSSI_BG2_LNA_A1);
}
} else {
if (rt2x00_rt(rt2x00dev, RT3593)) {
- rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2);
lna_gain = rt2x00_get_field16(eeprom,
EEPROM_EXT_LNA2_A2);
} else {
- rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2);
lna_gain = rt2x00_get_field16(eeprom,
EEPROM_RSSI_A2_LNA_A2);
}
@@ -2072,7 +2076,7 @@
freq_offset = rt2x00_get_field8(rt2x00dev->freq_offset, RFCSR17_CODE);
freq_offset = min_t(u8, freq_offset, FREQ_OFFSET_BOUND);
- rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 17);
prev_rfcsr = rfcsr;
rt2x00_set_field8(&rfcsr, RFCSR17_CODE, freq_offset);
@@ -2174,23 +2178,23 @@
rt2800_rfcsr_write(rt2x00dev, 2, rf->rf1);
- rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 3);
rt2x00_set_field8(&rfcsr, RFCSR3_K, rf->rf3);
rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 6);
rt2x00_set_field8(&rfcsr, RFCSR6_R1, rf->rf2);
rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 12);
rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER, info->default_power1);
rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 13);
rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER, info->default_power2);
rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD,
rt2x00dev->default_ant.rx_chain_num <= 1);
@@ -2203,7 +2207,7 @@
rt2x00dev->default_ant.tx_chain_num <= 2);
rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 23);
rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
@@ -2220,19 +2224,19 @@
}
}
- rt2800_rfcsr_read(rt2x00dev, 24, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 24);
rt2x00_set_field8(&rfcsr, RFCSR24_TX_CALIB, calib_tx);
rt2800_rfcsr_write(rt2x00dev, 24, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 31, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 31);
rt2x00_set_field8(&rfcsr, RFCSR31_RX_CALIB, calib_rx);
rt2800_rfcsr_write(rt2x00dev, 31, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 7);
rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 30);
rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
@@ -2262,7 +2266,7 @@
rt2800_rfcsr_write(rt2x00dev, 2, rf->rf1);
rt2800_rfcsr_write(rt2x00dev, 3, rf->rf3);
- rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 6);
rt2x00_set_field8(&rfcsr, RFCSR6_R1, rf->rf2);
if (rf->channel <= 14)
rt2x00_set_field8(&rfcsr, RFCSR6_TXDIV, 2);
@@ -2270,14 +2274,14 @@
rt2x00_set_field8(&rfcsr, RFCSR6_TXDIV, 1);
rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 5, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 5);
if (rf->channel <= 14)
rt2x00_set_field8(&rfcsr, RFCSR5_R1, 1);
else
rt2x00_set_field8(&rfcsr, RFCSR5_R1, 2);
rt2800_rfcsr_write(rt2x00dev, 5, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 12);
if (rf->channel <= 14) {
rt2x00_set_field8(&rfcsr, RFCSR12_DR0, 3);
rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
@@ -2290,7 +2294,7 @@
}
rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 13);
if (rf->channel <= 14) {
rt2x00_set_field8(&rfcsr, RFCSR13_DR0, 3);
rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER,
@@ -2303,7 +2307,7 @@
}
rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
@@ -2336,7 +2340,7 @@
}
rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 23);
rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
@@ -2366,7 +2370,7 @@
rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
rt2800_rfcsr_write(rt2x00dev, 29, 0x9b);
} else {
- rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 7);
rt2x00_set_field8(&rfcsr, RFCSR7_BIT2, 1);
rt2x00_set_field8(&rfcsr, RFCSR7_BIT3, 0);
rt2x00_set_field8(&rfcsr, RFCSR7_BIT4, 1);
@@ -2399,7 +2403,7 @@
rt2800_rfcsr_write(rt2x00dev, 29, 0x9f);
}
- rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+ reg = rt2800_register_read(rt2x00dev, GPIO_CTRL);
rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0);
if (rf->channel <= 14)
rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1);
@@ -2407,7 +2411,7 @@
rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 0);
rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
- rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 7);
rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
}
@@ -2425,12 +2429,12 @@
const bool txbf_enabled = false; /* TODO */
/* TODO: use TX{0,1,2}FinePowerControl values from EEPROM */
- rt2800_bbp_read(rt2x00dev, 109, &bbp);
+ bbp = rt2800_bbp_read(rt2x00dev, 109);
rt2x00_set_field8(&bbp, BBP109_TX0_POWER, 0);
rt2x00_set_field8(&bbp, BBP109_TX1_POWER, 0);
rt2800_bbp_write(rt2x00dev, 109, bbp);
- rt2800_bbp_read(rt2x00dev, 110, &bbp);
+ bbp = rt2800_bbp_read(rt2x00dev, 110);
rt2x00_set_field8(&bbp, BBP110_TX2_POWER, 0);
rt2800_bbp_write(rt2x00dev, 110, bbp);
@@ -2450,11 +2454,11 @@
rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3 & 0xf);
- rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 11);
rt2x00_set_field8(&rfcsr, RFCSR11_R, (rf->rf2 & 0x3));
rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 11);
rt2x00_set_field8(&rfcsr, RFCSR11_PLL_IDOH, 1);
if (rf->channel <= 14)
rt2x00_set_field8(&rfcsr, RFCSR11_PLL_MOD, 1);
@@ -2462,7 +2466,7 @@
rt2x00_set_field8(&rfcsr, RFCSR11_PLL_MOD, 2);
rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 53, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 53);
if (rf->channel <= 14) {
rfcsr = 0;
rt2x00_set_field8(&rfcsr, RFCSR53_TX_POWER,
@@ -2477,7 +2481,7 @@
}
rt2800_rfcsr_write(rt2x00dev, 53, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 55, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 55);
if (rf->channel <= 14) {
rfcsr = 0;
rt2x00_set_field8(&rfcsr, RFCSR55_TX_POWER,
@@ -2492,7 +2496,7 @@
}
rt2800_rfcsr_write(rt2x00dev, 55, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 54, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 54);
if (rf->channel <= 14) {
rfcsr = 0;
rt2x00_set_field8(&rfcsr, RFCSR54_TX_POWER,
@@ -2507,7 +2511,7 @@
}
rt2800_rfcsr_write(rt2x00dev, 54, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
@@ -2559,7 +2563,7 @@
/* NOTE: the reference driver does not write the new value
* back to RFCSR 32
*/
- rt2800_rfcsr_read(rt2x00dev, 32, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 32);
rt2x00_set_field8(&rfcsr, RFCSR32_TX_AGC_FC, txrx_agc_fc);
if (rf->channel <= 14)
@@ -2568,34 +2572,34 @@
rfcsr = 0x80;
rt2800_rfcsr_write(rt2x00dev, 31, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 30);
rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, txrx_h20m);
rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, txrx_h20m);
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
/* Band selection */
- rt2800_rfcsr_read(rt2x00dev, 36, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 36);
if (rf->channel <= 14)
rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 1);
else
rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 0);
rt2800_rfcsr_write(rt2x00dev, 36, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 34, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 34);
if (rf->channel <= 14)
rfcsr = 0x3c;
else
rfcsr = 0x20;
rt2800_rfcsr_write(rt2x00dev, 34, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 12);
if (rf->channel <= 14)
rfcsr = 0x1a;
else
rfcsr = 0x12;
rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 6);
if (rf->channel >= 1 && rf->channel <= 14)
rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 1);
else if (rf->channel >= 36 && rf->channel <= 64)
@@ -2606,7 +2610,7 @@
rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 1);
rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 30);
rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2);
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
@@ -2620,11 +2624,11 @@
rt2800_rfcsr_write(rt2x00dev, 13, 0x23);
}
- rt2800_rfcsr_read(rt2x00dev, 51, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 51);
rt2x00_set_field8(&rfcsr, RFCSR51_BITS01, 1);
rt2800_rfcsr_write(rt2x00dev, 51, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 51, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 51);
if (rf->channel <= 14) {
rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, 5);
rt2x00_set_field8(&rfcsr, RFCSR51_BITS57, 3);
@@ -2634,7 +2638,7 @@
}
rt2800_rfcsr_write(rt2x00dev, 51, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 49);
if (rf->channel <= 14)
rt2x00_set_field8(&rfcsr, RFCSR49_TX_LO1_IC, 3);
else
@@ -2645,11 +2649,11 @@
rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 50);
rt2x00_set_field8(&rfcsr, RFCSR50_TX_LO1_EN, 0);
rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 57, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 57);
if (rf->channel <= 14)
rt2x00_set_field8(&rfcsr, RFCSR57_DRV_CC, 0x1b);
else
@@ -2665,7 +2669,7 @@
}
/* Initiate VCO calibration */
- rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 3);
if (rf->channel <= 14) {
rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
} else {
@@ -2721,11 +2725,11 @@
rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
- rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 11);
rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf2);
rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 49);
if (info->default_power1 > POWER_BOUND)
rt2x00_set_field8(&rfcsr, RFCSR49_TX, POWER_BOUND);
else
@@ -2775,7 +2779,7 @@
rt2800_freq_cal_mode1(rt2x00dev);
- rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
@@ -2806,11 +2810,11 @@
rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
- rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 11);
rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf2);
rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 49);
if (info->default_power1 > POWER_BOUND)
rt2x00_set_field8(&rfcsr, RFCSR49_TX, POWER_BOUND);
else
@@ -2818,7 +2822,7 @@
rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
if (rt2x00_rt(rt2x00dev, RT5392)) {
- rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 50);
if (info->default_power2 > POWER_BOUND)
rt2x00_set_field8(&rfcsr, RFCSR50_TX, POWER_BOUND);
else
@@ -2827,7 +2831,7 @@
rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
}
- rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 1);
if (rt2x00_rt(rt2x00dev, RT5392)) {
rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
@@ -2911,7 +2915,7 @@
const bool is_11b = false;
const bool is_type_ep = false;
- rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+ reg = rt2800_register_read(rt2x00dev, LDO_CFG0);
rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL,
(rf->channel > 14 || conf_is_ht40(conf)) ? 5 : 0);
rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
@@ -2919,13 +2923,13 @@
/* Order of values on rf_channel entry: N, K, mod, R */
rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1 & 0xff);
- rt2800_rfcsr_read(rt2x00dev, 9, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 9);
rt2x00_set_field8(&rfcsr, RFCSR9_K, rf->rf2 & 0xf);
rt2x00_set_field8(&rfcsr, RFCSR9_N, (rf->rf1 & 0x100) >> 8);
rt2x00_set_field8(&rfcsr, RFCSR9_MOD, ((rf->rf3 - 8) & 0x4) >> 2);
rt2800_rfcsr_write(rt2x00dev, 9, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 11);
rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf4 - 1);
rt2x00_set_field8(&rfcsr, RFCSR11_MOD, (rf->rf3 - 8) & 0x3);
rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
@@ -3093,7 +3097,7 @@
ep_reg = 0x3;
}
- rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 49);
if (info->default_power1 > power_bound)
rt2x00_set_field8(&rfcsr, RFCSR49_TX, power_bound);
else
@@ -3102,7 +3106,7 @@
rt2x00_set_field8(&rfcsr, RFCSR49_EP, ep_reg);
rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 50);
if (info->default_power2 > power_bound)
rt2x00_set_field8(&rfcsr, RFCSR50_TX, power_bound);
else
@@ -3111,7 +3115,7 @@
rt2x00_set_field8(&rfcsr, RFCSR50_EP, ep_reg);
rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
@@ -3144,7 +3148,7 @@
rt2800_freq_cal_mode1(rt2x00dev);
/* TODO merge with others */
- rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 3);
rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
@@ -3186,7 +3190,7 @@
/* Rdiv setting (set 0x03 if Xtal==20)
* R13[1:0]
*/
- rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 13);
rt2x00_set_field8(&rfcsr, RFCSR13_RDIV_MT7620,
rt2800_clk_is_20mhz(rt2x00dev) ? 3 : 0);
rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);
@@ -3195,25 +3199,25 @@
* R20[7:0] in rf->rf1
* R21[0] always 0
*/
- rt2800_rfcsr_read(rt2x00dev, 20, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 20);
rfcsr = (rf->rf1 & 0x00ff);
rt2800_rfcsr_write(rt2x00dev, 20, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 21, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 21);
rt2x00_set_field8(&rfcsr, RFCSR21_BIT1, 0);
rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
/* K setting (always 0)
* R16[3:0] (RF PLL freq selection)
*/
- rt2800_rfcsr_read(rt2x00dev, 16, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 16);
rt2x00_set_field8(&rfcsr, RFCSR16_RF_PLL_FREQ_SEL_MT7620, 0);
rt2800_rfcsr_write(rt2x00dev, 16, rfcsr);
/* D setting (always 0)
* R22[2:0] (D=15, R22[2:0]=<111>)
*/
- rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 22);
rt2x00_set_field8(&rfcsr, RFCSR22_FREQPLAN_D_MT7620, 0);
rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
@@ -3222,40 +3226,40 @@
* R18<7:0> in rf->rf3
* R19<1:0> in rf->rf4
*/
- rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 17);
rfcsr = rf->rf2;
rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 18, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 18);
rfcsr = rf->rf3;
rt2800_rfcsr_write(rt2x00dev, 18, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 19, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 19);
rt2x00_set_field8(&rfcsr, RFCSR19_K, rf->rf4);
rt2800_rfcsr_write(rt2x00dev, 19, rfcsr);
/* Default: XO=20MHz , SDM mode */
- rt2800_rfcsr_read(rt2x00dev, 16, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 16);
rt2x00_set_field8(&rfcsr, RFCSR16_SDM_MODE_MT7620, 0x80);
rt2800_rfcsr_write(rt2x00dev, 16, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 21, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 21);
rt2x00_set_field8(&rfcsr, RFCSR21_BIT8, 1);
rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_TX2_EN_MT7620,
rt2x00dev->default_ant.tx_chain_num != 1);
rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 2);
rt2x00_set_field8(&rfcsr, RFCSR2_TX2_EN_MT7620,
rt2x00dev->default_ant.tx_chain_num != 1);
rt2x00_set_field8(&rfcsr, RFCSR2_RX2_EN_MT7620,
rt2x00dev->default_ant.rx_chain_num != 1);
rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 42, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 42);
rt2x00_set_field8(&rfcsr, RFCSR42_TX2_EN_MT7620,
rt2x00dev->default_ant.tx_chain_num != 1);
rt2800_rfcsr_write(rt2x00dev, 42, rfcsr);
@@ -3283,7 +3287,7 @@
rt2800_rfcsr_write_dccal(rt2x00dev, 59, 0x28);
}
- rt2800_rfcsr_read(rt2x00dev, 28, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 28);
rt2x00_set_field8(&rfcsr, RFCSR28_CH11_HT40,
conf_is_ht40(conf) && (rf->channel == 11));
rt2800_rfcsr_write(rt2x00dev, 28, rfcsr);
@@ -3296,36 +3300,36 @@
rx_agc_fc = drv_data->rx_calibration_bw20;
tx_agc_fc = drv_data->tx_calibration_bw20;
}
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 6, &rfcsr);
+ rfcsr = rt2800_rfcsr_read_bank(rt2x00dev, 5, 6);
rfcsr &= (~0x3F);
rfcsr |= rx_agc_fc;
rt2800_rfcsr_write_bank(rt2x00dev, 5, 6, rfcsr);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 7, &rfcsr);
+ rfcsr = rt2800_rfcsr_read_bank(rt2x00dev, 5, 7);
rfcsr &= (~0x3F);
rfcsr |= rx_agc_fc;
rt2800_rfcsr_write_bank(rt2x00dev, 5, 7, rfcsr);
- rt2800_rfcsr_read_bank(rt2x00dev, 7, 6, &rfcsr);
+ rfcsr = rt2800_rfcsr_read_bank(rt2x00dev, 7, 6);
rfcsr &= (~0x3F);
rfcsr |= rx_agc_fc;
rt2800_rfcsr_write_bank(rt2x00dev, 7, 6, rfcsr);
- rt2800_rfcsr_read_bank(rt2x00dev, 7, 7, &rfcsr);
+ rfcsr = rt2800_rfcsr_read_bank(rt2x00dev, 7, 7);
rfcsr &= (~0x3F);
rfcsr |= rx_agc_fc;
rt2800_rfcsr_write_bank(rt2x00dev, 7, 7, rfcsr);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 58, &rfcsr);
+ rfcsr = rt2800_rfcsr_read_bank(rt2x00dev, 5, 58);
rfcsr &= (~0x3F);
rfcsr |= tx_agc_fc;
rt2800_rfcsr_write_bank(rt2x00dev, 5, 58, rfcsr);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 59, &rfcsr);
+ rfcsr = rt2800_rfcsr_read_bank(rt2x00dev, 5, 59);
rfcsr &= (~0x3F);
rfcsr |= tx_agc_fc;
rt2800_rfcsr_write_bank(rt2x00dev, 5, 59, rfcsr);
- rt2800_rfcsr_read_bank(rt2x00dev, 7, 58, &rfcsr);
+ rfcsr = rt2800_rfcsr_read_bank(rt2x00dev, 7, 58);
rfcsr &= (~0x3F);
rfcsr |= tx_agc_fc;
rt2800_rfcsr_write_bank(rt2x00dev, 7, 58, rfcsr);
- rt2800_rfcsr_read_bank(rt2x00dev, 7, 59, &rfcsr);
+ rfcsr = rt2800_rfcsr_read_bank(rt2x00dev, 7, 59);
rfcsr &= (~0x3F);
rfcsr |= tx_agc_fc;
rt2800_rfcsr_write_bank(rt2x00dev, 7, 59, rfcsr);
@@ -3350,34 +3354,33 @@
if (max_power > 0x2f)
max_power = 0x2f;
- rt2800_register_read(rt2x00dev, TX_ALC_CFG_0, &reg);
+ reg = rt2800_register_read(rt2x00dev, TX_ALC_CFG_0);
rt2x00_set_field32(&reg, TX_ALC_CFG_0_CH_INIT_0, power_level);
rt2x00_set_field32(&reg, TX_ALC_CFG_0_CH_INIT_1, power_level);
rt2x00_set_field32(&reg, TX_ALC_CFG_0_LIMIT_0, max_power);
rt2x00_set_field32(&reg, TX_ALC_CFG_0_LIMIT_1, max_power);
- rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_INTERNAL_TX_ALC)) {
/* init base power by eeprom target power */
- rt2800_eeprom_read(rt2x00dev, EEPROM_TXPOWER_INIT,
- &target_power);
+ target_power = rt2800_eeprom_read(rt2x00dev,
+ EEPROM_TXPOWER_INIT);
rt2x00_set_field32(&reg, TX_ALC_CFG_0_CH_INIT_0, target_power);
rt2x00_set_field32(&reg, TX_ALC_CFG_0_CH_INIT_1, target_power);
}
rt2800_register_write(rt2x00dev, TX_ALC_CFG_0, reg);
- rt2800_register_read(rt2x00dev, TX_ALC_CFG_1, &reg);
+ reg = rt2800_register_read(rt2x00dev, TX_ALC_CFG_1);
rt2x00_set_field32(&reg, TX_ALC_CFG_1_TX_TEMP_COMP, 0);
rt2800_register_write(rt2x00dev, TX_ALC_CFG_1, reg);
/* Save MAC SYS CTRL registers */
- rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &mac_sys_ctrl);
+ mac_sys_ctrl = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
/* Disable Tx/Rx */
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
/* Check MAC Tx/Rx idle */
for (i = 0; i < 10000; i++) {
- rt2800_register_read(rt2x00dev, MAC_STATUS_CFG,
- &mac_status);
+ mac_status = rt2800_register_read(rt2x00dev, MAC_STATUS_CFG);
if (mac_status & 0x3)
usleep_range(50, 200);
else
@@ -3388,7 +3391,7 @@
rt2x00_warn(rt2x00dev, "Wait MAC Status to MAX !!!\n");
if (chan->center_freq > 2457) {
- rt2800_bbp_read(rt2x00dev, 30, &bbp);
+ bbp = rt2800_bbp_read(rt2x00dev, 30);
bbp = 0x40;
rt2800_bbp_write(rt2x00dev, 30, bbp);
rt2800_rfcsr_write(rt2x00dev, 39, 0);
@@ -3397,7 +3400,7 @@
else
rt2800_rfcsr_write(rt2x00dev, 42, 0x7b);
} else {
- rt2800_bbp_read(rt2x00dev, 30, &bbp);
+ bbp = rt2800_bbp_read(rt2x00dev, 30);
bbp = 0x1f;
rt2800_bbp_write(rt2x00dev, 30, bbp);
rt2800_rfcsr_write(rt2x00dev, 39, 0x80);
@@ -3418,7 +3421,7 @@
u8 chain, reg;
for (chain = 0; chain < rt2x00dev->default_ant.rx_chain_num; chain++) {
- rt2800_bbp_read(rt2x00dev, 27, &reg);
+ reg = rt2800_bbp_read(rt2x00dev, 27);
rt2x00_set_field8(&reg, BBP27_RX_CHAIN_SEL, chain);
rt2800_bbp_write(rt2x00dev, 27, reg);
@@ -3597,7 +3600,7 @@
rt2x00_rf(rt2x00dev, RF5372) ||
rt2x00_rf(rt2x00dev, RF5390) ||
rt2x00_rf(rt2x00dev, RF5392)) {
- rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 30);
if (rt2x00_rf(rt2x00dev, RF3322)) {
rt2x00_set_field8(&rfcsr, RF3322_RFCSR30_TX_H20M,
conf_is_ht40(conf));
@@ -3611,7 +3614,7 @@
}
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 3);
rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
}
@@ -3690,7 +3693,7 @@
rt2800_bbp_write(rt2x00dev, 75, 0x50);
}
- rt2800_register_read(rt2x00dev, TX_BAND_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, TX_BAND_CFG);
rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_MINUS, conf_is_ht40_minus(conf));
rt2x00_set_field32(&reg, TX_BAND_CFG_A, rf->channel > 14);
rt2x00_set_field32(&reg, TX_BAND_CFG_BG, rf->channel <= 14);
@@ -3699,7 +3702,7 @@
if (rt2x00_rt(rt2x00dev, RT3572))
rt2800_rfcsr_write(rt2x00dev, 8, 0);
- rt2800_register_read(rt2x00dev, TX_PIN_CFG, &tx_pin);
+ tx_pin = rt2800_register_read(rt2x00dev, TX_PIN_CFG);
switch (rt2x00dev->default_ant.tx_chain_num) {
case 3:
@@ -3765,7 +3768,7 @@
}
if (rt2x00_rt(rt2x00dev, RT3593)) {
- rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+ reg = rt2800_register_read(rt2x00dev, GPIO_CTRL);
/* Band selection */
if (rt2x00_is_usb(rt2x00dev) ||
@@ -3832,11 +3835,11 @@
rt2800_iq_calibrate(rt2x00dev, rf->channel);
}
- rt2800_bbp_read(rt2x00dev, 4, &bbp);
+ bbp = rt2800_bbp_read(rt2x00dev, 4);
rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * conf_is_ht40(conf));
rt2800_bbp_write(rt2x00dev, 4, bbp);
- rt2800_bbp_read(rt2x00dev, 3, &bbp);
+ bbp = rt2800_bbp_read(rt2x00dev, 3);
rt2x00_set_field8(&bbp, BBP3_HT40_MINUS, conf_is_ht40_minus(conf));
rt2800_bbp_write(rt2x00dev, 3, bbp);
@@ -3857,16 +3860,16 @@
/*
* Clear channel statistic counters
*/
- rt2800_register_read(rt2x00dev, CH_IDLE_STA, &reg);
- rt2800_register_read(rt2x00dev, CH_BUSY_STA, &reg);
- rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &reg);
+ reg = rt2800_register_read(rt2x00dev, CH_IDLE_STA);
+ reg = rt2800_register_read(rt2x00dev, CH_BUSY_STA);
+ reg = rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC);
/*
* Clear update flag
*/
if (rt2x00_rt(rt2x00dev, RT3352) ||
rt2x00_rt(rt2x00dev, RT5350)) {
- rt2800_bbp_read(rt2x00dev, 49, &bbp);
+ bbp = rt2800_bbp_read(rt2x00dev, 49);
rt2x00_set_field8(&bbp, BBP49_UPDATE_FLAG, 0);
rt2800_bbp_write(rt2x00dev, 49, bbp);
}
@@ -3883,7 +3886,7 @@
/*
* First check if temperature compensation is supported.
*/
- rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1);
if (!rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC))
return 0;
@@ -3896,62 +3899,62 @@
* Example TSSI bounds 0xF0 0xD0 0xB5 0xA0 0x88 0x45 0x25 0x15 0x00
*/
if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) {
- rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1);
tssi_bounds[0] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG1_MINUS4);
tssi_bounds[1] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG1_MINUS3);
- rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG2, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG2);
tssi_bounds[2] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG2_MINUS2);
tssi_bounds[3] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG2_MINUS1);
- rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG3, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG3);
tssi_bounds[4] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG3_REF);
tssi_bounds[5] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG3_PLUS1);
- rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG4, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG4);
tssi_bounds[6] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG4_PLUS2);
tssi_bounds[7] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG4_PLUS3);
- rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG5, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG5);
tssi_bounds[8] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG5_PLUS4);
step = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG5_AGC_STEP);
} else {
- rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A1, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A1);
tssi_bounds[0] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A1_MINUS4);
tssi_bounds[1] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A1_MINUS3);
- rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A2, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A2);
tssi_bounds[2] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A2_MINUS2);
tssi_bounds[3] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A2_MINUS1);
- rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A3, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A3);
tssi_bounds[4] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A3_REF);
tssi_bounds[5] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A3_PLUS1);
- rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A4, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A4);
tssi_bounds[6] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A4_PLUS2);
tssi_bounds[7] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A4_PLUS3);
- rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A5, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A5);
tssi_bounds[8] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A5_PLUS4);
@@ -3968,7 +3971,7 @@
/*
* Read current TSSI (BBP 49).
*/
- rt2800_bbp_read(rt2x00dev, 49, &current_tssi);
+ current_tssi = rt2800_bbp_read(rt2x00dev, 49);
/*
* Compare TSSI value (BBP49) with the compensation boundaries
@@ -3997,7 +4000,7 @@
u8 comp_type;
int comp_value = 0;
- rt2800_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA);
/*
* HT40 compensation not required.
@@ -4075,13 +4078,13 @@
* .11b data rates need an additional 4dBm added
* when calculating eirp txpower.
*/
- rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
- 1, &eeprom);
+ eeprom = rt2800_eeprom_read_from_array(rt2x00dev,
+ EEPROM_TXPOWER_BYRATE,
+ 1);
criterion = rt2x00_get_field16(eeprom,
EEPROM_TXPOWER_BYRATE_RATE0);
- rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER,
- &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER);
if (band == NL80211_BAND_2GHZ)
eirp_txpower_criterion = rt2x00_get_field16(eeprom,
@@ -4150,8 +4153,8 @@
offset += 8;
/* read the next four txpower values */
- rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
- offset, &eeprom);
+ eeprom = rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset);
/* CCK 1MBS,2MBS */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
@@ -4198,8 +4201,8 @@
TX_PWR_CFG_0_EXT_OFDM12_CH2, txpower);
/* read the next four txpower values */
- rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
- offset + 1, &eeprom);
+ eeprom = rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset + 1);
/* OFDM 24MBS,36MBS */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
@@ -4235,8 +4238,8 @@
TX_PWR_CFG_7_OFDM54_CH2, txpower);
/* read the next four txpower values */
- rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
- offset + 2, &eeprom);
+ eeprom = rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset + 2);
/* MCS 0,1 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
@@ -4283,8 +4286,8 @@
TX_PWR_CFG_2_EXT_MCS6_CH2, txpower);
/* read the next four txpower values */
- rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
- offset + 3, &eeprom);
+ eeprom = rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset + 3);
/* MCS 7 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
@@ -4331,8 +4334,8 @@
TX_PWR_CFG_3_EXT_MCS12_CH2, txpower);
/* read the next four txpower values */
- rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
- offset + 4, &eeprom);
+ eeprom = rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset + 4);
/* MCS 14 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
@@ -4379,8 +4382,8 @@
TX_PWR_CFG_5_MCS18_CH2, txpower);
/* read the next four txpower values */
- rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
- offset + 5, &eeprom);
+ eeprom = rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset + 5);
/* MCS 20,21 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
@@ -4416,8 +4419,8 @@
TX_PWR_CFG_8_MCS23_CH2, txpower);
/* read the next four txpower values */
- rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
- offset + 6, &eeprom);
+ eeprom = rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset + 6);
/* STBC, MCS 0,1 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
@@ -4460,8 +4463,8 @@
txpower);
/* read the next four txpower values */
- rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
- offset + 7, &eeprom);
+ eeprom = rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset + 7);
/* STBC, MCS 7 */
txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
@@ -4541,8 +4544,9 @@
* board vendors expected when they populated the EEPROM...
*/
for (i = 0; i < 5; i++) {
- rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
- i * 2, &eeprom);
+ eeprom = rt2800_eeprom_read_from_array(rt2x00dev,
+ EEPROM_TXPOWER_BYRATE,
+ i * 2);
data = eeprom;
@@ -4558,8 +4562,9 @@
gdata |= (t << 8);
- rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
- (i * 2) + 1, &eeprom);
+ eeprom = rt2800_eeprom_read_from_array(rt2x00dev,
+ EEPROM_TXPOWER_BYRATE,
+ (i * 2) + 1);
t = eeprom & 0x3f;
if (t == 32)
@@ -4601,26 +4606,26 @@
/* For OFDM 54MBS use value from OFDM 48MBS */
pwreg = 0;
- rt2800_register_read(rt2x00dev, TX_PWR_CFG_1, &reg);
+ reg = rt2800_register_read(rt2x00dev, TX_PWR_CFG_1);
t = rt2x00_get_field32(reg, TX_PWR_CFG_1B_48MBS);
rt2x00_set_field32(&pwreg, TX_PWR_CFG_7B_54MBS, t);
/* For MCS 7 use value from MCS 6 */
- rt2800_register_read(rt2x00dev, TX_PWR_CFG_2, &reg);
+ reg = rt2800_register_read(rt2x00dev, TX_PWR_CFG_2);
t = rt2x00_get_field32(reg, TX_PWR_CFG_2B_MCS6_MCS7);
rt2x00_set_field32(&pwreg, TX_PWR_CFG_7B_MCS7, t);
rt2800_register_write(rt2x00dev, TX_PWR_CFG_7, pwreg);
/* For MCS 15 use value from MCS 14 */
pwreg = 0;
- rt2800_register_read(rt2x00dev, TX_PWR_CFG_3, &reg);
+ reg = rt2800_register_read(rt2x00dev, TX_PWR_CFG_3);
t = rt2x00_get_field32(reg, TX_PWR_CFG_3B_MCS14);
rt2x00_set_field32(&pwreg, TX_PWR_CFG_8B_MCS15, t);
rt2800_register_write(rt2x00dev, TX_PWR_CFG_8, pwreg);
/* For STBC MCS 7 use value from STBC MCS 6 */
pwreg = 0;
- rt2800_register_read(rt2x00dev, TX_PWR_CFG_4, &reg);
+ reg = rt2800_register_read(rt2x00dev, TX_PWR_CFG_4);
t = rt2x00_get_field32(reg, TX_PWR_CFG_4B_STBC_MCS6);
rt2x00_set_field32(&pwreg, TX_PWR_CFG_9B_STBC_MCS7, t);
rt2800_register_write(rt2x00dev, TX_PWR_CFG_9, pwreg);
@@ -4702,7 +4707,7 @@
} else {
power_ctrl = 0;
}
- rt2800_bbp_read(rt2x00dev, 1, &r1);
+ r1 = rt2800_bbp_read(rt2x00dev, 1);
rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
rt2800_bbp_write(rt2x00dev, 1, r1);
@@ -4713,11 +4718,12 @@
if (offset > TX_PWR_CFG_4)
break;
- rt2800_register_read(rt2x00dev, offset, &reg);
+ reg = rt2800_register_read(rt2x00dev, offset);
/* read the next four txpower values */
- rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
- i, &eeprom);
+ eeprom = rt2800_eeprom_read_from_array(rt2x00dev,
+ EEPROM_TXPOWER_BYRATE,
+ i);
is_rate_b = i ? 0 : 1;
/*
@@ -4765,8 +4771,9 @@
rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3, txpower);
/* read the next four txpower values */
- rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
- i + 1, &eeprom);
+ eeprom = rt2800_eeprom_read_from_array(rt2x00dev,
+ EEPROM_TXPOWER_BYRATE,
+ i + 1);
is_rate_b = 0;
/*
@@ -4853,7 +4860,7 @@
* periodically to adjust the frequency to be precision.
*/
- rt2800_register_read(rt2x00dev, TX_PIN_CFG, &tx_pin);
+ tx_pin = rt2800_register_read(rt2x00dev, TX_PIN_CFG);
tx_pin &= TX_PIN_CFG_PA_PE_DISABLE;
rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
@@ -4864,7 +4871,7 @@
case RF3022:
case RF3320:
case RF3052:
- rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 7);
rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
break;
@@ -4879,7 +4886,7 @@
case RF5390:
case RF5392:
case RF5592:
- rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 3);
rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
min_sleep = 1000;
@@ -4887,7 +4894,7 @@
case RF7620:
rt2800_rfcsr_write(rt2x00dev, 5, 0x40);
rt2800_rfcsr_write(rt2x00dev, 4, 0x0C);
- rt2800_rfcsr_read(rt2x00dev, 4, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 4);
rt2x00_set_field8(&rfcsr, RFCSR4_VCOCAL_EN, 1);
rt2800_rfcsr_write(rt2x00dev, 4, rfcsr);
min_sleep = 2000;
@@ -4901,7 +4908,7 @@
if (min_sleep > 0)
usleep_range(min_sleep, min_sleep * 2);
- rt2800_register_read(rt2x00dev, TX_PIN_CFG, &tx_pin);
+ tx_pin = rt2800_register_read(rt2x00dev, TX_PIN_CFG);
if (rt2x00dev->rf_channel <= 14) {
switch (rt2x00dev->default_ant.tx_chain_num) {
case 3:
@@ -4975,7 +4982,7 @@
{
u32 reg;
- rt2800_register_read(rt2x00dev, TX_RTY_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, TX_RTY_CFG);
rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT,
libconf->conf->short_frame_max_tx_count);
rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT,
@@ -4994,7 +5001,7 @@
if (state == STATE_SLEEP) {
rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0);
- rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG);
rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 5);
rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE,
libconf->conf->listen_interval - 1);
@@ -5003,7 +5010,7 @@
rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
} else {
- rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG);
rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 0);
rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE, 0);
rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 0);
@@ -5046,7 +5053,7 @@
/*
* Update FCS error count from register.
*/
- rt2800_register_read(rt2x00dev, RX_STA_CNT0, &reg);
+ reg = rt2800_register_read(rt2x00dev, RX_STA_CNT0);
qual->rx_failed = rt2x00_get_field32(reg, RX_STA_CNT0_CRC_ERR);
}
EXPORT_SYMBOL_GPL(rt2800_link_stats);
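/*
 * Minimal usage sketch (hypothetical caller, not from this patch):
 * RX_STA_CNT0 is a clear-on-read counter, so each call yields the FCS
 * errors accumulated since the previous poll:
 *
 *	rt2800_link_stats(rt2x00dev, &qual);
 *	total_crc_errors += qual.rx_failed;
 */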
@@ -5175,7 +5182,7 @@
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
- rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, BCN_TIME_CFG);
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, 1600);
rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, 0);
@@ -5186,43 +5193,43 @@
rt2800_config_filter(rt2x00dev, FIF_ALLMULTI);
- rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG);
rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, 9);
rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
if (rt2x00_rt(rt2x00dev, RT3290)) {
- rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+ reg = rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL);
if (rt2x00_get_field32(reg, WLAN_EN) == 1) {
rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 1);
rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
}
- rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
+ reg = rt2800_register_read(rt2x00dev, CMB_CTRL);
if (!(rt2x00_get_field32(reg, LDO0_EN) == 1)) {
rt2x00_set_field32(&reg, LDO0_EN, 1);
rt2x00_set_field32(&reg, LDO_BGSEL, 3);
rt2800_register_write(rt2x00dev, CMB_CTRL, reg);
}
- rt2800_register_read(rt2x00dev, OSC_CTRL, &reg);
+ reg = rt2800_register_read(rt2x00dev, OSC_CTRL);
rt2x00_set_field32(&reg, OSC_ROSC_EN, 1);
rt2x00_set_field32(&reg, OSC_CAL_REQ, 1);
rt2x00_set_field32(&reg, OSC_REF_CYCLE, 0x27);
rt2800_register_write(rt2x00dev, OSC_CTRL, reg);
- rt2800_register_read(rt2x00dev, COEX_CFG0, &reg);
+ reg = rt2800_register_read(rt2x00dev, COEX_CFG0);
rt2x00_set_field32(&reg, COEX_CFG_ANT, 0x5e);
rt2800_register_write(rt2x00dev, COEX_CFG0, reg);
- rt2800_register_read(rt2x00dev, COEX_CFG2, &reg);
+ reg = rt2800_register_read(rt2x00dev, COEX_CFG2);
rt2x00_set_field32(&reg, BT_COEX_CFG1, 0x00);
rt2x00_set_field32(&reg, BT_COEX_CFG0, 0x17);
rt2x00_set_field32(&reg, WL_COEX_CFG1, 0x93);
rt2x00_set_field32(&reg, WL_COEX_CFG0, 0x7f);
rt2800_register_write(rt2x00dev, COEX_CFG2, reg);
- rt2800_register_read(rt2x00dev, PLL_CTRL, &reg);
+ reg = rt2800_register_read(rt2x00dev, PLL_CTRL);
rt2x00_set_field32(&reg, PLL_CONTROL, 1);
rt2800_register_write(rt2x00dev, PLL_CTRL, reg);
}
@@ -5243,8 +5250,7 @@
if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
- rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1,
- &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST))
rt2800_register_write(rt2x00dev, TX_SW_CFG2,
0x0000002c);
@@ -5279,8 +5285,7 @@
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000402);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
if (rt2x00_rt_rev_lt(rt2x00dev, RT3593, REV_RT3593E)) {
- rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1,
- &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1);
if (rt2x00_get_field16(eeprom,
EEPROM_NIC_CONF1_DAC_TEST))
rt2800_register_write(rt2x00dev, TX_SW_CFG2,
@@ -5319,7 +5324,7 @@
0x3630363A);
rt2800_register_write(rt2x00dev, TX1_RF_GAIN_CORRECT,
0x3630363A);
- rt2800_register_read(rt2x00dev, TX_ALC_CFG_1, &reg);
+ reg = rt2800_register_read(rt2x00dev, TX_ALC_CFG_1);
rt2x00_set_field32(&reg, TX_ALC_CFG_1_ROS_BUSY_EN, 0);
rt2800_register_write(rt2x00dev, TX_ALC_CFG_1, reg);
} else {
@@ -5327,7 +5332,7 @@
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
}
- rt2800_register_read(rt2x00dev, TX_LINK_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, TX_LINK_CFG);
rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFB_LIFETIME, 32);
rt2x00_set_field32(&reg, TX_LINK_CFG_MFB_ENABLE, 0);
rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_UMFS_ENABLE, 0);
@@ -5338,13 +5343,13 @@
rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFS, 0);
rt2800_register_write(rt2x00dev, TX_LINK_CFG, reg);
- rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG);
rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_MPDU_LIFETIME, 9);
rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 32);
rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_TX_OP_TIMEOUT, 10);
rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
- rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, MAX_LEN_CFG);
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
if (rt2x00_is_usb(rt2x00dev)) {
drv_data->max_psdu = 3;
@@ -5360,7 +5365,7 @@
rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 10);
rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg);
- rt2800_register_read(rt2x00dev, LED_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, LED_CFG);
rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, 70);
rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, 30);
rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
@@ -5372,7 +5377,7 @@
rt2800_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f);
- rt2800_register_read(rt2x00dev, TX_RTY_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, TX_RTY_CFG);
rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT, 2);
rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT, 2);
rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
@@ -5381,7 +5386,7 @@
rt2x00_set_field32(&reg, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1);
rt2800_register_write(rt2x00dev, TX_RTY_CFG, reg);
- rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, AUTO_RSP_CFG);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_AUTORESPONDER, 1);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY, 1);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MMODE, 1);
@@ -5391,7 +5396,7 @@
rt2x00_set_field32(&reg, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0);
rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
- rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, CCK_PROT_CFG);
rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 3);
rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0);
rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV_SHORT, 1);
@@ -5404,7 +5409,7 @@
rt2x00_set_field32(&reg, CCK_PROT_CFG_RTS_TH_EN, 1);
rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg);
- rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, OFDM_PROT_CFG);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 3);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV_SHORT, 1);
@@ -5417,7 +5422,7 @@
rt2x00_set_field32(&reg, OFDM_PROT_CFG_RTS_TH_EN, 1);
rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
- rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, MM20_PROT_CFG);
rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, 0x4004);
rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, 1);
rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV_SHORT, 1);
@@ -5430,7 +5435,7 @@
rt2x00_set_field32(&reg, MM20_PROT_CFG_RTS_TH_EN, 0);
rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg);
- rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, MM40_PROT_CFG);
rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV_SHORT, 1);
@@ -5443,7 +5448,7 @@
rt2x00_set_field32(&reg, MM40_PROT_CFG_RTS_TH_EN, 0);
rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg);
- rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, GF20_PROT_CFG);
rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, 0x4004);
rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, 1);
rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV_SHORT, 1);
@@ -5456,7 +5461,7 @@
rt2x00_set_field32(&reg, GF20_PROT_CFG_RTS_TH_EN, 0);
rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg);
- rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, GF40_PROT_CFG);
rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, 0x4084);
rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, 1);
rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV_SHORT, 1);
@@ -5472,7 +5477,7 @@
if (rt2x00_is_usb(rt2x00dev)) {
rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40006);
- rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
@@ -5489,7 +5494,7 @@
* The legacy driver also sets TXOP_CTRL_CFG_RESERVED_TRUN_EN to 1
* although it is reserved.
*/
- rt2800_register_read(rt2x00dev, TXOP_CTRL_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, TXOP_CTRL_CFG);
rt2x00_set_field32(&reg, TXOP_CTRL_CFG_TIMEOUT_TRUN_EN, 1);
rt2x00_set_field32(&reg, TXOP_CTRL_CFG_AC_TRUN_EN, 1);
rt2x00_set_field32(&reg, TXOP_CTRL_CFG_TXRATEGRP_TRUN_EN, 1);
@@ -5505,7 +5510,7 @@
reg = rt2x00_rt(rt2x00dev, RT5592) ? 0x00000082 : 0x00000002;
rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, reg);
- rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, TX_RTS_CFG);
rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 7);
rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES,
IEEE80211_MAX_RTS_THRESHOLD);
@@ -5521,7 +5526,7 @@
* connection problems with 11g + CTS protection. Hence, use the same
* defaults as the Ralink driver: 16 for both, CCK and OFDM SIFS.
*/
- rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, XIFS_TIME_CFG);
rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, 16);
rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, 16);
rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4);
@@ -5551,16 +5556,16 @@
rt2800_clear_beacon_register(rt2x00dev, i);
if (rt2x00_is_usb(rt2x00dev)) {
- rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
+ reg = rt2800_register_read(rt2x00dev, US_CYC_CNT);
rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 30);
rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
} else if (rt2x00_is_pcie(rt2x00dev)) {
- rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
+ reg = rt2800_register_read(rt2x00dev, US_CYC_CNT);
rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 125);
rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
}
- rt2800_register_read(rt2x00dev, HT_FBK_CFG0, &reg);
+ reg = rt2800_register_read(rt2x00dev, HT_FBK_CFG0);
rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS0FBK, 0);
rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS1FBK, 0);
rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS2FBK, 1);
@@ -5571,7 +5576,7 @@
rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS7FBK, 6);
rt2800_register_write(rt2x00dev, HT_FBK_CFG0, reg);
- rt2800_register_read(rt2x00dev, HT_FBK_CFG1, &reg);
+ reg = rt2800_register_read(rt2x00dev, HT_FBK_CFG1);
rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS8FBK, 8);
rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS9FBK, 8);
rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS10FBK, 9);
@@ -5582,7 +5587,7 @@
rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS15FBK, 14);
rt2800_register_write(rt2x00dev, HT_FBK_CFG1, reg);
- rt2800_register_read(rt2x00dev, LG_FBK_CFG0, &reg);
+ reg = rt2800_register_read(rt2x00dev, LG_FBK_CFG0);
rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS0FBK, 8);
rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS1FBK, 8);
rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS2FBK, 9);
@@ -5593,7 +5598,7 @@
rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS7FBK, 14);
rt2800_register_write(rt2x00dev, LG_FBK_CFG0, reg);
- rt2800_register_read(rt2x00dev, LG_FBK_CFG1, &reg);
+ reg = rt2800_register_read(rt2x00dev, LG_FBK_CFG1);
rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS0FBK, 0);
rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS1FBK, 0);
rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS2FBK, 1);
@@ -5603,7 +5608,7 @@
/*
* Do not force the BA window size, we use the TXWI to set it
*/
- rt2800_register_read(rt2x00dev, AMPDU_BA_WINSIZE, &reg);
+ reg = rt2800_register_read(rt2x00dev, AMPDU_BA_WINSIZE);
rt2x00_set_field32(&reg, AMPDU_BA_WINSIZE_FORCE_WINSIZE_ENABLE, 0);
rt2x00_set_field32(&reg, AMPDU_BA_WINSIZE_FORCE_WINSIZE, 0);
rt2800_register_write(rt2x00dev, AMPDU_BA_WINSIZE, reg);
@@ -5613,24 +5618,24 @@
* These registers are cleared on read,
* so we may pass a useless variable to store the value.
*/
- rt2800_register_read(rt2x00dev, RX_STA_CNT0, &reg);
- rt2800_register_read(rt2x00dev, RX_STA_CNT1, &reg);
- rt2800_register_read(rt2x00dev, RX_STA_CNT2, &reg);
- rt2800_register_read(rt2x00dev, TX_STA_CNT0, &reg);
- rt2800_register_read(rt2x00dev, TX_STA_CNT1, &reg);
- rt2800_register_read(rt2x00dev, TX_STA_CNT2, &reg);
+ reg = rt2800_register_read(rt2x00dev, RX_STA_CNT0);
+ reg = rt2800_register_read(rt2x00dev, RX_STA_CNT1);
+ reg = rt2800_register_read(rt2x00dev, RX_STA_CNT2);
+ reg = rt2800_register_read(rt2x00dev, TX_STA_CNT0);
+ reg = rt2800_register_read(rt2x00dev, TX_STA_CNT1);
+ reg = rt2800_register_read(rt2x00dev, TX_STA_CNT2);
/*
* Setup leadtime for pre tbtt interrupt to 6ms
*/
- rt2800_register_read(rt2x00dev, INT_TIMER_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, INT_TIMER_CFG);
rt2x00_set_field32(&reg, INT_TIMER_CFG_PRE_TBTT_TIMER, 6 << 4);
rt2800_register_write(rt2x00dev, INT_TIMER_CFG, reg);
/*
* Set up channel statistics timer
*/
- rt2800_register_read(rt2x00dev, CH_TIME_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, CH_TIME_CFG);
rt2x00_set_field32(&reg, CH_TIME_CFG_EIFS_BUSY, 1);
rt2x00_set_field32(&reg, CH_TIME_CFG_NAV_BUSY, 1);
rt2x00_set_field32(&reg, CH_TIME_CFG_RX_BUSY, 1);
@@ -5647,7 +5652,7 @@
u32 reg;
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2800_register_read(rt2x00dev, MAC_STATUS_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, MAC_STATUS_CFG);
if (!rt2x00_get_field32(reg, MAC_STATUS_CFG_BBP_RF_BUSY))
return 0;
@@ -5672,7 +5677,7 @@
msleep(1);
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2800_bbp_read(rt2x00dev, 0, &value);
+ value = rt2800_bbp_read(rt2x00dev, 0);
if ((value != 0xff) && (value != 0x00))
return 0;
udelay(REGISTER_BUSY_DELAY);
@@ -5686,7 +5691,7 @@
{
u8 value;
- rt2800_bbp_read(rt2x00dev, 4, &value);
+ value = rt2800_bbp_read(rt2x00dev, 4);
rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
rt2800_bbp_write(rt2x00dev, 4, value);
}
@@ -5743,8 +5748,8 @@
u16 eeprom;
u8 value;
- rt2800_bbp_read(rt2x00dev, 138, &value);
- rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+ value = rt2800_bbp_read(rt2x00dev, 138);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
value |= 0x20;
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
@@ -5927,12 +5932,12 @@
rt2800_bbp_write(rt2x00dev, 155, 0x3b);
rt2800_bbp_write(rt2x00dev, 253, 0x04);
- rt2800_bbp_read(rt2x00dev, 47, &value);
+ value = rt2800_bbp_read(rt2x00dev, 47);
rt2x00_set_field8(&value, BBP47_TSSI_ADC6, 1);
rt2800_bbp_write(rt2x00dev, 47, value);
/* Use 5-bit ADC for Acquisition and 8-bit ADC for data */
- rt2800_bbp_read(rt2x00dev, 3, &value);
+ value = rt2800_bbp_read(rt2x00dev, 3);
rt2x00_set_field8(&value, BBP3_ADC_MODE_SWITCH, 1);
rt2x00_set_field8(&value, BBP3_ADC_INIT_MODE, 1);
rt2800_bbp_write(rt2x00dev, 3, value);
@@ -6191,7 +6196,7 @@
rt2800_disable_unused_dac_adc(rt2x00dev);
- rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1);
div_mode = rt2x00_get_field16(eeprom,
EEPROM_NIC_CONF1_ANT_DIVERSITY);
ant = (div_mode == 3) ? 1 : 0;
@@ -6200,7 +6205,7 @@
if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
u32 reg;
- rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+ reg = rt2800_register_read(rt2x00dev, GPIO_CTRL);
rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0);
rt2x00_set_field32(&reg, GPIO_CTRL_DIR6, 0);
rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 0);
@@ -6219,7 +6224,7 @@
rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */
}
- rt2800_bbp_read(rt2x00dev, 152, &value);
+ value = rt2800_bbp_read(rt2x00dev, 152);
if (ant == 0)
rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
else
@@ -6237,7 +6242,7 @@
rt2800_init_bbp_early(rt2x00dev);
- rt2800_bbp_read(rt2x00dev, 105, &value);
+ value = rt2800_bbp_read(rt2x00dev, 105);
rt2x00_set_field8(&value, BBP105_MLD,
rt2x00dev->default_ant.rx_chain_num == 2);
rt2800_bbp_write(rt2x00dev, 105, value);
@@ -6277,10 +6282,10 @@
rt2800_bbp4_mac_if_ctrl(rt2x00dev);
- rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1);
div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY);
ant = (div_mode == 3) ? 1 : 0;
- rt2800_bbp_read(rt2x00dev, 152, &value);
+ value = rt2800_bbp_read(rt2x00dev, 152);
if (ant == 0) {
/* Main antenna */
rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
@@ -6291,7 +6296,7 @@
rt2800_bbp_write(rt2x00dev, 152, value);
if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) {
- rt2800_bbp_read(rt2x00dev, 254, &value);
+ value = rt2800_bbp_read(rt2x00dev, 254);
rt2x00_set_field8(&value, BBP254_BIT7, 1);
rt2800_bbp_write(rt2x00dev, 254, value);
}
@@ -6317,11 +6322,10 @@
rt2800_bbp_write(rt2x00dev, 159, value);
}
-static void rt2800_bbp_dcoc_read(struct rt2x00_dev *rt2x00dev,
- const u8 reg, u8 *value)
+static u8 rt2800_bbp_dcoc_read(struct rt2x00_dev *rt2x00dev, const u8 reg)
{
rt2800_bbp_write(rt2x00dev, 158, reg);
- rt2800_bbp_read(rt2x00dev, 159, value);
+ return rt2800_bbp_read(rt2x00dev, 159);
}
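/*
 * Sketch of the conversion pattern applied throughout this patch
 * (illustrative, with simplified names and types). The out-parameter
 * accessor
 *
 *	void read_reg(struct dev *d, u8 off, u8 *val);
 *	...
 *	read_reg(d, 159, &val);
 *
 * becomes a value-returning accessor, so every call site collapses to
 * a plain assignment and the scratch pointer disappears:
 *
 *	u8 read_reg(struct dev *d, u8 off);
 *	...
 *	val = read_reg(d, 159);
 */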
static void rt2800_init_bbp_6352(struct rt2x00_dev *rt2x00dev)
@@ -6329,7 +6333,7 @@
u8 bbp;
/* Apply Maximum Likelihood Detection (MLD) for 2 stream case */
- rt2800_bbp_read(rt2x00dev, 105, &bbp);
+ bbp = rt2800_bbp_read(rt2x00dev, 105);
rt2x00_set_field8(&bbp, BBP105_MLD,
rt2x00dev->default_ant.rx_chain_num == 2);
rt2800_bbp_write(rt2x00dev, 105, bbp);
@@ -6338,7 +6342,7 @@
rt2800_bbp4_mac_if_ctrl(rt2x00dev);
/* Fix I/Q swap issue */
- rt2800_bbp_read(rt2x00dev, 1, &bbp);
+ bbp = rt2800_bbp_read(rt2x00dev, 1);
bbp |= 0x04;
rt2800_bbp_write(rt2x00dev, 1, bbp);
@@ -6578,8 +6582,8 @@
}
for (i = 0; i < EEPROM_BBP_SIZE; i++) {
- rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_BBP_START, i,
- &eeprom);
+ eeprom = rt2800_eeprom_read_from_array(rt2x00dev,
+ EEPROM_BBP_START, i);
if (eeprom != 0xffff && eeprom != 0x0000) {
reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
@@ -6593,7 +6597,7 @@
{
u32 reg;
- rt2800_register_read(rt2x00dev, OPT_14_CSR, &reg);
+ reg = rt2800_register_read(rt2x00dev, OPT_14_CSR);
rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1);
rt2800_register_write(rt2x00dev, OPT_14_CSR, reg);
}
@@ -6611,15 +6615,15 @@
rt2800_rfcsr_write(rt2x00dev, 24, rfcsr24);
- rt2800_bbp_read(rt2x00dev, 4, &bbp);
+ bbp = rt2800_bbp_read(rt2x00dev, 4);
rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40);
rt2800_bbp_write(rt2x00dev, 4, bbp);
- rt2800_rfcsr_read(rt2x00dev, 31, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 31);
rt2x00_set_field8(&rfcsr, RFCSR31_RX_H20M, bw40);
rt2800_rfcsr_write(rt2x00dev, 31, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 22);
rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1);
rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
@@ -6632,7 +6636,7 @@
rt2800_bbp_write(rt2x00dev, 25, 0x90);
msleep(1);
- rt2800_bbp_read(rt2x00dev, 55, &passband);
+ passband = rt2800_bbp_read(rt2x00dev, 55);
if (passband)
break;
}
@@ -6646,7 +6650,7 @@
rt2800_bbp_write(rt2x00dev, 25, 0x90);
msleep(1);
- rt2800_bbp_read(rt2x00dev, 55, &stopband);
+ stopband = rt2800_bbp_read(rt2x00dev, 55);
if ((passband - stopband) <= filter_target) {
rfcsr24++;
@@ -6668,7 +6672,7 @@
{
u8 rfcsr;
- rt2800_rfcsr_read(rt2x00dev, rf_reg, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, rf_reg);
rt2x00_set_field8(&rfcsr, FIELD8(0x80), 1);
rt2800_rfcsr_write(rt2x00dev, rf_reg, rfcsr);
msleep(1);
@@ -6702,22 +6706,22 @@
/*
* Save BBP 25 & 26 values for later use in channel switching (for 3052)
*/
- rt2800_bbp_read(rt2x00dev, 25, &drv_data->bbp25);
- rt2800_bbp_read(rt2x00dev, 26, &drv_data->bbp26);
+ drv_data->bbp25 = rt2800_bbp_read(rt2x00dev, 25);
+ drv_data->bbp26 = rt2800_bbp_read(rt2x00dev, 26);
/*
* Set back to initial state
*/
rt2800_bbp_write(rt2x00dev, 24, 0);
- rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 22);
rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
/*
* Set BBP back to BW20
*/
- rt2800_bbp_read(rt2x00dev, 4, &bbp);
+ bbp = rt2800_bbp_read(rt2x00dev, 4);
rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
rt2800_bbp_write(rt2x00dev, 4, bbp);
}
@@ -6728,7 +6732,7 @@
u8 min_gain, rfcsr, bbp;
u16 eeprom;
- rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 17);
rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
if (rt2x00_rt(rt2x00dev, RT3070) ||
@@ -6749,8 +6753,8 @@
if (rt2x00_rt(rt2x00dev, RT3090)) {
/* Turn off unused DAC1 and ADC1 to reduce power consumption */
- rt2800_bbp_read(rt2x00dev, 138, &bbp);
- rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+ bbp = rt2800_bbp_read(rt2x00dev, 138);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
@@ -6759,7 +6763,7 @@
}
if (rt2x00_rt(rt2x00dev, RT3070)) {
- rt2800_rfcsr_read(rt2x00dev, 27, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 27);
if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F))
rt2x00_set_field8(&rfcsr, RFCSR27_R1, 3);
else
@@ -6771,7 +6775,7 @@
} else if (rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
rt2x00_rt(rt2x00dev, RT3390)) {
- rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
@@ -6779,15 +6783,15 @@
rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 15, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 15);
rt2x00_set_field8(&rfcsr, RFCSR15_TX_LO2_EN, 0);
rt2800_rfcsr_write(rt2x00dev, 15, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 20, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 20);
rt2x00_set_field8(&rfcsr, RFCSR20_RX_LO1_EN, 0);
rt2800_rfcsr_write(rt2x00dev, 20, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 21, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 21);
rt2x00_set_field8(&rfcsr, RFCSR21_RX_LO2_EN, 0);
rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
}
@@ -6799,30 +6803,30 @@
u8 rfcsr;
u8 tx_gain;
- rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 50);
rt2x00_set_field8(&rfcsr, RFCSR50_TX_LO2_EN, 0);
rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 51, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 51);
tx_gain = rt2x00_get_field8(drv_data->txmixer_gain_24g,
RFCSR17_TXMIXER_GAIN);
rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, tx_gain);
rt2800_rfcsr_write(rt2x00dev, 51, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 38);
rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0);
rt2800_rfcsr_write(rt2x00dev, 38, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 39, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 39);
rt2x00_set_field8(&rfcsr, RFCSR39_RX_LO2_EN, 0);
rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 30);
rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2);
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
@@ -6835,25 +6839,25 @@
u16 eeprom;
/* Turn off unused DAC1 and ADC1 to reduce power consumption */
- rt2800_bbp_read(rt2x00dev, 138, &reg);
- rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+ reg = rt2800_bbp_read(rt2x00dev, 138);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
rt2x00_set_field8(&reg, BBP138_RX_ADC1, 0);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
rt2x00_set_field8(&reg, BBP138_TX_DAC1, 1);
rt2800_bbp_write(rt2x00dev, 138, reg);
- rt2800_rfcsr_read(rt2x00dev, 38, &reg);
+ reg = rt2800_rfcsr_read(rt2x00dev, 38);
rt2x00_set_field8(&reg, RFCSR38_RX_LO1_EN, 0);
rt2800_rfcsr_write(rt2x00dev, 38, reg);
- rt2800_rfcsr_read(rt2x00dev, 39, &reg);
+ reg = rt2800_rfcsr_read(rt2x00dev, 39);
rt2x00_set_field8(&reg, RFCSR39_RX_LO2_EN, 0);
rt2800_rfcsr_write(rt2x00dev, 39, reg);
rt2800_bbp4_mac_if_ctrl(rt2x00dev);
- rt2800_rfcsr_read(rt2x00dev, 30, &reg);
+ reg = rt2800_rfcsr_read(rt2x00dev, 30);
rt2x00_set_field8(&reg, RFCSR30_RX_VCM, 2);
rt2800_rfcsr_write(rt2x00dev, 30, reg);
}
@@ -6926,7 +6930,7 @@
rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
- rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+ reg = rt2800_register_read(rt2x00dev, LDO_CFG0);
rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
@@ -6934,16 +6938,15 @@
rt2x00_rt(rt2x00dev, RT3090)) {
rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
- rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 6);
rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
- rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+ reg = rt2800_register_read(rt2x00dev, LDO_CFG0);
rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E)) {
- rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1,
- &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST))
rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
else
@@ -6951,7 +6954,7 @@
}
rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
- rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
+ reg = rt2800_register_read(rt2x00dev, GPIO_SWITCH);
rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
}
@@ -7020,7 +7023,7 @@
rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
rt2800_rfcsr_write(rt2x00dev, 61, 0xc1);
- rt2800_rfcsr_read(rt2x00dev, 29, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 29);
rt2x00_set_field8(&rfcsr, RFCSR29_RSSI_GAIN, 3);
rt2800_rfcsr_write(rt2x00dev, 29, rfcsr);
@@ -7166,7 +7169,7 @@
rt2800_rfcsr_write(rt2x00dev, 30, 0x20);
rt2800_rfcsr_write(rt2x00dev, 31, 0x0f);
- rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
+ reg = rt2800_register_read(rt2x00dev, GPIO_SWITCH);
rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
@@ -7218,16 +7221,16 @@
rt2800_rfcsr_write(rt2x00dev, 30, 0x09);
rt2800_rfcsr_write(rt2x00dev, 31, 0x10);
- rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 6);
rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
- rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+ reg = rt2800_register_read(rt2x00dev, LDO_CFG0);
rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
msleep(1);
- rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+ reg = rt2800_register_read(rt2x00dev, LDO_CFG0);
rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
@@ -7242,7 +7245,7 @@
u8 bbp;
bool txbf_enabled = false; /* FIXME */
- rt2800_bbp_read(rt2x00dev, 105, &bbp);
+ bbp = rt2800_bbp_read(rt2x00dev, 105);
if (rt2x00dev->default_ant.rx_chain_num == 1)
rt2x00_set_field8(&bbp, BBP105_MLD, 0);
else
@@ -7291,7 +7294,7 @@
u8 rfcsr;
/* Disable GPIO #4 and #7 function for LAN PE control */
- rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
+ reg = rt2800_register_read(rt2x00dev, GPIO_SWITCH);
rt2x00_set_field32(&reg, GPIO_SWITCH_4, 0);
rt2x00_set_field32(&reg, GPIO_SWITCH_7, 0);
rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
@@ -7332,22 +7335,22 @@
/* Initiate calibration */
/* TODO: use rt2800_rf_init_calibration ? */
- rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 2);
rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
rt2800_freq_cal_mode1(rt2x00dev);
- rt2800_rfcsr_read(rt2x00dev, 18, &rfcsr);
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 18);
rt2x00_set_field8(&rfcsr, RFCSR18_XO_TUNE_BYPASS, 1);
rt2800_rfcsr_write(rt2x00dev, 18, rfcsr);
- rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+ reg = rt2800_register_read(rt2x00dev, LDO_CFG0);
rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
usleep_range(1000, 1500);
- rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+ reg = rt2800_register_read(rt2x00dev, LDO_CFG0);
rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
@@ -7356,8 +7359,8 @@
drv_data->calibration_bw40 = 0x2f;
/* Save BBP 25 & 26 values for later use in channel switching */
- rt2800_bbp_read(rt2x00dev, 25, &drv_data->bbp25);
- rt2800_bbp_read(rt2x00dev, 26, &drv_data->bbp26);
+ drv_data->bbp25 = rt2800_bbp_read(rt2x00dev, 25);
+ drv_data->bbp26 = rt2800_bbp_read(rt2x00dev, 26);
rt2800_led_open_drain_enable(rt2x00dev);
rt2800_normal_mode_setup_3593(rt2x00dev);
@@ -7651,19 +7654,19 @@
{
u8 bbp_val;
- rt2800_bbp_read(rt2x00dev, 21, &bbp_val);
+ bbp_val = rt2800_bbp_read(rt2x00dev, 21);
bbp_val |= 0x1;
rt2800_bbp_write(rt2x00dev, 21, bbp_val);
usleep_range(100, 200);
if (set_bw) {
- rt2800_bbp_read(rt2x00dev, 4, &bbp_val);
+ bbp_val = rt2800_bbp_read(rt2x00dev, 4);
rt2x00_set_field8(&bbp_val, BBP4_BANDWIDTH, 2 * is_ht40);
rt2800_bbp_write(rt2x00dev, 4, bbp_val);
usleep_range(100, 200);
}
- rt2800_bbp_read(rt2x00dev, 21, &bbp_val);
+ bbp_val = rt2800_bbp_read(rt2x00dev, 21);
bbp_val &= (~0x1);
rt2800_bbp_write(rt2x00dev, 21, bbp_val);
usleep_range(100, 200);
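/*
 * Reading of the sequence above (an assumption, not stated in the
 * patch): BBP 21 bit 0 appears to hold the baseband core in reset
 * while the BBP 4 bandwidth field is changed; it is set before the
 * update and cleared afterwards, with settle delays around each step.
 */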
@@ -7680,7 +7683,7 @@
rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x06);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 17, &rf_val);
+ rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 17);
rf_val |= 0x80;
rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, rf_val);
@@ -7688,11 +7691,11 @@
rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, 0xC1);
rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, 0x20);
rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, 0x02);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 3, &rf_val);
+ rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 3);
rf_val &= (~0x3F);
rf_val |= 0x3F;
rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, rf_val);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 4, &rf_val);
+ rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4);
rf_val &= (~0x3F);
rf_val |= 0x3F;
rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, rf_val);
@@ -7701,11 +7704,11 @@
rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, 0xF1);
rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, 0x18);
rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, 0x02);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 3, &rf_val);
+ rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 3);
rf_val &= (~0x3F);
rf_val |= 0x34;
rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, rf_val);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 4, &rf_val);
+ rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4);
rf_val &= (~0x3F);
rf_val |= 0x34;
rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, rf_val);
@@ -7725,14 +7728,14 @@
cnt = 0;
do {
usleep_range(500, 2000);
- rt2800_bbp_read(rt2x00dev, 159, &bbp_val);
+ bbp_val = rt2800_bbp_read(rt2x00dev, 159);
if (bbp_val == 0x02 || cnt == 20)
break;
cnt++;
} while (cnt < 20);
- rt2800_bbp_dcoc_read(rt2x00dev, 0x39, &bbp_val);
+ bbp_val = rt2800_bbp_dcoc_read(rt2x00dev, 0x39);
cal_val = bbp_val & 0x7F;
if (cal_val >= 0x40)
cal_val -= 128;
@@ -7761,67 +7764,67 @@
u32 MAC_RF_CONTROL0, MAC_RF_BYPASS0;
/* Save MAC registers */
- rt2800_register_read(rt2x00dev, RF_CONTROL0, &MAC_RF_CONTROL0);
- rt2800_register_read(rt2x00dev, RF_BYPASS0, &MAC_RF_BYPASS0);
+ MAC_RF_CONTROL0 = rt2800_register_read(rt2x00dev, RF_CONTROL0);
+ MAC_RF_BYPASS0 = rt2800_register_read(rt2x00dev, RF_BYPASS0);
/* save BBP registers */
- rt2800_bbp_read(rt2x00dev, 23, &savebbpr23);
+ savebbpr23 = rt2800_bbp_read(rt2x00dev, 23);
- rt2800_bbp_dcoc_read(rt2x00dev, 0, &savebbp159r0);
- rt2800_bbp_dcoc_read(rt2x00dev, 2, &savebbp159r2);
+ savebbp159r0 = rt2800_bbp_dcoc_read(rt2x00dev, 0);
+ savebbp159r2 = rt2800_bbp_dcoc_read(rt2x00dev, 2);
/* Save RF registers */
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 0, &saverfb5r00);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 1, &saverfb5r01);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 3, &saverfb5r03);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 4, &saverfb5r04);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 5, &saverfb5r05);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 6, &saverfb5r06);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 7, &saverfb5r07);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 8, &saverfb5r08);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 17, &saverfb5r17);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 18, &saverfb5r18);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 19, &saverfb5r19);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 20, &saverfb5r20);
+ saverfb5r00 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 0);
+ saverfb5r01 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 1);
+ saverfb5r03 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 3);
+ saverfb5r04 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4);
+ saverfb5r05 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 5);
+ saverfb5r06 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 6);
+ saverfb5r07 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 7);
+ saverfb5r08 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 8);
+ saverfb5r17 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 17);
+ saverfb5r18 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 18);
+ saverfb5r19 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 19);
+ saverfb5r20 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 20);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 37, &saverfb5r37);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 38, &saverfb5r38);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 39, &saverfb5r39);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 40, &saverfb5r40);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 41, &saverfb5r41);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 42, &saverfb5r42);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 43, &saverfb5r43);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 44, &saverfb5r44);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 45, &saverfb5r45);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 46, &saverfb5r46);
+ saverfb5r37 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 37);
+ saverfb5r38 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 38);
+ saverfb5r39 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 39);
+ saverfb5r40 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 40);
+ saverfb5r41 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 41);
+ saverfb5r42 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 42);
+ saverfb5r43 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 43);
+ saverfb5r44 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 44);
+ saverfb5r45 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 45);
+ saverfb5r46 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 46);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 58, &saverfb5r58);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 59, &saverfb5r59);
+ saverfb5r58 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 58);
+ saverfb5r59 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 59);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 0, &rf_val);
+ rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 0);
rf_val |= 0x3;
rt2800_rfcsr_write_bank(rt2x00dev, 5, 0, rf_val);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 1, &rf_val);
+ rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 1);
rf_val |= 0x1;
rt2800_rfcsr_write_bank(rt2x00dev, 5, 1, rf_val);
cnt = 0;
do {
usleep_range(500, 2000);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 1, &rf_val);
+ rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 1);
if (((rf_val & 0x1) == 0x00) || (cnt == 40))
break;
cnt++;
} while (cnt < 40);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 0, &rf_val);
+ rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 0);
rf_val &= (~0x3);
rf_val |= 0x1;
rt2800_rfcsr_write_bank(rt2x00dev, 5, 0, rf_val);
/* I-3 */
- rt2800_bbp_read(rt2x00dev, 23, &bbp_val);
+ bbp_val = rt2800_bbp_read(rt2x00dev, 23);
bbp_val &= (~0x1F);
bbp_val |= 0x10;
rt2800_bbp_write(rt2x00dev, 23, bbp_val);
@@ -7844,7 +7847,7 @@
filter_target = rx_filter_target_40m;
}
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 8, &rf_val);
+ rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 8);
rf_val &= (~0x04);
if (loop == 1)
rf_val |= 0x4;
@@ -7856,25 +7859,25 @@
rt2800_rf_lp_config(rt2x00dev, btxcal);
if (btxcal) {
tx_agc_fc = 0;
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 58, &rf_val);
+ rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 58);
rf_val &= (~0x7F);
rt2800_rfcsr_write_bank(rt2x00dev, 5, 58, rf_val);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 59, &rf_val);
+ rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 59);
rf_val &= (~0x7F);
rt2800_rfcsr_write_bank(rt2x00dev, 5, 59, rf_val);
} else {
rx_agc_fc = 0;
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 6, &rf_val);
+ rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 6);
rf_val &= (~0x7F);
rt2800_rfcsr_write_bank(rt2x00dev, 5, 6, rf_val);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 7, &rf_val);
+ rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 7);
rf_val &= (~0x7F);
rt2800_rfcsr_write_bank(rt2x00dev, 5, 7, rf_val);
}
usleep_range(1000, 2000);
- rt2800_bbp_dcoc_read(rt2x00dev, 2, &bbp_val);
+ bbp_val = rt2800_bbp_dcoc_read(rt2x00dev, 2);
bbp_val &= (~0x6);
rt2800_bbp_dcoc_write(rt2x00dev, 2, bbp_val);
@@ -7882,25 +7885,25 @@
cal_r32_init = rt2800_lp_tx_filter_bw_cal(rt2x00dev);
- rt2800_bbp_dcoc_read(rt2x00dev, 2, &bbp_val);
+ bbp_val = rt2800_bbp_dcoc_read(rt2x00dev, 2);
bbp_val |= 0x6;
rt2800_bbp_dcoc_write(rt2x00dev, 2, bbp_val);
do_cal:
if (btxcal) {
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 58, &rf_val);
+ rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 58);
rf_val &= (~0x7F);
rf_val |= tx_agc_fc;
rt2800_rfcsr_write_bank(rt2x00dev, 5, 58, rf_val);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 59, &rf_val);
+ rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 59);
rf_val &= (~0x7F);
rf_val |= tx_agc_fc;
rt2800_rfcsr_write_bank(rt2x00dev, 5, 59, rf_val);
} else {
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 6, &rf_val);
+ rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 6);
rf_val &= (~0x7F);
rf_val |= rx_agc_fc;
rt2800_rfcsr_write_bank(rt2x00dev, 5, 6, rf_val);
- rt2800_rfcsr_read_bank(rt2x00dev, 5, 7, &rf_val);
+ rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 7);
rf_val &= (~0x7F);
rf_val |= rx_agc_fc;
rt2800_rfcsr_write_bank(rt2x00dev, 5, 7, rf_val);
@@ -7980,7 +7983,7 @@
rt2800_bbp_dcoc_write(rt2x00dev, 0, savebbp159r0);
rt2800_bbp_dcoc_write(rt2x00dev, 2, savebbp159r2);
- rt2800_bbp_read(rt2x00dev, 4, &bbp_val);
+ bbp_val = rt2800_bbp_read(rt2x00dev, 4);
rt2x00_set_field8(&bbp_val, BBP4_BANDWIDTH,
2 * test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags));
rt2800_bbp_write(rt2x00dev, 4, bbp_val);
@@ -8355,20 +8358,20 @@
/*
* Enable RX.
*/
- rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ reg = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
udelay(50);
- rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
- rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ reg = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
@@ -8376,15 +8379,15 @@
/*
* Initialize LED control
*/
- rt2800_eeprom_read(rt2x00dev, EEPROM_LED_AG_CONF, &word);
+ word = rt2800_eeprom_read(rt2x00dev, EEPROM_LED_AG_CONF);
rt2800_mcu_request(rt2x00dev, MCU_LED_AG_CONF, 0xff,
word & 0xff, (word >> 8) & 0xff);
- rt2800_eeprom_read(rt2x00dev, EEPROM_LED_ACT_CONF, &word);
+ word = rt2800_eeprom_read(rt2x00dev, EEPROM_LED_ACT_CONF);
rt2800_mcu_request(rt2x00dev, MCU_LED_ACT_CONF, 0xff,
word & 0xff, (word >> 8) & 0xff);
- rt2800_eeprom_read(rt2x00dev, EEPROM_LED_POLARITY, &word);
+ word = rt2800_eeprom_read(rt2x00dev, EEPROM_LED_POLARITY);
rt2800_mcu_request(rt2x00dev, MCU_LED_LED_POLARITY, 0xff,
word & 0xff, (word >> 8) & 0xff);
@@ -8401,7 +8404,7 @@
/* Wait for DMA, ignore error */
rt2800_wait_wpdma_ready(rt2x00dev);
- rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ reg = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 0);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
@@ -8418,7 +8421,7 @@
else
efuse_ctrl_reg = EFUSE_CTRL;
- rt2800_register_read(rt2x00dev, efuse_ctrl_reg, &reg);
+ reg = rt2800_register_read(rt2x00dev, efuse_ctrl_reg);
return rt2x00_get_field32(reg, EFUSE_CTRL_PRESENT);
}
EXPORT_SYMBOL_GPL(rt2800_efuse_detect);
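/*
 * Usage sketch (hypothetical caller): rt2800_efuse_detect() returns
 * the EFUSE_CTRL_PRESENT field, i.e. non-zero when the chip keeps its
 * calibration data in eFUSE rather than in an external EEPROM:
 *
 *	if (rt2800_efuse_detect(rt2x00dev))
 *		retval = rt2800_read_eeprom_efuse(rt2x00dev);
 */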
@@ -8447,7 +8450,7 @@
}
mutex_lock(&rt2x00dev->csr_mutex);
- rt2800_register_read_lock(rt2x00dev, efuse_ctrl_reg, &reg);
+ reg = rt2800_register_read_lock(rt2x00dev, efuse_ctrl_reg);
rt2x00_set_field32(&reg, EFUSE_CTRL_ADDRESS_IN, i);
rt2x00_set_field32(&reg, EFUSE_CTRL_MODE, 0);
rt2x00_set_field32(&reg, EFUSE_CTRL_KICK, 1);
@@ -8456,14 +8459,14 @@
/* Wait until the EEPROM has been loaded */
rt2800_regbusy_read(rt2x00dev, efuse_ctrl_reg, EFUSE_CTRL_KICK, &reg);
/* Apparently the data is read from end to start */
- rt2800_register_read_lock(rt2x00dev, efuse_data3_reg, &reg);
+ reg = rt2800_register_read_lock(rt2x00dev, efuse_data3_reg);
/* The returned value is in CPU order, but eeprom is le */
*(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
- rt2800_register_read_lock(rt2x00dev, efuse_data2_reg, &reg);
+ reg = rt2800_register_read_lock(rt2x00dev, efuse_data2_reg);
*(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
- rt2800_register_read_lock(rt2x00dev, efuse_data1_reg, &reg);
+ reg = rt2800_register_read_lock(rt2x00dev, efuse_data1_reg);
*(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg);
- rt2800_register_read_lock(rt2x00dev, efuse_data0_reg, &reg);
+ reg = rt2800_register_read_lock(rt2x00dev, efuse_data0_reg);
*(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);
mutex_unlock(&rt2x00dev->csr_mutex);
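/*
 * Layout note (assumption: eeprom[] is an array of little-endian
 * 16-bit words): each 32-bit eFUSE data register covers two EEPROM
 * words and the block arrives high register first, so the four reads
 * above fill eeprom[i] through eeprom[i + 7] in order; cpu_to_le32()
 * keeps that layout independent of host endianness.
 */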
@@ -8487,7 +8490,7 @@
if (rt2x00_rt(rt2x00dev, RT3593))
return 0;
- rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &word);
+ word = rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG);
if ((word & 0x00ff) != 0x00ff)
return rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_BG_VAL);
@@ -8501,7 +8504,7 @@
if (rt2x00_rt(rt2x00dev, RT3593))
return 0;
- rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_A, &word);
+ word = rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_A);
if ((word & 0x00ff) != 0x00ff)
return rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_A_VAL);
@@ -8529,7 +8532,7 @@
mac = rt2800_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
rt2x00lib_set_mac_address(rt2x00dev, mac);
- rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &word);
+ word = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RXPATH, 2);
rt2x00_set_field16(&word, EEPROM_NIC_CONF0_TXPATH, 1);
@@ -8546,7 +8549,7 @@
rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
}
- rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &word);
+ word = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_NIC_CONF1_HW_RADIO, 0);
rt2x00_set_field16(&word, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC, 0);
@@ -8567,7 +8570,7 @@
rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word);
}
- rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
+ word = rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ);
if ((word & 0x00ff) == 0x00ff) {
rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
rt2800_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
@@ -8589,10 +8592,10 @@
* lna0 as correct value. Note that EEPROM_LNA
* is never validated.
*/
- rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &word);
+ word = rt2800_eeprom_read(rt2x00dev, EEPROM_LNA);
default_lna_gain = rt2x00_get_field16(word, EEPROM_LNA_A0);
- rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &word);
+ word = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET0)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET0, 0);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET1)) > 10)
@@ -8601,7 +8604,7 @@
drv_data->txmixer_gain_24g = rt2800_get_txmixer_gain_24g(rt2x00dev);
- rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word);
+ word = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0);
if (!rt2x00_rt(rt2x00dev, RT3593)) {
@@ -8614,14 +8617,14 @@
drv_data->txmixer_gain_5g = rt2800_get_txmixer_gain_5g(rt2x00dev);
- rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word);
+ word = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET0)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET0, 0);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET1)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET1, 0);
rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word);
- rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word);
+ word = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A2_OFFSET2)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_A2_OFFSET2, 0);
if (!rt2x00_rt(rt2x00dev, RT3593)) {
@@ -8633,7 +8636,7 @@
rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
if (rt2x00_rt(rt2x00dev, RT3593)) {
- rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2, &word);
+ word = rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2);
if (rt2x00_get_field16(word, EEPROM_EXT_LNA2_A1) == 0x00 ||
rt2x00_get_field16(word, EEPROM_EXT_LNA2_A1) == 0xff)
rt2x00_set_field16(&word, EEPROM_EXT_LNA2_A1,
@@ -8657,7 +8660,7 @@
/*
* Read EEPROM word for configuration.
*/
- rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0);
/*
* Identify RF chipset by EEPROM value
@@ -8668,7 +8671,7 @@
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392) ||
rt2x00_rt(rt2x00dev, RT6352))
- rt2800_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &rf);
+ rf = rt2800_eeprom_read(rt2x00dev, EEPROM_CHIP_ID);
else if (rt2x00_rt(rt2x00dev, RT3352))
rf = RF3322;
else if (rt2x00_rt(rt2x00dev, RT5350))
@@ -8717,7 +8720,7 @@
rt2x00dev->default_ant.rx_chain_num =
rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH);
- rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1);
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3090) ||
@@ -8771,7 +8774,7 @@
/*
* Read frequency offset and RF programming sequence.
*/
- rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ);
rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET);
/*
@@ -8788,7 +8791,7 @@
/*
* Check if support EIRP tx power limit feature.
*/
- rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER);
if (rt2x00_get_field16(eeprom, EEPROM_EIRP_MAX_TX_POWER_2GHZ) <
EIRP_MAX_TX_POWER_LIMIT)
@@ -8797,7 +8800,7 @@
/*
* Detect if device uses internal or external PA
*/
- rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1);
if (rt2x00_rt(rt2x00dev, RT3352)) {
if (rt2x00_get_field16(eeprom,
@@ -9239,7 +9242,7 @@
break;
case RF5592:
- rt2800_register_read(rt2x00dev, MAC_DEBUG_INDEX, &reg);
+ reg = rt2800_register_read(rt2x00dev, MAC_DEBUG_INDEX);
if (rt2x00_get_field32(reg, MAC_DEBUG_INDEX_XTAL)) {
spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal40);
spec->channels = rf_vals_5592_xtal40;
@@ -9378,9 +9381,9 @@
u32 rev;
if (rt2x00_rt(rt2x00dev, RT3290))
- rt2800_register_read(rt2x00dev, MAC_CSR0_3290, &reg);
+ reg = rt2800_register_read(rt2x00dev, MAC_CSR0_3290);
else
- rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
+ reg = rt2800_register_read(rt2x00dev, MAC_CSR0);
rt = rt2x00_get_field32(reg, MAC_CSR0_CHIPSET);
rev = rt2x00_get_field32(reg, MAC_CSR0_REVISION);
@@ -9440,7 +9443,7 @@
* Enable rfkill polling by setting GPIO direction of the
* rfkill switch GPIO pin correctly.
*/
- rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+ reg = rt2800_register_read(rt2x00dev, GPIO_CTRL);
rt2x00_set_field32(&reg, GPIO_CTRL_DIR2, 1);
rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
@@ -9515,31 +9518,31 @@
u32 reg;
bool enabled = (value < IEEE80211_MAX_RTS_THRESHOLD);
- rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, TX_RTS_CFG);
rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES, value);
rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg);
- rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, CCK_PROT_CFG);
rt2x00_set_field32(&reg, CCK_PROT_CFG_RTS_TH_EN, enabled);
rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg);
- rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, OFDM_PROT_CFG);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_RTS_TH_EN, enabled);
rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
- rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, MM20_PROT_CFG);
rt2x00_set_field32(&reg, MM20_PROT_CFG_RTS_TH_EN, enabled);
rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg);
- rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, MM40_PROT_CFG);
rt2x00_set_field32(&reg, MM40_PROT_CFG_RTS_TH_EN, enabled);
rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg);
- rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, GF20_PROT_CFG);
rt2x00_set_field32(&reg, GF20_PROT_CFG_RTS_TH_EN, enabled);
rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg);
- rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, GF40_PROT_CFG);
rt2x00_set_field32(&reg, GF40_PROT_CFG_RTS_TH_EN, enabled);
rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg);
@@ -9582,7 +9585,7 @@
field.bit_offset = (queue_idx & 1) * 16;
field.bit_mask = 0xffff << field.bit_offset;
- rt2800_register_read(rt2x00dev, offset, &reg);
+ reg = rt2800_register_read(rt2x00dev, offset);
rt2x00_set_field32(&reg, field, queue->txop);
rt2800_register_write(rt2x00dev, offset, reg);
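Both per-queue hunks above are the canonical shape of the whole conversion: read the register into a local, update one sub-field through a mask/offset pair, write the word back. A self-contained model of the field helpers (plain C; bit_offset is carried explicitly here, where the kernel derives it from the mask via the FIELD32() machinery):

#include <stdint.h>
#include <stdio.h>

struct field32 {
	unsigned int bit_offset;
	uint32_t bit_mask;
};

/* Clear the field, then or-in the new value shifted into place. */
static void set_field32(uint32_t *reg, struct field32 f, uint32_t val)
{
	*reg = (*reg & ~f.bit_mask) | ((val << f.bit_offset) & f.bit_mask);
}

static uint32_t get_field32(uint32_t reg, struct field32 f)
{
	return (reg & f.bit_mask) >> f.bit_offset;
}

int main(void)
{
	/* Mirror the TXOP update above: two queues share each 32-bit
	 * word, so the 16-bit field position depends on queue_idx. */
	unsigned int queue_idx = 1;
	struct field32 field = {
		.bit_offset = (queue_idx & 1) * 16,
		.bit_mask = 0xffffu << ((queue_idx & 1) * 16),
	};
	uint32_t reg = 0x12345678;

	set_field32(&reg, field, 0x0030);
	printf("reg=0x%08x txop=0x%04x\n", (unsigned)reg,
	       (unsigned)get_field32(reg, field));
	return 0;
}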
@@ -9590,22 +9593,22 @@
field.bit_offset = queue_idx * 4;
field.bit_mask = 0xf << field.bit_offset;
- rt2800_register_read(rt2x00dev, WMM_AIFSN_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, WMM_AIFSN_CFG);
rt2x00_set_field32(&reg, field, queue->aifs);
rt2800_register_write(rt2x00dev, WMM_AIFSN_CFG, reg);
- rt2800_register_read(rt2x00dev, WMM_CWMIN_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, WMM_CWMIN_CFG);
rt2x00_set_field32(&reg, field, queue->cw_min);
rt2800_register_write(rt2x00dev, WMM_CWMIN_CFG, reg);
- rt2800_register_read(rt2x00dev, WMM_CWMAX_CFG, &reg);
+ reg = rt2800_register_read(rt2x00dev, WMM_CWMAX_CFG);
rt2x00_set_field32(&reg, field, queue->cw_max);
rt2800_register_write(rt2x00dev, WMM_CWMAX_CFG, reg);
/* Update EDCA registers */
offset = EDCA_AC0_CFG + (sizeof(u32) * queue_idx);
- rt2800_register_read(rt2x00dev, offset, &reg);
+ reg = rt2800_register_read(rt2x00dev, offset);
rt2x00_set_field32(&reg, EDCA_AC0_CFG_TX_OP, queue->txop);
rt2x00_set_field32(&reg, EDCA_AC0_CFG_AIFSN, queue->aifs);
rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMIN, queue->cw_min);
@@ -9622,9 +9625,9 @@
u64 tsf;
u32 reg;
- rt2800_register_read(rt2x00dev, TSF_TIMER_DW1, &reg);
+ reg = rt2800_register_read(rt2x00dev, TSF_TIMER_DW1);
tsf = (u64) rt2x00_get_field32(reg, TSF_TIMER_DW1_HIGH_WORD) << 32;
- rt2800_register_read(rt2x00dev, TSF_TIMER_DW0, &reg);
+ reg = rt2800_register_read(rt2x00dev, TSF_TIMER_DW0);
tsf |= rt2x00_get_field32(reg, TSF_TIMER_DW0_LOW_WORD);
return tsf;
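rt2800_get_tsf() above assembles the 64-bit TSF from two 32-bit registers, high word first. A minimal sketch of that assembly (register values stubbed with arbitrary constants); note the halves are read non-atomically, so a low-word wrap between the two reads can skew a sample:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for TSF_TIMER_DW1 (high word) and TSF_TIMER_DW0 (low). */
static const uint32_t tsf_dw1 = 0x00000002;
static const uint32_t tsf_dw0 = 0x80000000;

static uint64_t get_tsf(void)
{
	uint64_t tsf;

	tsf = (uint64_t)tsf_dw1 << 32;	/* high word first */
	tsf |= tsf_dw0;			/* then the low word */
	return tsf;
}

int main(void)
{
	printf("tsf=0x%016llx\n", (unsigned long long)get_tsf());
	return 0;
}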
@@ -9691,9 +9694,9 @@
survey->channel = conf->chandef.chan;
- rt2800_register_read(rt2x00dev, CH_IDLE_STA, &idle);
- rt2800_register_read(rt2x00dev, CH_BUSY_STA, &busy);
- rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &busy_ext);
+ idle = rt2800_register_read(rt2x00dev, CH_IDLE_STA);
+ busy = rt2800_register_read(rt2x00dev, CH_BUSY_STA);
+ busy_ext = rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC);
if (idle || busy) {
survey->filled = SURVEY_INFO_TIME |
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
index f357531..275e396 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
@@ -49,10 +49,10 @@
};
struct rt2800_ops {
- void (*register_read)(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset, u32 *value);
- void (*register_read_lock)(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset, u32 *value);
+ u32 (*register_read)(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset);
+ u32 (*register_read_lock)(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset);
void (*register_write)(struct rt2x00_dev *rt2x00dev,
const unsigned int offset, u32 value);
void (*register_write_lock)(struct rt2x00_dev *rt2x00dev,
@@ -78,22 +78,20 @@
__le32 *(*drv_get_txwi)(struct queue_entry *entry);
};
-static inline void rt2800_register_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- u32 *value)
+static inline u32 rt2800_register_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset)
{
const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
- rt2800ops->register_read(rt2x00dev, offset, value);
+ return rt2800ops->register_read(rt2x00dev, offset);
}
-static inline void rt2800_register_read_lock(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- u32 *value)
+static inline u32 rt2800_register_read_lock(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset)
{
const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
- rt2800ops->register_read_lock(rt2x00dev, offset, value);
+ return rt2800ops->register_read_lock(rt2x00dev, offset);
}
static inline void rt2800_register_write(struct rt2x00_dev *rt2x00dev,
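The rt2800_ops change above is the heart of the series: reads now return the value instead of filling an out-parameter, so call sites collapse from two statements into one and a read can feed straight into an expression. A standalone sketch of the two calling conventions (register file stubbed as an array, not the driver's real MMIO backend):

#include <stdint.h>

static uint32_t csr[256];	/* stub register file, illustration only */

/* Old convention: the result comes back through a pointer. */
static void register_read_old(unsigned int offset, uint32_t *value)
{
	*value = csr[offset];
}

/* New convention: the result is the return value. */
static uint32_t register_read_new(unsigned int offset)
{
	return csr[offset];
}

int main(void)
{
	uint32_t reg;

	register_read_old(16, &reg);	/* before the conversion */
	reg = register_read_new(16);	/* after the conversion */

	/* The return form also composes inside expressions: */
	uint32_t masked = register_read_new(16) & 0xffffu;

	(void)reg;
	(void)masked;
	return 0;
}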
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
index 3ab3b53..ee5276e 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
@@ -109,7 +109,7 @@
__le32 *rxd = entry_priv->desc;
u32 word;
- rt2x00_desc_read(rxd, 3, &word);
+ word = rt2x00_desc_read(rxd, 3);
if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
@@ -175,7 +175,7 @@
wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
txwi = rt2800_drv_get_txwi(entry);
- rt2x00_desc_read(txwi, 1, &word);
+ word = rt2x00_desc_read(txwi, 1);
tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
return (tx_wcid == wcid);
@@ -331,7 +331,7 @@
* access needs locking.
*/
spin_lock_irq(&rt2x00dev->irqmask_lock);
- rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR);
rt2x00_set_field32(&reg, irq_field, 1);
rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
spin_unlock_irq(&rt2x00dev->irqmask_lock);
@@ -376,12 +376,12 @@
* interval every 64 beacons by 64us to mitigate this effect.
*/
if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
- rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
(rt2x00dev->beacon_int * 16) - 1);
rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
} else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
- rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
(rt2x00dev->beacon_int * 16));
rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
@@ -439,7 +439,7 @@
* need to lock the kfifo.
*/
for (i = 0; i < rt2x00dev->tx->limit; i++) {
- rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status);
+ status = rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO);
if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
break;
@@ -460,7 +460,7 @@
u32 reg, mask;
/* Read status and ACK all interrupts */
- rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR);
rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
if (!reg)
@@ -501,7 +501,7 @@
* the tasklet will reenable the appropriate interrupts.
*/
spin_lock(&rt2x00dev->irqmask_lock);
- rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR);
reg &= mask;
rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
spin_unlock(&rt2x00dev->irqmask_lock);
@@ -521,7 +521,7 @@
* should clear the register to assure a clean state.
*/
if (state == STATE_RADIO_IRQ_ON) {
- rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR);
rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
}
@@ -560,18 +560,18 @@
switch (queue->qid) {
case QID_RX:
- rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
break;
case QID_BEACON:
- rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
- rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN);
rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
break;
@@ -613,18 +613,18 @@
switch (queue->qid) {
case QID_RX:
- rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
break;
case QID_BEACON:
- rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
- rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN);
rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
@@ -696,11 +696,11 @@
u32 word;
if (entry->queue->qid == QID_RX) {
- rt2x00_desc_read(entry_priv->desc, 1, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 1);
return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
} else {
- rt2x00_desc_read(entry_priv->desc, 1, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 1);
return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
}
@@ -715,11 +715,11 @@
u32 word;
if (entry->queue->qid == QID_RX) {
- rt2x00_desc_read(entry_priv->desc, 0, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 0);
rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
rt2x00_desc_write(entry_priv->desc, 0, word);
- rt2x00_desc_read(entry_priv->desc, 1, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 1);
rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
rt2x00_desc_write(entry_priv->desc, 1, word);
@@ -730,7 +730,7 @@
rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
entry->entry_idx);
} else {
- rt2x00_desc_read(entry_priv->desc, 1, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 1);
rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
rt2x00_desc_write(entry_priv->desc, 1, word);
}
@@ -810,7 +810,7 @@
/*
* Reset DMA indexes
*/
- rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX);
rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
@@ -831,7 +831,7 @@
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392) ||
rt2x00_rt(rt2x00dev, RT5592))) {
- rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, AUX_CTRL);
rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
index 0af2257..5cf655f 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
@@ -69,7 +69,7 @@
return;
for (i = 0; i < 200; i++) {
- rt2x00mmio_register_read(rt2x00dev, H2M_MAILBOX_CID, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, H2M_MAILBOX_CID);
if ((rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD0) == token) ||
(rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD1) == token) ||
@@ -92,7 +92,7 @@
struct rt2x00_dev *rt2x00dev = eeprom->data;
u32 reg;
- rt2x00mmio_register_read(rt2x00dev, E2PROM_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, E2PROM_CSR);
eeprom->reg_data_in = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_IN);
eeprom->reg_data_out = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_OUT);
@@ -122,7 +122,7 @@
struct eeprom_93cx6 eeprom;
u32 reg;
- rt2x00mmio_register_read(rt2x00dev, E2PROM_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, E2PROM_CSR);
eeprom.data = rt2x00dev;
eeprom.register_read = rt2800pci_eepromregister_read;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index f11e3f5..685b8e0 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -61,12 +61,12 @@
switch (queue->qid) {
case QID_RX:
- rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
break;
case QID_BEACON:
- rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG);
rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
@@ -84,12 +84,12 @@
switch (queue->qid) {
case QID_RX:
- rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
break;
case QID_BEACON:
- rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG);
rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
@@ -333,7 +333,7 @@
if (rt2800_wait_csr_ready(rt2x00dev))
return -EBUSY;
- rt2x00usb_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, PBF_SYS_CTRL);
rt2x00usb_register_write(rt2x00dev, PBF_SYS_CTRL, reg & ~0x00002000);
reg = 0;
@@ -456,7 +456,7 @@
/*
* Initialize TXINFO descriptor
*/
- rt2x00_desc_read(txi, 0, &word);
+ word = rt2x00_desc_read(txi, 0);
/*
* The size of TXINFO_W0_USB_DMA_TX_PKT_LEN is
@@ -527,7 +527,7 @@
*/
txwi = rt2800usb_get_txwi(entry);
- rt2x00_desc_read(txwi, 1, &word);
+ word = rt2x00_desc_read(txwi, 1);
tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
tx_ack = rt2x00_get_field32(word, TXWI_W1_ACK);
tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID);
@@ -652,7 +652,7 @@
* | RXINFO | RXWI | header | L2 pad | payload | pad | RXD | USB pad |
* |<------------ rx_pkt_len -------------->|
*/
- rt2x00_desc_read(rxi, 0, &word);
+ word = rt2x00_desc_read(rxi, 0);
rx_pkt_len = rt2x00_get_field32(word, RXINFO_W0_USB_DMA_RX_PKT_LEN);
/*
@@ -676,7 +676,7 @@
/*
* It is now safe to read the descriptor on all architectures.
*/
- rt2x00_desc_read(rxd, 0, &word);
+ word = rt2x00_desc_read(rxd, 0);
if (rt2x00_get_field32(word, RXD_W0_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
@@ -1156,6 +1156,8 @@
{ USB_DEVICE(0x2001, 0x3c17) },
/* Panasonic */
{ USB_DEVICE(0x083a, 0xb511) },
+ /* Accton/Arcadyan/Epson */
+ { USB_DEVICE(0x083a, 0xb512) },
/* Philips */
{ USB_DEVICE(0x0471, 0x20dd) },
/* Ralink */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 1bc353e..1f38c33 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -1049,11 +1049,11 @@
* Generic RF access.
* The RF is being accessed by word index.
*/
-static inline void rt2x00_rf_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int word, u32 *data)
+static inline u32 rt2x00_rf_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int word)
{
BUG_ON(word < 1 || word > rt2x00dev->ops->rf_size / sizeof(u32));
- *data = rt2x00dev->rf[word - 1];
+ return rt2x00dev->rf[word - 1];
}
static inline void rt2x00_rf_write(struct rt2x00_dev *rt2x00dev,
@@ -1072,10 +1072,10 @@
return (void *)&rt2x00dev->eeprom[word];
}
-static inline void rt2x00_eeprom_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int word, u16 *data)
+static inline u16 rt2x00_eeprom_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int word)
{
- *data = le16_to_cpu(rt2x00dev->eeprom[word]);
+ return le16_to_cpu(rt2x00dev->eeprom[word]);
}
static inline void rt2x00_eeprom_write(struct rt2x00_dev *rt2x00dev,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
index 964aefd..51520a0 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
@@ -188,7 +188,7 @@
return;
}
- dump_hdr = (struct rt2x00dump_hdr *)skb_put(skbcopy, sizeof(*dump_hdr));
+ dump_hdr = skb_put(skbcopy, sizeof(*dump_hdr));
dump_hdr->version = cpu_to_le32(DUMP_HEADER_VERSION);
dump_hdr->header_length = cpu_to_le32(sizeof(*dump_hdr));
dump_hdr->desc_length = cpu_to_le32(skbdesc->desc_len);
@@ -203,9 +203,8 @@
dump_hdr->timestamp_usec = cpu_to_le32(timestamp.tv_usec);
if (!(skbdesc->flags & SKBDESC_DESC_IN_SKB))
- memcpy(skb_put(skbcopy, skbdesc->desc_len), skbdesc->desc,
- skbdesc->desc_len);
- memcpy(skb_put(skbcopy, skb->len), skb->data, skb->len);
+ skb_put_data(skbcopy, skbdesc->desc, skbdesc->desc_len);
+ skb_put_data(skbcopy, skb->data, skb->len);
skb_queue_tail(&intf->frame_dump_skbqueue, skbcopy);
wake_up_interruptible(&intf->frame_dump_waitqueue);
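The same hunk converts the dump path to skb_put_data(), which fuses the tail-reserve (skb_put) and the memcpy into one call. A plain-C model of those semantics over a flat buffer (not the real sk_buff API; overflow handling reduced to an assert):

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct flatbuf {
	uint8_t data[256];
	size_t len;
};

/* Model of skb_put_data(): extend the tail by n bytes and copy
 * src into the newly reserved region, returning its start. */
static void *flatbuf_put_data(struct flatbuf *b, const void *src, size_t n)
{
	void *tail;

	assert(b->len + n <= sizeof(b->data));
	tail = b->data + b->len;
	b->len += n;
	memcpy(tail, src, n);
	return tail;
}

int main(void)
{
	struct flatbuf b = { .len = 0 };

	flatbuf_put_data(&b, "descriptor", 10);	/* like the desc copy */
	flatbuf_put_data(&b, "frame", 5);	/* then the frame data */
	return b.len == 15 ? 0 : 1;
}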
@@ -460,7 +459,7 @@
if (debug->__name.flags & RT2X00DEBUGFS_OFFSET) \
index *= debug->__name.word_size; \
\
- debug->__name.read(intf->rt2x00dev, index, &value); \
+ value = debug->__name.read(intf->rt2x00dev, index); \
\
size = sprintf(line, __format, value); \
\
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.h b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.h
index e65712c..a357a07 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.h
@@ -38,8 +38,8 @@
#define RT2X00DEBUGFS_REGISTER_ENTRY(__name, __type) \
struct reg##__name { \
- void (*read)(struct rt2x00_dev *rt2x00dev, \
- const unsigned int word, __type *data); \
+ __type (*read)(struct rt2x00_dev *rt2x00dev, \
+ const unsigned int word); \
void (*write)(struct rt2x00_dev *rt2x00dev, \
const unsigned int word, __type data); \
\
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
index da38d25..528cb04 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
@@ -43,7 +43,7 @@
return 0;
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2x00mmio_register_read(rt2x00dev, offset, reg);
+ *reg = rt2x00mmio_register_read(rt2x00dev, offset);
if (!rt2x00_get_field32(*reg, field))
return 1;
udelay(REGISTER_BUSY_DELAY);
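rt2x00mmio_regbusy_read() above keeps its out-parameter because callers want the final register value even on timeout; the body is a bounded poll. A userspace sketch of the same loop (usleep() standing in for udelay(), and bit 0 chosen as the busy flag purely for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#define REGISTER_BUSY_COUNT	100
#define REGISTER_BUSY_DELAY	100	/* microseconds */

/* Stub device: reports busy (bit 0 set) for the first two reads. */
static uint32_t read_reg(void)
{
	static int calls;

	return ++calls < 3 ? 0x1 : 0x0;
}

/* Poll until the busy bit clears, giving up after a fixed number of
 * attempts; the last value read is left in *reg either way. */
static bool regbusy_read(uint32_t *reg)
{
	int i;

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		*reg = read_reg();
		if (!(*reg & 0x1))
			return true;
		usleep(REGISTER_BUSY_DELAY);
	}
	return false;
}

int main(void)
{
	uint32_t reg;

	return regbusy_read(&reg) ? 0 : 1;
}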
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.h b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.h
index 701c312..184a414 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.h
@@ -29,11 +29,10 @@
/*
* Register access.
*/
-static inline void rt2x00mmio_register_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- u32 *value)
+static inline u32 rt2x00mmio_register_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset)
{
- *value = readl(rt2x00dev->csr.base + offset);
+ return readl(rt2x00dev->csr.base + offset);
}
static inline void rt2x00mmio_register_multiread(struct rt2x00_dev *rt2x00dev,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
index 6055f36..a15bae2 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
@@ -642,11 +642,10 @@
* _rt2x00_desc_read - Read a word from the hardware descriptor.
* @desc: Base descriptor address
* @word: Word index from where the descriptor should be read.
- * @value: Address where the descriptor value should be written into.
*/
-static inline void _rt2x00_desc_read(__le32 *desc, const u8 word, __le32 *value)
+static inline __le32 _rt2x00_desc_read(__le32 *desc, const u8 word)
{
- *value = desc[word];
+ return desc[word];
}
/**
@@ -654,13 +653,10 @@
* function will take care of the byte ordering.
* @desc: Base descriptor address
* @word: Word index from where the descriptor should be read.
- * @value: Address where the descriptor value should be written into.
*/
-static inline void rt2x00_desc_read(__le32 *desc, const u8 word, u32 *value)
+static inline u32 rt2x00_desc_read(__le32 *desc, const u8 word)
{
- __le32 tmp;
- _rt2x00_desc_read(desc, word, &tmp);
- *value = le32_to_cpu(tmp);
+ return le32_to_cpu(_rt2x00_desc_read(desc, word));
}
/**
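rt2x00_desc_read() above is also where byte order is handled: descriptor words sit in host memory little-endian and le32_to_cpu() turns them host-order, which the return-value form expresses in a single line. A portable model of the conversion, assembled from bytes so it is correct on either endianness:

#include <stdint.h>
#include <stdio.h>

/* Model of le32_to_cpu(): build the host-order value from four
 * little-endian bytes, independent of the host's own byte order. */
static uint32_t le32_to_cpu_model(const uint8_t le[4])
{
	return (uint32_t)le[0] | ((uint32_t)le[1] << 8) |
	       ((uint32_t)le[2] << 16) | ((uint32_t)le[3] << 24);
}

int main(void)
{
	uint8_t desc_word[4] = { 0x78, 0x56, 0x34, 0x12 };	/* stored LE */

	printf("0x%08x\n", le32_to_cpu_model(desc_word));	/* 0x12345678 */
	return 0;
}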
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index c696f0a..e2f4f57 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -145,7 +145,7 @@
return -ENODEV;
for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
- rt2x00usb_register_read_lock(rt2x00dev, offset, reg);
+ *reg = rt2x00usb_register_read_lock(rt2x00dev, offset);
if (!rt2x00_get_field32(*reg, field))
return 1;
udelay(REGISTER_BUSY_DELAY);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.h b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.h
index 569363d..ff94c69 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.h
@@ -190,40 +190,36 @@
* rt2x00usb_register_read - Read 32bit register word
* @rt2x00dev: Device pointer, see &struct rt2x00_dev.
* @offset: Register offset
- * @value: Pointer to where register contents should be stored
*
* This function is a simple wrapper for 32bit register access
* through rt2x00usb_vendor_request_buff().
*/
-static inline void rt2x00usb_register_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- u32 *value)
+static inline u32 rt2x00usb_register_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset)
{
__le32 reg = 0;
rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
USB_VENDOR_REQUEST_IN, offset,
&reg, sizeof(reg));
- *value = le32_to_cpu(reg);
+ return le32_to_cpu(reg);
}
/**
* rt2x00usb_register_read_lock - Read 32bit register word
* @rt2x00dev: Device pointer, see &struct rt2x00_dev.
* @offset: Register offset
- * @value: Pointer to where register contents should be stored
*
* This function is a simple wrapper for 32bit register access
* through rt2x00usb_vendor_req_buff_lock().
*/
-static inline void rt2x00usb_register_read_lock(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- u32 *value)
+static inline u32 rt2x00usb_register_read_lock(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset)
{
__le32 reg = 0;
rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_READ,
USB_VENDOR_REQUEST_IN, offset,
&reg, sizeof(reg), REGISTER_TIMEOUT);
- *value = le32_to_cpu(reg);
+ return le32_to_cpu(reg);
}
/**
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index 973d418..2343102 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -86,10 +86,11 @@
mutex_unlock(&rt2x00dev->csr_mutex);
}
-static void rt61pci_bbp_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int word, u8 *value)
+static u8 rt61pci_bbp_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int word)
{
u32 reg;
+ u8 value;
mutex_lock(&rt2x00dev->csr_mutex);
@@ -112,9 +113,11 @@
WAIT_FOR_BBP(rt2x00dev, &reg);
}
- *value = rt2x00_get_field32(reg, PHY_CSR3_VALUE);
+ value = rt2x00_get_field32(reg, PHY_CSR3_VALUE);
mutex_unlock(&rt2x00dev->csr_mutex);
+
+ return value;
}
static void rt61pci_rf_write(struct rt2x00_dev *rt2x00dev,
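The rt61pci_bbp_read() conversion above shows the locked-read pattern under the new API: capture the field into a local while the mutex is held, unlock, then return the local. A condensed sketch with pthreads standing in for the kernel mutex:

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t csr_mutex = PTHREAD_MUTEX_INITIALIZER;
static uint32_t phy_csr3 = 0x5a;	/* stand-in for PHY_CSR3 */

static uint8_t bbp_read(void)
{
	uint8_t value;

	pthread_mutex_lock(&csr_mutex);
	value = phy_csr3 & 0xff;	/* PHY_CSR3_VALUE, modelled */
	pthread_mutex_unlock(&csr_mutex);

	return value;
}

int main(void)
{
	return bbp_read() == 0x5a ? 0 : 1;
}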
@@ -161,7 +164,7 @@
rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG1, arg1);
rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_CSR, reg);
- rt2x00mmio_register_read(rt2x00dev, HOST_CMD_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, HOST_CMD_CSR);
rt2x00_set_field32(&reg, HOST_CMD_CSR_HOST_COMMAND, command);
rt2x00_set_field32(&reg, HOST_CMD_CSR_INTERRUPT_MCU, 1);
rt2x00mmio_register_write(rt2x00dev, HOST_CMD_CSR, reg);
@@ -176,7 +179,7 @@
struct rt2x00_dev *rt2x00dev = eeprom->data;
u32 reg;
- rt2x00mmio_register_read(rt2x00dev, E2PROM_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, E2PROM_CSR);
eeprom->reg_data_in = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_IN);
eeprom->reg_data_out = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_OUT);
@@ -240,7 +243,7 @@
{
u32 reg;
- rt2x00mmio_register_read(rt2x00dev, MAC_CSR13, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR13);
return rt2x00_get_field32(reg, MAC_CSR13_VAL5);
}
@@ -291,7 +294,7 @@
container_of(led_cdev, struct rt2x00_led, led_dev);
u32 reg;
- rt2x00mmio_register_read(led->rt2x00dev, MAC_CSR14, &reg);
+ reg = rt2x00mmio_register_read(led->rt2x00dev, MAC_CSR14);
rt2x00_set_field32(&reg, MAC_CSR14_ON_PERIOD, *delay_on);
rt2x00_set_field32(&reg, MAC_CSR14_OFF_PERIOD, *delay_off);
rt2x00mmio_register_write(led->rt2x00dev, MAC_CSR14, reg);
@@ -336,7 +339,7 @@
*/
mask = (0xf << crypto->bssidx);
- rt2x00mmio_register_read(rt2x00dev, SEC_CSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR0);
reg &= mask;
if (reg && reg == mask)
@@ -369,14 +372,14 @@
field.bit_offset = (3 * key->hw_key_idx);
field.bit_mask = 0x7 << field.bit_offset;
- rt2x00mmio_register_read(rt2x00dev, SEC_CSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR1);
rt2x00_set_field32(&reg, field, crypto->cipher);
rt2x00mmio_register_write(rt2x00dev, SEC_CSR1, reg);
} else {
field.bit_offset = (3 * (key->hw_key_idx - 8));
field.bit_mask = 0x7 << field.bit_offset;
- rt2x00mmio_register_read(rt2x00dev, SEC_CSR5, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR5);
rt2x00_set_field32(&reg, field, crypto->cipher);
rt2x00mmio_register_write(rt2x00dev, SEC_CSR5, reg);
}
@@ -401,7 +404,7 @@
*/
mask = 1 << key->hw_key_idx;
- rt2x00mmio_register_read(rt2x00dev, SEC_CSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR0);
if (crypto->cmd == SET_KEY)
reg |= mask;
else if (crypto->cmd == DISABLE_KEY)
@@ -430,10 +433,10 @@
* When both registers are full, we drop the key.
* Otherwise, we use the first invalid entry.
*/
- rt2x00mmio_register_read(rt2x00dev, SEC_CSR2, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR2);
if (reg && reg == ~0) {
key->hw_key_idx = 32;
- rt2x00mmio_register_read(rt2x00dev, SEC_CSR3, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR3);
if (reg && reg == ~0)
return -ENOSPC;
}
@@ -467,7 +470,7 @@
* Without this, received frames will not be decrypted
* by the hardware.
*/
- rt2x00mmio_register_read(rt2x00dev, SEC_CSR4, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR4);
reg |= (1 << crypto->bssidx);
rt2x00mmio_register_write(rt2x00dev, SEC_CSR4, reg);
@@ -492,7 +495,7 @@
if (key->hw_key_idx < 32) {
mask = 1 << key->hw_key_idx;
- rt2x00mmio_register_read(rt2x00dev, SEC_CSR2, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR2);
if (crypto->cmd == SET_KEY)
reg |= mask;
else if (crypto->cmd == DISABLE_KEY)
@@ -501,7 +504,7 @@
} else {
mask = 1 << (key->hw_key_idx - 32);
- rt2x00mmio_register_read(rt2x00dev, SEC_CSR3, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR3);
if (crypto->cmd == SET_KEY)
reg |= mask;
else if (crypto->cmd == DISABLE_KEY)
@@ -523,7 +526,7 @@
* and broadcast frames will always be accepted since
* there is no filter for it at this time.
*/
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR0);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CRC,
!(filter_flags & FIF_FCSFAIL));
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_PHYSICAL,
@@ -555,7 +558,7 @@
/*
* Enable synchronisation.
*/
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync);
rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
}
@@ -586,13 +589,13 @@
{
u32 reg;
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR0);
rt2x00_set_field32(&reg, TXRX_CSR0_RX_ACK_TIMEOUT, 0x32);
rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER);
rt2x00mmio_register_write(rt2x00dev, TXRX_CSR0, reg);
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR4, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR4);
rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1);
rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE,
!!erp->short_preamble);
@@ -604,18 +607,18 @@
erp->basic_rates);
if (changed & BSS_CHANGED_BEACON_INT) {
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL,
erp->beacon_int * 16);
rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
- rt2x00mmio_register_read(rt2x00dev, MAC_CSR9, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR9);
rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, erp->slot_time);
rt2x00mmio_register_write(rt2x00dev, MAC_CSR9, reg);
- rt2x00mmio_register_read(rt2x00dev, MAC_CSR8, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR8);
rt2x00_set_field32(&reg, MAC_CSR8_SIFS, erp->sifs);
rt2x00_set_field32(&reg, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3);
rt2x00_set_field32(&reg, MAC_CSR8_EIFS, erp->eifs);
@@ -630,9 +633,9 @@
u8 r4;
u8 r77;
- rt61pci_bbp_read(rt2x00dev, 3, &r3);
- rt61pci_bbp_read(rt2x00dev, 4, &r4);
- rt61pci_bbp_read(rt2x00dev, 77, &r77);
+ r3 = rt61pci_bbp_read(rt2x00dev, 3);
+ r4 = rt61pci_bbp_read(rt2x00dev, 4);
+ r77 = rt61pci_bbp_read(rt2x00dev, 77);
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF5325));
@@ -676,9 +679,9 @@
u8 r4;
u8 r77;
- rt61pci_bbp_read(rt2x00dev, 3, &r3);
- rt61pci_bbp_read(rt2x00dev, 4, &r4);
- rt61pci_bbp_read(rt2x00dev, 77, &r77);
+ r3 = rt61pci_bbp_read(rt2x00dev, 3);
+ r4 = rt61pci_bbp_read(rt2x00dev, 4);
+ r77 = rt61pci_bbp_read(rt2x00dev, 77);
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF2529));
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
@@ -712,7 +715,7 @@
{
u32 reg;
- rt2x00mmio_register_read(rt2x00dev, MAC_CSR13, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR13);
rt2x00_set_field32(&reg, MAC_CSR13_DIR4, 0);
rt2x00_set_field32(&reg, MAC_CSR13_VAL4, p1);
@@ -730,9 +733,9 @@
u8 r4;
u8 r77;
- rt61pci_bbp_read(rt2x00dev, 3, &r3);
- rt61pci_bbp_read(rt2x00dev, 4, &r4);
- rt61pci_bbp_read(rt2x00dev, 77, &r77);
+ r3 = rt61pci_bbp_read(rt2x00dev, 3);
+ r4 = rt61pci_bbp_read(rt2x00dev, 4);
+ r77 = rt61pci_bbp_read(rt2x00dev, 77);
/*
* Configure the RX antenna.
@@ -819,7 +822,7 @@
for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++)
rt61pci_bbp_write(rt2x00dev, sel[i].word, sel[i].value[lna]);
- rt2x00mmio_register_read(rt2x00dev, PHY_CSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, PHY_CSR0);
rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG,
rt2x00dev->curr_band == NL80211_BAND_2GHZ);
@@ -850,13 +853,13 @@
if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
lna_gain += 14;
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG);
lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
} else {
if (rt2x00_has_cap_external_lna_a(rt2x00dev))
lna_gain += 14;
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A);
lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1);
}
@@ -875,7 +878,7 @@
smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527));
- rt61pci_bbp_read(rt2x00dev, 3, &r3);
+ r3 = rt61pci_bbp_read(rt2x00dev, 3);
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
rt61pci_bbp_write(rt2x00dev, 3, r3);
@@ -913,10 +916,10 @@
{
struct rf_channel rf;
- rt2x00_rf_read(rt2x00dev, 1, &rf.rf1);
- rt2x00_rf_read(rt2x00dev, 2, &rf.rf2);
- rt2x00_rf_read(rt2x00dev, 3, &rf.rf3);
- rt2x00_rf_read(rt2x00dev, 4, &rf.rf4);
+ rf.rf1 = rt2x00_rf_read(rt2x00dev, 1);
+ rf.rf2 = rt2x00_rf_read(rt2x00dev, 2);
+ rf.rf3 = rt2x00_rf_read(rt2x00dev, 3);
+ rf.rf4 = rt2x00_rf_read(rt2x00dev, 4);
rt61pci_config_channel(rt2x00dev, &rf, txpower);
}
@@ -926,7 +929,7 @@
{
u32 reg;
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR4, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR4);
rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_DOWN, 1);
rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_STEP, 0);
rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_FALLBACK_CCK, 0);
@@ -946,7 +949,7 @@
u32 reg;
if (state == STATE_SLEEP) {
- rt2x00mmio_register_read(rt2x00dev, MAC_CSR11, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR11);
rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN,
rt2x00dev->beacon_int - 10);
rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP,
@@ -967,7 +970,7 @@
rt61pci_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 0);
} else {
- rt2x00mmio_register_read(rt2x00dev, MAC_CSR11, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR11);
rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN, 0);
rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP, 0);
rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0);
@@ -1013,13 +1016,13 @@
/*
* Update FCS error count from register.
*/
- rt2x00mmio_register_read(rt2x00dev, STA_CSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, STA_CSR0);
qual->rx_failed = rt2x00_get_field32(reg, STA_CSR0_FCS_ERROR);
/*
* Update False CCA count from register.
*/
- rt2x00mmio_register_read(rt2x00dev, STA_CSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, STA_CSR1);
qual->false_cca = rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR);
}
@@ -1138,12 +1141,12 @@
switch (queue->qid) {
case QID_RX:
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR0);
rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0);
rt2x00mmio_register_write(rt2x00dev, TXRX_CSR0, reg);
break;
case QID_BEACON:
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
@@ -1161,22 +1164,22 @@
switch (queue->qid) {
case QID_AC_VO:
- rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR);
rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, 1);
rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg);
break;
case QID_AC_VI:
- rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR);
rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, 1);
rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg);
break;
case QID_AC_BE:
- rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR);
rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC2, 1);
rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg);
break;
case QID_AC_BK:
- rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR);
rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC3, 1);
rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg);
break;
@@ -1192,32 +1195,32 @@
switch (queue->qid) {
case QID_AC_VO:
- rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR);
rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC0, 1);
rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg);
break;
case QID_AC_VI:
- rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR);
rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC1, 1);
rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg);
break;
case QID_AC_BE:
- rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR);
rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC2, 1);
rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg);
break;
case QID_AC_BK:
- rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR);
rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC3, 1);
rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg);
break;
case QID_RX:
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR0);
rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 1);
rt2x00mmio_register_write(rt2x00dev, TXRX_CSR0, reg);
break;
case QID_BEACON:
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
@@ -1299,7 +1302,7 @@
* Wait for stable hardware.
*/
for (i = 0; i < 100; i++) {
- rt2x00mmio_register_read(rt2x00dev, MAC_CSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR0);
if (reg)
break;
msleep(1);
@@ -1338,7 +1341,7 @@
rt2x00mmio_register_write(rt2x00dev, MCU_CNTL_CSR, reg);
for (i = 0; i < 100; i++) {
- rt2x00mmio_register_read(rt2x00dev, MCU_CNTL_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MCU_CNTL_CSR);
if (rt2x00_get_field32(reg, MCU_CNTL_CSR_READY))
break;
msleep(1);
@@ -1362,12 +1365,12 @@
rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 1);
rt2x00mmio_register_write(rt2x00dev, MAC_CSR1, reg);
- rt2x00mmio_register_read(rt2x00dev, MAC_CSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR1);
rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 0);
rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 0);
rt2x00mmio_register_write(rt2x00dev, MAC_CSR1, reg);
- rt2x00mmio_register_read(rt2x00dev, MAC_CSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR1);
rt2x00_set_field32(&reg, MAC_CSR1_HOST_READY, 1);
rt2x00mmio_register_write(rt2x00dev, MAC_CSR1, reg);
@@ -1383,11 +1386,11 @@
u32 word;
if (entry->queue->qid == QID_RX) {
- rt2x00_desc_read(entry_priv->desc, 0, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 0);
return rt2x00_get_field32(word, RXD_W0_OWNER_NIC);
} else {
- rt2x00_desc_read(entry_priv->desc, 0, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 0);
return (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
rt2x00_get_field32(word, TXD_W0_VALID));
@@ -1401,16 +1404,16 @@
u32 word;
if (entry->queue->qid == QID_RX) {
- rt2x00_desc_read(entry_priv->desc, 5, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 5);
rt2x00_set_field32(&word, RXD_W5_BUFFER_PHYSICAL_ADDRESS,
skbdesc->skb_dma);
rt2x00_desc_write(entry_priv->desc, 5, word);
- rt2x00_desc_read(entry_priv->desc, 0, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 0);
rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1);
rt2x00_desc_write(entry_priv->desc, 0, word);
} else {
- rt2x00_desc_read(entry_priv->desc, 0, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 0);
rt2x00_set_field32(&word, TXD_W0_VALID, 0);
rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0);
rt2x00_desc_write(entry_priv->desc, 0, word);
@@ -1425,7 +1428,7 @@
/*
* Initialize registers.
*/
- rt2x00mmio_register_read(rt2x00dev, TX_RING_CSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TX_RING_CSR0);
rt2x00_set_field32(&reg, TX_RING_CSR0_AC0_RING_SIZE,
rt2x00dev->tx[0].limit);
rt2x00_set_field32(&reg, TX_RING_CSR0_AC1_RING_SIZE,
@@ -1436,36 +1439,36 @@
rt2x00dev->tx[3].limit);
rt2x00mmio_register_write(rt2x00dev, TX_RING_CSR0, reg);
- rt2x00mmio_register_read(rt2x00dev, TX_RING_CSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TX_RING_CSR1);
rt2x00_set_field32(&reg, TX_RING_CSR1_TXD_SIZE,
rt2x00dev->tx[0].desc_size / 4);
rt2x00mmio_register_write(rt2x00dev, TX_RING_CSR1, reg);
entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
- rt2x00mmio_register_read(rt2x00dev, AC0_BASE_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, AC0_BASE_CSR);
rt2x00_set_field32(&reg, AC0_BASE_CSR_RING_REGISTER,
entry_priv->desc_dma);
rt2x00mmio_register_write(rt2x00dev, AC0_BASE_CSR, reg);
entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
- rt2x00mmio_register_read(rt2x00dev, AC1_BASE_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, AC1_BASE_CSR);
rt2x00_set_field32(&reg, AC1_BASE_CSR_RING_REGISTER,
entry_priv->desc_dma);
rt2x00mmio_register_write(rt2x00dev, AC1_BASE_CSR, reg);
entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
- rt2x00mmio_register_read(rt2x00dev, AC2_BASE_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, AC2_BASE_CSR);
rt2x00_set_field32(&reg, AC2_BASE_CSR_RING_REGISTER,
entry_priv->desc_dma);
rt2x00mmio_register_write(rt2x00dev, AC2_BASE_CSR, reg);
entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
- rt2x00mmio_register_read(rt2x00dev, AC3_BASE_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, AC3_BASE_CSR);
rt2x00_set_field32(&reg, AC3_BASE_CSR_RING_REGISTER,
entry_priv->desc_dma);
rt2x00mmio_register_write(rt2x00dev, AC3_BASE_CSR, reg);
- rt2x00mmio_register_read(rt2x00dev, RX_RING_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, RX_RING_CSR);
rt2x00_set_field32(&reg, RX_RING_CSR_RING_SIZE, rt2x00dev->rx->limit);
rt2x00_set_field32(&reg, RX_RING_CSR_RXD_SIZE,
rt2x00dev->rx->desc_size / 4);
@@ -1473,26 +1476,26 @@
rt2x00mmio_register_write(rt2x00dev, RX_RING_CSR, reg);
entry_priv = rt2x00dev->rx->entries[0].priv_data;
- rt2x00mmio_register_read(rt2x00dev, RX_BASE_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, RX_BASE_CSR);
rt2x00_set_field32(&reg, RX_BASE_CSR_RING_REGISTER,
entry_priv->desc_dma);
rt2x00mmio_register_write(rt2x00dev, RX_BASE_CSR, reg);
- rt2x00mmio_register_read(rt2x00dev, TX_DMA_DST_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TX_DMA_DST_CSR);
rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_AC0, 2);
rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_AC1, 2);
rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_AC2, 2);
rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_AC3, 2);
rt2x00mmio_register_write(rt2x00dev, TX_DMA_DST_CSR, reg);
- rt2x00mmio_register_read(rt2x00dev, LOAD_TX_RING_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, LOAD_TX_RING_CSR);
rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC0, 1);
rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC1, 1);
rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC2, 1);
rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC3, 1);
rt2x00mmio_register_write(rt2x00dev, LOAD_TX_RING_CSR, reg);
- rt2x00mmio_register_read(rt2x00dev, RX_CNTL_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, RX_CNTL_CSR);
rt2x00_set_field32(&reg, RX_CNTL_CSR_LOAD_RXD, 1);
rt2x00mmio_register_write(rt2x00dev, RX_CNTL_CSR, reg);
@@ -1503,13 +1506,13 @@
{
u32 reg;
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR0);
rt2x00_set_field32(&reg, TXRX_CSR0_AUTO_TX_SEQ, 1);
rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0);
rt2x00_set_field32(&reg, TXRX_CSR0_TX_WITHOUT_WAITING, 0);
rt2x00mmio_register_write(rt2x00dev, TXRX_CSR0, reg);
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR1);
rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID0, 47); /* CCK Signal */
rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID0_VALID, 1);
rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID1, 30); /* Rssi */
@@ -1523,7 +1526,7 @@
/*
* CCK TXD BBP registers
*/
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR2, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR2);
rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID0, 13);
rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID0_VALID, 1);
rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID1, 12);
@@ -1537,7 +1540,7 @@
/*
* OFDM TXD BBP registers
*/
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR3, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR3);
rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID0, 7);
rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID0_VALID, 1);
rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID1, 6);
@@ -1546,21 +1549,21 @@
rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID2_VALID, 1);
rt2x00mmio_register_write(rt2x00dev, TXRX_CSR3, reg);
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR7, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR7);
rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_6MBS, 59);
rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_9MBS, 53);
rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_12MBS, 49);
rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_18MBS, 46);
rt2x00mmio_register_write(rt2x00dev, TXRX_CSR7, reg);
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR8, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR8);
rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_24MBS, 44);
rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_36MBS, 42);
rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_48MBS, 42);
rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_54MBS, 42);
rt2x00mmio_register_write(rt2x00dev, TXRX_CSR8, reg);
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, 0);
@@ -1573,7 +1576,7 @@
rt2x00mmio_register_write(rt2x00dev, MAC_CSR6, 0x00000fff);
- rt2x00mmio_register_read(rt2x00dev, MAC_CSR9, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR9);
rt2x00_set_field32(&reg, MAC_CSR9_CW_SELECT, 0);
rt2x00mmio_register_write(rt2x00dev, MAC_CSR9, reg);
@@ -1619,24 +1622,24 @@
* These registers are cleared on read,
* so we may pass a useless variable to store the value.
*/
- rt2x00mmio_register_read(rt2x00dev, STA_CSR0, &reg);
- rt2x00mmio_register_read(rt2x00dev, STA_CSR1, &reg);
- rt2x00mmio_register_read(rt2x00dev, STA_CSR2, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, STA_CSR0);
+ reg = rt2x00mmio_register_read(rt2x00dev, STA_CSR1);
+ reg = rt2x00mmio_register_read(rt2x00dev, STA_CSR2);
/*
* Reset MAC and BBP registers.
*/
- rt2x00mmio_register_read(rt2x00dev, MAC_CSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR1);
rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 1);
rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 1);
rt2x00mmio_register_write(rt2x00dev, MAC_CSR1, reg);
- rt2x00mmio_register_read(rt2x00dev, MAC_CSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR1);
rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 0);
rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 0);
rt2x00mmio_register_write(rt2x00dev, MAC_CSR1, reg);
- rt2x00mmio_register_read(rt2x00dev, MAC_CSR1, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR1);
rt2x00_set_field32(&reg, MAC_CSR1_HOST_READY, 1);
rt2x00mmio_register_write(rt2x00dev, MAC_CSR1, reg);
@@ -1649,7 +1652,7 @@
u8 value;
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt61pci_bbp_read(rt2x00dev, 0, &value);
+ value = rt61pci_bbp_read(rt2x00dev, 0);
if ((value != 0xff) && (value != 0x00))
return 0;
udelay(REGISTER_BUSY_DELAY);
@@ -1695,7 +1698,7 @@
rt61pci_bbp_write(rt2x00dev, 107, 0x04);
for (i = 0; i < EEPROM_BBP_SIZE; i++) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i);
if (eeprom != 0xffff && eeprom != 0x0000) {
reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
@@ -1722,10 +1725,10 @@
* should clear the register to assure a clean state.
*/
if (state == STATE_RADIO_IRQ_ON) {
- rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR);
rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
- rt2x00mmio_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MCU_INT_SOURCE_CSR);
rt2x00mmio_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg);
}
@@ -1735,7 +1738,7 @@
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
- rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR);
rt2x00_set_field32(&reg, INT_MASK_CSR_TXDONE, mask);
rt2x00_set_field32(&reg, INT_MASK_CSR_RXDONE, mask);
rt2x00_set_field32(&reg, INT_MASK_CSR_BEACON_DONE, mask);
@@ -1743,7 +1746,7 @@
rt2x00_set_field32(&reg, INT_MASK_CSR_MITIGATION_PERIOD, 0xff);
rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
- rt2x00mmio_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MCU_INT_MASK_CSR);
rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_0, mask);
rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_1, mask);
rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_2, mask);
@@ -1783,7 +1786,7 @@
/*
* Enable RX.
*/
- rt2x00mmio_register_read(rt2x00dev, RX_CNTL_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, RX_CNTL_CSR);
rt2x00_set_field32(&reg, RX_CNTL_CSR_ENABLE_RX_DMA, 1);
rt2x00mmio_register_write(rt2x00dev, RX_CNTL_CSR, reg);
@@ -1806,7 +1809,7 @@
put_to_sleep = (state != STATE_AWAKE);
- rt2x00mmio_register_read(rt2x00dev, MAC_CSR12, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR12);
rt2x00_set_field32(&reg, MAC_CSR12_FORCE_WAKEUP, !put_to_sleep);
rt2x00_set_field32(&reg, MAC_CSR12_PUT_TO_SLEEP, put_to_sleep);
rt2x00mmio_register_write(rt2x00dev, MAC_CSR12, reg);
@@ -1817,7 +1820,7 @@
* device has entered the correct state.
*/
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2x00mmio_register_read(rt2x00dev, MAC_CSR12, &reg2);
+ reg2 = rt2x00mmio_register_read(rt2x00dev, MAC_CSR12);
state = rt2x00_get_field32(reg2, MAC_CSR12_BBP_CURRENT_STATE);
if (state == !put_to_sleep)
return 0;
@@ -1876,7 +1879,7 @@
/*
* Start writing the descriptor words.
*/
- rt2x00_desc_read(txd, 1, &word);
+ word = rt2x00_desc_read(txd, 1);
rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid);
rt2x00_set_field32(&word, TXD_W1_AIFSN, entry->queue->aifs);
rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min);
@@ -1887,7 +1890,7 @@
rt2x00_set_field32(&word, TXD_W1_BUFFER_COUNT, 1);
rt2x00_desc_write(txd, 1, word);
- rt2x00_desc_read(txd, 2, &word);
+ word = rt2x00_desc_read(txd, 2);
rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal);
rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW,
@@ -1901,7 +1904,7 @@
_rt2x00_desc_write(txd, 4, skbdesc->iv[1]);
}
- rt2x00_desc_read(txd, 5, &word);
+ word = rt2x00_desc_read(txd, 5);
rt2x00_set_field32(&word, TXD_W5_PID_TYPE, entry->queue->qid);
rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE, entry->entry_idx);
rt2x00_set_field32(&word, TXD_W5_TX_POWER,
@@ -1910,12 +1913,12 @@
rt2x00_desc_write(txd, 5, word);
if (entry->queue->qid != QID_BEACON) {
- rt2x00_desc_read(txd, 6, &word);
+ word = rt2x00_desc_read(txd, 6);
rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS,
skbdesc->skb_dma);
rt2x00_desc_write(txd, 6, word);
- rt2x00_desc_read(txd, 11, &word);
+ word = rt2x00_desc_read(txd, 11);
rt2x00_set_field32(&word, TXD_W11_BUFFER_LENGTH0,
txdesc->length);
rt2x00_desc_write(txd, 11, word);
@@ -1926,7 +1929,7 @@
* the device, whereby the device may take hold of the TXD before we
* finished updating it.
*/
- rt2x00_desc_read(txd, 0, &word);
+ word = rt2x00_desc_read(txd, 0);
rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1);
rt2x00_set_field32(&word, TXD_W0_VALID, 1);
rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
@@ -1975,7 +1978,7 @@
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9);
orig_reg = reg;
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
@@ -2036,7 +2039,7 @@
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &orig_reg);
+ orig_reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9);
reg = orig_reg;
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
@@ -2092,8 +2095,8 @@
u32 word0;
u32 word1;
- rt2x00_desc_read(entry_priv->desc, 0, &word0);
- rt2x00_desc_read(entry_priv->desc, 1, &word1);
+ word0 = rt2x00_desc_read(entry_priv->desc, 0);
+ word1 = rt2x00_desc_read(entry_priv->desc, 1);
if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
@@ -2102,11 +2105,11 @@
rxdesc->cipher_status = rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
if (rxdesc->cipher != CIPHER_NONE) {
- _rt2x00_desc_read(entry_priv->desc, 2, &rxdesc->iv[0]);
- _rt2x00_desc_read(entry_priv->desc, 3, &rxdesc->iv[1]);
+ rxdesc->iv[0] = _rt2x00_desc_read(entry_priv->desc, 2);
+ rxdesc->iv[1] = _rt2x00_desc_read(entry_priv->desc, 3);
rxdesc->dev_flags |= RXDONE_CRYPTO_IV;
- _rt2x00_desc_read(entry_priv->desc, 4, &rxdesc->icv);
+ rxdesc->icv = _rt2x00_desc_read(entry_priv->desc, 4);
rxdesc->dev_flags |= RXDONE_CRYPTO_ICV;
/*
@@ -2172,7 +2175,7 @@
* tx ring size for now.
*/
for (i = 0; i < rt2x00dev->tx->limit; i++) {
- rt2x00mmio_register_read(rt2x00dev, STA_CSR4, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, STA_CSR4);
if (!rt2x00_get_field32(reg, STA_CSR4_VALID))
break;
@@ -2195,7 +2198,7 @@
entry = &queue->entries[index];
entry_priv = entry->priv_data;
- rt2x00_desc_read(entry_priv->desc, 0, &word);
+ word = rt2x00_desc_read(entry_priv->desc, 0);
if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
!rt2x00_get_field32(word, TXD_W0_VALID))
@@ -2258,7 +2261,7 @@
*/
spin_lock_irq(&rt2x00dev->irqmask_lock);
- rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR);
rt2x00_set_field32(&reg, irq_field, 0);
rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
@@ -2276,7 +2279,7 @@
*/
spin_lock_irq(&rt2x00dev->irqmask_lock);
- rt2x00mmio_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MCU_INT_MASK_CSR);
rt2x00_set_field32(&reg, irq_field, 0);
rt2x00mmio_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
@@ -2328,10 +2331,10 @@
* Get the interrupt sources & saved to local variable.
* Write register value back to clear pending interrupts.
*/
- rt2x00mmio_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, &reg_mcu);
+ reg_mcu = rt2x00mmio_register_read(rt2x00dev, MCU_INT_SOURCE_CSR);
rt2x00mmio_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg_mcu);
- rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR);
rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
if (!reg && !reg_mcu)
@@ -2369,11 +2372,11 @@
*/
spin_lock(&rt2x00dev->irqmask_lock);
- rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR);
reg |= mask;
rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
- rt2x00mmio_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MCU_INT_MASK_CSR);
reg |= mask_mcu;
rt2x00mmio_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
@@ -2393,7 +2396,7 @@
u8 *mac;
s8 value;
- rt2x00mmio_register_read(rt2x00dev, E2PROM_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, E2PROM_CSR);
eeprom.data = rt2x00dev;
eeprom.register_read = rt61pci_eepromregister_read;
@@ -2414,7 +2417,7 @@
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
rt2x00lib_set_mac_address(rt2x00dev, mac);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2);
rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT,
@@ -2429,7 +2432,7 @@
rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_NIC_ENABLE_DIVERSITY, 0);
rt2x00_set_field16(&word, EEPROM_NIC_TX_DIVERSITY, 0);
@@ -2442,7 +2445,7 @@
rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_LED);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_LED_LED_MODE,
LED_MODE_DEFAULT);
@@ -2450,7 +2453,7 @@
rt2x00_eeprom_dbg(rt2x00dev, "Led: 0x%04x\n", word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
rt2x00_set_field16(&word, EEPROM_FREQ_SEQ, 0);
@@ -2458,7 +2461,7 @@
rt2x00_eeprom_dbg(rt2x00dev, "Freq: 0x%04x\n", word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0);
rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0);
@@ -2474,7 +2477,7 @@
rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0);
rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0);
@@ -2502,13 +2505,13 @@
/*
* Read EEPROM word for configuration.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA);
/*
* Identify RF chipset.
*/
value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
- rt2x00mmio_register_read(rt2x00dev, MAC_CSR0, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR0);
rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
@@ -2549,7 +2552,7 @@
/*
* Read frequency offset and RF programming sequence.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ);
if (rt2x00_get_field16(eeprom, EEPROM_FREQ_SEQ))
__set_bit(CAPABILITY_RF_SEQUENCE, &rt2x00dev->cap_flags);
@@ -2558,7 +2561,7 @@
/*
* Read external LNA informations.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_A))
__set_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
@@ -2589,7 +2592,7 @@
* switch to default led mode.
*/
#ifdef CONFIG_RT2X00_LIB_LEDS
- rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_LED);
value = rt2x00_get_field16(eeprom, EEPROM_LED_LED_MODE);
rt61pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
@@ -2850,7 +2853,7 @@
* Enable rfkill polling by setting GPIO direction of the
* rfkill switch GPIO pin correctly.
*/
- rt2x00mmio_register_read(rt2x00dev, MAC_CSR13, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR13);
rt2x00_set_field32(&reg, MAC_CSR13_DIR5, 1);
rt2x00mmio_register_write(rt2x00dev, MAC_CSR13, reg);
@@ -2922,7 +2925,7 @@
field.bit_offset = (queue_idx & 1) * 16;
field.bit_mask = 0xffff << field.bit_offset;
- rt2x00mmio_register_read(rt2x00dev, offset, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, offset);
rt2x00_set_field32(&reg, field, queue->txop);
rt2x00mmio_register_write(rt2x00dev, offset, reg);
@@ -2930,15 +2933,15 @@
field.bit_offset = queue_idx * 4;
field.bit_mask = 0xf << field.bit_offset;
- rt2x00mmio_register_read(rt2x00dev, AIFSN_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, AIFSN_CSR);
rt2x00_set_field32(&reg, field, queue->aifs);
rt2x00mmio_register_write(rt2x00dev, AIFSN_CSR, reg);
- rt2x00mmio_register_read(rt2x00dev, CWMIN_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CWMIN_CSR);
rt2x00_set_field32(&reg, field, queue->cw_min);
rt2x00mmio_register_write(rt2x00dev, CWMIN_CSR, reg);
- rt2x00mmio_register_read(rt2x00dev, CWMAX_CSR, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, CWMAX_CSR);
rt2x00_set_field32(&reg, field, queue->cw_max);
rt2x00mmio_register_write(rt2x00dev, CWMAX_CSR, reg);
@@ -2951,9 +2954,9 @@
u64 tsf;
u32 reg;
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR13, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR13);
tsf = (u64) rt2x00_get_field32(reg, TXRX_CSR13_HIGH_TSFTIMER) << 32;
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR12, &reg);
+ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR12);
tsf |= rt2x00_get_field32(reg, TXRX_CSR12_LOW_TSFTIMER);
return tsf;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
index bb8d307..fd91322 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
@@ -84,10 +84,11 @@
mutex_unlock(&rt2x00dev->csr_mutex);
}
-static void rt73usb_bbp_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int word, u8 *value)
+static u8 rt73usb_bbp_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int word)
{
u32 reg;
+ u8 value;
mutex_lock(&rt2x00dev->csr_mutex);
@@ -110,9 +111,11 @@
WAIT_FOR_BBP(rt2x00dev, &reg);
}
- *value = rt2x00_get_field32(reg, PHY_CSR3_VALUE);
+ value = rt2x00_get_field32(reg, PHY_CSR3_VALUE);
mutex_unlock(&rt2x00dev->csr_mutex);
+
+ return value;
}
static void rt73usb_rf_write(struct rt2x00_dev *rt2x00dev,
@@ -185,7 +188,7 @@
{
u32 reg;
- rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR13);
return rt2x00_get_field32(reg, MAC_CSR13_VAL7);
}
@@ -238,7 +241,7 @@
container_of(led_cdev, struct rt2x00_led, led_dev);
u32 reg;
- rt2x00usb_register_read(led->rt2x00dev, MAC_CSR14, &reg);
+ reg = rt2x00usb_register_read(led->rt2x00dev, MAC_CSR14);
rt2x00_set_field32(&reg, MAC_CSR14_ON_PERIOD, *delay_on);
rt2x00_set_field32(&reg, MAC_CSR14_OFF_PERIOD, *delay_off);
rt2x00usb_register_write(led->rt2x00dev, MAC_CSR14, reg);
@@ -283,7 +286,7 @@
*/
mask = (0xf << crypto->bssidx);
- rt2x00usb_register_read(rt2x00dev, SEC_CSR0, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR0);
reg &= mask;
if (reg && reg == mask)
@@ -316,14 +319,14 @@
field.bit_offset = (3 * key->hw_key_idx);
field.bit_mask = 0x7 << field.bit_offset;
- rt2x00usb_register_read(rt2x00dev, SEC_CSR1, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR1);
rt2x00_set_field32(&reg, field, crypto->cipher);
rt2x00usb_register_write(rt2x00dev, SEC_CSR1, reg);
} else {
field.bit_offset = (3 * (key->hw_key_idx - 8));
field.bit_mask = 0x7 << field.bit_offset;
- rt2x00usb_register_read(rt2x00dev, SEC_CSR5, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR5);
rt2x00_set_field32(&reg, field, crypto->cipher);
rt2x00usb_register_write(rt2x00dev, SEC_CSR5, reg);
}
@@ -348,7 +351,7 @@
*/
mask = 1 << key->hw_key_idx;
- rt2x00usb_register_read(rt2x00dev, SEC_CSR0, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR0);
if (crypto->cmd == SET_KEY)
reg |= mask;
else if (crypto->cmd == DISABLE_KEY)
@@ -377,10 +380,10 @@
* When both registers are full, we drop the key,
* otherwise we use the first invalid entry.
*/
- rt2x00usb_register_read(rt2x00dev, SEC_CSR2, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR2);
if (reg && reg == ~0) {
key->hw_key_idx = 32;
- rt2x00usb_register_read(rt2x00dev, SEC_CSR3, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR3);
if (reg && reg == ~0)
return -ENOSPC;
}
@@ -417,7 +420,7 @@
* without this received frames will not be decrypted
* by the hardware.
*/
- rt2x00usb_register_read(rt2x00dev, SEC_CSR4, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR4);
reg |= (1 << crypto->bssidx);
rt2x00usb_register_write(rt2x00dev, SEC_CSR4, reg);
@@ -442,7 +445,7 @@
if (key->hw_key_idx < 32) {
mask = 1 << key->hw_key_idx;
- rt2x00usb_register_read(rt2x00dev, SEC_CSR2, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR2);
if (crypto->cmd == SET_KEY)
reg |= mask;
else if (crypto->cmd == DISABLE_KEY)
@@ -451,7 +454,7 @@
} else {
mask = 1 << (key->hw_key_idx - 32);
- rt2x00usb_register_read(rt2x00dev, SEC_CSR3, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR3);
if (crypto->cmd == SET_KEY)
reg |= mask;
else if (crypto->cmd == DISABLE_KEY)
@@ -473,7 +476,7 @@
* and broadcast frames will always be accepted since
* there is no filter for it at this time.
*/
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR0);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CRC,
!(filter_flags & FIF_FCSFAIL));
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_PHYSICAL,
@@ -505,7 +508,7 @@
/*
* Enable synchronisation.
*/
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR9);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
}
@@ -535,13 +538,13 @@
{
u32 reg;
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR0);
rt2x00_set_field32(&reg, TXRX_CSR0_RX_ACK_TIMEOUT, 0x32);
rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR4, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR4);
rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1);
rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE,
!!erp->short_preamble);
@@ -553,18 +556,18 @@
erp->basic_rates);
if (changed & BSS_CHANGED_BEACON_INT) {
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR9);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL,
erp->beacon_int * 16);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
- rt2x00usb_register_read(rt2x00dev, MAC_CSR9, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR9);
rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, erp->slot_time);
rt2x00usb_register_write(rt2x00dev, MAC_CSR9, reg);
- rt2x00usb_register_read(rt2x00dev, MAC_CSR8, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR8);
rt2x00_set_field32(&reg, MAC_CSR8_SIFS, erp->sifs);
rt2x00_set_field32(&reg, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3);
rt2x00_set_field32(&reg, MAC_CSR8_EIFS, erp->eifs);
@@ -580,9 +583,9 @@
u8 r77;
u8 temp;
- rt73usb_bbp_read(rt2x00dev, 3, &r3);
- rt73usb_bbp_read(rt2x00dev, 4, &r4);
- rt73usb_bbp_read(rt2x00dev, 77, &r77);
+ r3 = rt73usb_bbp_read(rt2x00dev, 3);
+ r4 = rt73usb_bbp_read(rt2x00dev, 4);
+ r77 = rt73usb_bbp_read(rt2x00dev, 77);
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0);
@@ -627,9 +630,9 @@
u8 r4;
u8 r77;
- rt73usb_bbp_read(rt2x00dev, 3, &r3);
- rt73usb_bbp_read(rt2x00dev, 4, &r4);
- rt73usb_bbp_read(rt2x00dev, 77, &r77);
+ r3 = rt73usb_bbp_read(rt2x00dev, 3);
+ r4 = rt73usb_bbp_read(rt2x00dev, 4);
+ r77 = rt73usb_bbp_read(rt2x00dev, 77);
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
@@ -715,7 +718,7 @@
for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++)
rt73usb_bbp_write(rt2x00dev, sel[i].word, sel[i].value[lna]);
- rt2x00usb_register_read(rt2x00dev, PHY_CSR0, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, PHY_CSR0);
rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG,
(rt2x00dev->curr_band == NL80211_BAND_2GHZ));
@@ -740,10 +743,10 @@
if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
lna_gain += 14;
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG);
lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
} else {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A);
lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1);
}
@@ -762,7 +765,7 @@
smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527));
- rt73usb_bbp_read(rt2x00dev, 3, &r3);
+ r3 = rt73usb_bbp_read(rt2x00dev, 3);
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
rt73usb_bbp_write(rt2x00dev, 3, r3);
@@ -796,10 +799,10 @@
{
struct rf_channel rf;
- rt2x00_rf_read(rt2x00dev, 1, &rf.rf1);
- rt2x00_rf_read(rt2x00dev, 2, &rf.rf2);
- rt2x00_rf_read(rt2x00dev, 3, &rf.rf3);
- rt2x00_rf_read(rt2x00dev, 4, &rf.rf4);
+ rf.rf1 = rt2x00_rf_read(rt2x00dev, 1);
+ rf.rf2 = rt2x00_rf_read(rt2x00dev, 2);
+ rf.rf3 = rt2x00_rf_read(rt2x00dev, 3);
+ rf.rf4 = rt2x00_rf_read(rt2x00dev, 4);
rt73usb_config_channel(rt2x00dev, &rf, txpower);
}
@@ -809,7 +812,7 @@
{
u32 reg;
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR4, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR4);
rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_DOWN, 1);
rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_STEP, 0);
rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_FALLBACK_CCK, 0);
@@ -829,7 +832,7 @@
u32 reg;
if (state == STATE_SLEEP) {
- rt2x00usb_register_read(rt2x00dev, MAC_CSR11, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR11);
rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN,
rt2x00dev->beacon_int - 10);
rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP,
@@ -846,7 +849,7 @@
rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
USB_MODE_SLEEP, REGISTER_TIMEOUT);
} else {
- rt2x00usb_register_read(rt2x00dev, MAC_CSR11, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR11);
rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN, 0);
rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP, 0);
rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0);
@@ -888,13 +891,13 @@
/*
* Update FCS error count from register.
*/
- rt2x00usb_register_read(rt2x00dev, STA_CSR0, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, STA_CSR0);
qual->rx_failed = rt2x00_get_field32(reg, STA_CSR0_FCS_ERROR);
/*
* Update False CCA count from register.
*/
- rt2x00usb_register_read(rt2x00dev, STA_CSR1, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, STA_CSR1);
qual->false_cca = rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR);
}
@@ -1025,12 +1028,12 @@
switch (queue->qid) {
case QID_RX:
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR0);
rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
break;
case QID_BEACON:
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR9);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
@@ -1048,12 +1051,12 @@
switch (queue->qid) {
case QID_RX:
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR0);
rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 1);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
break;
case QID_BEACON:
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR9);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
@@ -1112,7 +1115,7 @@
* Wait for stable hardware.
*/
for (i = 0; i < 100; i++) {
- rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR0);
if (reg)
break;
msleep(1);
@@ -1150,13 +1153,13 @@
{
u32 reg;
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR0);
rt2x00_set_field32(&reg, TXRX_CSR0_AUTO_TX_SEQ, 1);
rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0);
rt2x00_set_field32(&reg, TXRX_CSR0_TX_WITHOUT_WAITING, 0);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR1, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR1);
rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID0, 47); /* CCK Signal */
rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID0_VALID, 1);
rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID1, 30); /* Rssi */
@@ -1170,7 +1173,7 @@
/*
* CCK TXD BBP registers
*/
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR2, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR2);
rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID0, 13);
rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID0_VALID, 1);
rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID1, 12);
@@ -1184,7 +1187,7 @@
/*
* OFDM TXD BBP registers
*/
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR3, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR3);
rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID0, 7);
rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID0_VALID, 1);
rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID1, 6);
@@ -1193,21 +1196,21 @@
rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID2_VALID, 1);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR3, reg);
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR7, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR7);
rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_6MBS, 59);
rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_9MBS, 53);
rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_12MBS, 49);
rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_18MBS, 46);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR7, reg);
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR8, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR8);
rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_24MBS, 44);
rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_36MBS, 42);
rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_48MBS, 42);
rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_54MBS, 42);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR8, reg);
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR9);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, 0);
@@ -1218,7 +1221,7 @@
rt2x00usb_register_write(rt2x00dev, TXRX_CSR15, 0x0000000f);
- rt2x00usb_register_read(rt2x00dev, MAC_CSR6, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR6);
rt2x00_set_field32(&reg, MAC_CSR6_MAX_FRAME_UNIT, 0xfff);
rt2x00usb_register_write(rt2x00dev, MAC_CSR6, reg);
@@ -1246,7 +1249,7 @@
rt2x00usb_register_write(rt2x00dev, PHY_CSR6, 0x00080606);
rt2x00usb_register_write(rt2x00dev, PHY_CSR7, 0x00000408);
- rt2x00usb_register_read(rt2x00dev, MAC_CSR9, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR9);
rt2x00_set_field32(&reg, MAC_CSR9_CW_SELECT, 0);
rt2x00usb_register_write(rt2x00dev, MAC_CSR9, reg);
@@ -1266,24 +1269,24 @@
* These registers are cleared on read,
* so we may pass a useless variable to store the value.
*/
- rt2x00usb_register_read(rt2x00dev, STA_CSR0, &reg);
- rt2x00usb_register_read(rt2x00dev, STA_CSR1, &reg);
- rt2x00usb_register_read(rt2x00dev, STA_CSR2, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, STA_CSR0);
+ reg = rt2x00usb_register_read(rt2x00dev, STA_CSR1);
+ reg = rt2x00usb_register_read(rt2x00dev, STA_CSR2);
/*
* Reset MAC and BBP registers.
*/
- rt2x00usb_register_read(rt2x00dev, MAC_CSR1, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR1);
rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 1);
rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 1);
rt2x00usb_register_write(rt2x00dev, MAC_CSR1, reg);
- rt2x00usb_register_read(rt2x00dev, MAC_CSR1, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR1);
rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 0);
rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 0);
rt2x00usb_register_write(rt2x00dev, MAC_CSR1, reg);
- rt2x00usb_register_read(rt2x00dev, MAC_CSR1, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR1);
rt2x00_set_field32(&reg, MAC_CSR1_HOST_READY, 1);
rt2x00usb_register_write(rt2x00dev, MAC_CSR1, reg);
@@ -1296,7 +1299,7 @@
u8 value;
for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
- rt73usb_bbp_read(rt2x00dev, 0, &value);
+ value = rt73usb_bbp_read(rt2x00dev, 0);
if ((value != 0xff) && (value != 0x00))
return 0;
udelay(REGISTER_BUSY_DELAY);
@@ -1343,7 +1346,7 @@
rt73usb_bbp_write(rt2x00dev, 107, 0x04);
for (i = 0; i < EEPROM_BBP_SIZE; i++) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i);
if (eeprom != 0xffff && eeprom != 0x0000) {
reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
@@ -1390,7 +1393,7 @@
put_to_sleep = (state != STATE_AWAKE);
- rt2x00usb_register_read(rt2x00dev, MAC_CSR12, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR12);
rt2x00_set_field32(&reg, MAC_CSR12_FORCE_WAKEUP, !put_to_sleep);
rt2x00_set_field32(&reg, MAC_CSR12_PUT_TO_SLEEP, put_to_sleep);
rt2x00usb_register_write(rt2x00dev, MAC_CSR12, reg);
@@ -1401,7 +1404,7 @@
* device has entered the correct state.
*/
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2x00usb_register_read(rt2x00dev, MAC_CSR12, &reg2);
+ reg2 = rt2x00usb_register_read(rt2x00dev, MAC_CSR12);
state = rt2x00_get_field32(reg2, MAC_CSR12_BBP_CURRENT_STATE);
if (state == !put_to_sleep)
return 0;
@@ -1459,7 +1462,7 @@
/*
* Start writing the descriptor words.
*/
- rt2x00_desc_read(txd, 0, &word);
+ word = rt2x00_desc_read(txd, 0);
rt2x00_set_field32(&word, TXD_W0_BURST,
test_bit(ENTRY_TXD_BURST, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_VALID, 1);
@@ -1485,7 +1488,7 @@
rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
rt2x00_desc_write(txd, 0, word);
- rt2x00_desc_read(txd, 1, &word);
+ word = rt2x00_desc_read(txd, 1);
rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid);
rt2x00_set_field32(&word, TXD_W1_AIFSN, entry->queue->aifs);
rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min);
@@ -1495,7 +1498,7 @@
test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
rt2x00_desc_write(txd, 1, word);
- rt2x00_desc_read(txd, 2, &word);
+ word = rt2x00_desc_read(txd, 2);
rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal);
rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW,
@@ -1509,7 +1512,7 @@
_rt2x00_desc_write(txd, 4, skbdesc->iv[1]);
}
- rt2x00_desc_read(txd, 5, &word);
+ word = rt2x00_desc_read(txd, 5);
rt2x00_set_field32(&word, TXD_W5_TX_POWER,
TXPOWER_TO_DEV(entry->queue->rt2x00dev->tx_power));
rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
@@ -1538,7 +1541,7 @@
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR9);
orig_reg = reg;
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
@@ -1603,7 +1606,7 @@
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &orig_reg);
+ orig_reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR9);
reg = orig_reg;
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
@@ -1691,8 +1694,8 @@
/*
* It is now safe to read the descriptor on all architectures.
*/
- rt2x00_desc_read(rxd, 0, &word0);
- rt2x00_desc_read(rxd, 1, &word1);
+ word0 = rt2x00_desc_read(rxd, 0);
+ word1 = rt2x00_desc_read(rxd, 1);
if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
@@ -1701,11 +1704,11 @@
rxdesc->cipher_status = rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
if (rxdesc->cipher != CIPHER_NONE) {
- _rt2x00_desc_read(rxd, 2, &rxdesc->iv[0]);
- _rt2x00_desc_read(rxd, 3, &rxdesc->iv[1]);
+ rxdesc->iv[0] = _rt2x00_desc_read(rxd, 2);
+ rxdesc->iv[1] = _rt2x00_desc_read(rxd, 3);
rxdesc->dev_flags |= RXDONE_CRYPTO_IV;
- _rt2x00_desc_read(rxd, 4, &rxdesc->icv);
+ rxdesc->icv = _rt2x00_desc_read(rxd, 4);
rxdesc->dev_flags |= RXDONE_CRYPTO_ICV;
/*
@@ -1768,7 +1771,7 @@
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
rt2x00lib_set_mac_address(rt2x00dev, mac);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2);
rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT,
@@ -1783,14 +1786,14 @@
rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA, 0);
rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word);
rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_LED);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_LED_POLARITY_RDY_G, 0);
rt2x00_set_field16(&word, EEPROM_LED_POLARITY_RDY_A, 0);
@@ -1806,7 +1809,7 @@
rt2x00_eeprom_dbg(rt2x00dev, "Led: 0x%04x\n", word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
rt2x00_set_field16(&word, EEPROM_FREQ_SEQ, 0);
@@ -1814,7 +1817,7 @@
rt2x00_eeprom_dbg(rt2x00dev, "Freq: 0x%04x\n", word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0);
rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0);
@@ -1830,7 +1833,7 @@
rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &word);
+ word = rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0);
rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0);
@@ -1858,13 +1861,13 @@
/*
* Read EEPROM word for configuration.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA);
/*
* Identify RF chipset.
*/
value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
- rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR0);
rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
@@ -1904,13 +1907,13 @@
/*
* Read frequency offset.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ);
rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET);
/*
* Read external LNA informations.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA)) {
__set_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
@@ -1921,7 +1924,7 @@
* Store led settings, for correct led behaviour.
*/
#ifdef CONFIG_RT2X00_LIB_LEDS
- rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom);
+ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_LED);
rt73usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
rt73usb_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC);
@@ -2188,7 +2191,7 @@
* Enable rfkill polling by setting GPIO direction of the
* rfkill switch GPIO pin correctly.
*/
- rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR13);
rt2x00_set_field32(&reg, MAC_CSR13_DIR7, 0);
rt2x00usb_register_write(rt2x00dev, MAC_CSR13, reg);
@@ -2260,7 +2263,7 @@
field.bit_offset = (queue_idx & 1) * 16;
field.bit_mask = 0xffff << field.bit_offset;
- rt2x00usb_register_read(rt2x00dev, offset, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, offset);
rt2x00_set_field32(&reg, field, queue->txop);
rt2x00usb_register_write(rt2x00dev, offset, reg);
@@ -2268,15 +2271,15 @@
field.bit_offset = queue_idx * 4;
field.bit_mask = 0xf << field.bit_offset;
- rt2x00usb_register_read(rt2x00dev, AIFSN_CSR, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, AIFSN_CSR);
rt2x00_set_field32(&reg, field, queue->aifs);
rt2x00usb_register_write(rt2x00dev, AIFSN_CSR, reg);
- rt2x00usb_register_read(rt2x00dev, CWMIN_CSR, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, CWMIN_CSR);
rt2x00_set_field32(&reg, field, queue->cw_min);
rt2x00usb_register_write(rt2x00dev, CWMIN_CSR, reg);
- rt2x00usb_register_read(rt2x00dev, CWMAX_CSR, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, CWMAX_CSR);
rt2x00_set_field32(&reg, field, queue->cw_max);
rt2x00usb_register_write(rt2x00dev, CWMAX_CSR, reg);
@@ -2289,9 +2292,9 @@
u64 tsf;
u32 reg;
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR13, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR13);
tsf = (u64) rt2x00_get_field32(reg, TXRX_CSR13_HIGH_TSFTIMER) << 32;
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR12, &reg);
+ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR12);
tsf |= rt2x00_get_field32(reg, TXRX_CSR12_LOW_TSFTIMER);
return tsf;
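The rt61pci and rt73usb hunks above all apply one mechanical conversion: read
accessors that used to fill a caller-provided out-parameter now return the
value directly, so every call site collapses from a read-then-use pair into a
single assignment. A minimal stand-alone C sketch of the idiom (the
mmio_read() helper and the register array are illustrative stand-ins, not the
driver's code):

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[16];		/* illustrative register file */

static uint32_t mmio_read(unsigned int off)
{
	return regs[off];		/* stand-in for the real bus access */
}

/* old style: result returned through an out-parameter */
static void register_read_old(unsigned int off, uint32_t *value)
{
	*value = mmio_read(off);
}

/* new style: result returned as the value */
static uint32_t register_read(unsigned int off)
{
	return mmio_read(off);
}

int main(void)
{
	uint32_t reg;

	register_read_old(3, &reg);	/* old call site: address-of needed */
	reg = register_read(3);		/* new call site: plain assignment */
	printf("0x%08x\n", reg);
	return 0;
}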
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index b944794..170cd50 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -247,7 +247,10 @@
0x04, 0x08, /* Noise gain, limit offset */
0x28, 0x28, /* det rssi, med busy offsets */
7, /* det sync thresh */
- 0, 2, 2 /* test mode, min, max */
+ 0, 2, 2, /* test mode, min, max */
+ 0, /* rx/tx delay */
+ 0, 0, 0, 0, 0, 0, /* current BSS id */
+ 0 /* hop set */
};
/*===========================================================================*/
@@ -597,7 +600,7 @@
* a_beacon_period = hops a_beacon_period = KuS
*//* 64ms = 010000 */
if (local->fw_ver == 0x55) {
- memcpy((UCHAR *) &local->sparm.b4, b4_default_startup_parms,
+ memcpy(&local->sparm.b4, b4_default_startup_parms,
sizeof(struct b4_startup_params));
/* Translate sane kus input values to old build 4/5 format */
/* i = hop time in uS truncated to 3 bytes */
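Two small ray_cs cleanups sit in the hunk above: the b4_default_startup_parms
initializer is padded out so it covers every member of struct
b4_startup_params (trailing members of a static aggregate are zero-initialized
anyway, so the added zeros only document the layout), and the (UCHAR *) cast
on memcpy()'s destination goes away because memcpy() takes void * and any
object pointer converts to it implicitly. A stand-alone sketch of both points
(the struct here is illustrative, not the driver's):

#include <string.h>
#include <stdio.h>

struct params {
	unsigned char sync_thresh;
	unsigned char test_mode;
	unsigned char rxtx_delay;
	unsigned char bss_id[6];
	unsigned char hop_set;
};

/* every member spelled out; the trailing zeros would be implicit anyway */
static const struct params defaults = {
	7,			/* det sync thresh */
	0,			/* test mode */
	0,			/* rx/tx delay */
	{ 0, 0, 0, 0, 0, 0 },	/* current BSS id */
	0			/* hop set */
};

int main(void)
{
	struct params live;

	memcpy(&live, &defaults, sizeof(live));	/* no cast required */
	printf("sync=%u\n", live.sync_thresh);
	return 0;
}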
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index 35fe991..55198ac2 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -278,8 +278,7 @@
}
if (!priv->is_rtl8187b) {
- struct rtl8187_tx_hdr *hdr =
- (struct rtl8187_tx_hdr *)skb_push(skb, sizeof(*hdr));
+ struct rtl8187_tx_hdr *hdr = skb_push(skb, sizeof(*hdr));
hdr->flags = cpu_to_le32(flags);
hdr->len = 0;
hdr->rts_duration = rts_dur;
@@ -292,8 +291,7 @@
unsigned int epmap[4] = { 6, 7, 5, 4 };
u16 fc = le16_to_cpu(tx_hdr->frame_control);
- struct rtl8187b_tx_hdr *hdr =
- (struct rtl8187b_tx_hdr *)skb_push(skb, sizeof(*hdr));
+ struct rtl8187b_tx_hdr *hdr = skb_push(skb, sizeof(*hdr));
struct ieee80211_rate *txrate =
ieee80211_get_tx_rate(dev, info);
memset(hdr, 0, sizeof(*hdr));
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 39d5631..21e5ef0 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -4952,7 +4952,7 @@
if (control && control->sta)
sta = control->sta;
- tx_desc = (struct rtl8xxxu_txdesc32 *)skb_push(skb, tx_desc_size);
+ tx_desc = skb_push(skb, tx_desc_size);
memset(tx_desc, 0, tx_desc_size);
tx_desc->pkt_size = cpu_to_le16(pktlen);
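The rtl8187 and rtl8xxxu hunks drop the explicit casts in front of skb_push()
because skb_push() now returns void *, which C converts implicitly to any
object-pointer type. A stand-alone sketch of why the cast becomes redundant
(buf_push() is a hypothetical stand-in for skb_push(), not the kernel API):

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct tx_hdr {
	uint32_t flags;
	uint16_t len;
};

static unsigned char buf[64];
static unsigned char *data = buf + 32;	/* headroom to push into */

static void *buf_push(size_t len)	/* returns void *, like skb_push() */
{
	data -= len;
	return data;
}

int main(void)
{
	/* no (struct tx_hdr *) cast needed on a void * return value */
	struct tx_hdr *hdr = buf_push(sizeof(*hdr));

	memset(hdr, 0, sizeof(*hdr));
	hdr->len = 27;
	printf("pushed %zu bytes, len=%u\n", sizeof(*hdr), hdr->len);
	return 0;
}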
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index bdc3791..710e5b4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -1875,8 +1875,7 @@
return NULL;
skb_reserve(skb, hw->extra_tx_headroom);
- action_frame = (void *)skb_put(skb, 27);
- memset(action_frame, 0, 27);
+ action_frame = skb_put_zero(skb, 27);
memcpy(action_frame->da, da, ETH_ALEN);
memcpy(action_frame->sa, rtlefuse->dev_addr, ETH_ALEN);
memcpy(action_frame->bssid, bssid, ETH_ALEN);
@@ -2005,8 +2004,7 @@
return NULL;
skb_reserve(skb, hw->extra_tx_headroom);
- action_frame = (void *)skb_put(skb, 34);
- memset(action_frame, 0, 34);
+ action_frame = skb_put_zero(skb, 34);
memcpy(action_frame->sa, sa, ETH_ALEN);
memcpy(action_frame->da, rtlefuse->dev_addr, ETH_ALEN);
memcpy(action_frame->bssid, bssid, ETH_ALEN);
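Both base.c hunks fold the skb_put() + memset() pair into skb_put_zero(),
which reserves the bytes and zeroes them in one call. A sketch of the helper's
shape (buf_put() and buf_put_zero() are hypothetical stand-ins for the skb
helpers):

#include <stddef.h>
#include <string.h>
#include <stdio.h>

static unsigned char buf[128];
static size_t tail;

static void *buf_put(size_t len)	/* stand-in for skb_put() */
{
	void *p = buf + tail;

	tail += len;
	return p;
}

static void *buf_put_zero(size_t len)	/* stand-in for skb_put_zero() */
{
	void *p = buf_put(len);

	memset(p, 0, len);		/* the memset the call sites drop */
	return p;
}

int main(void)
{
	unsigned char *frame = buf_put_zero(27);

	printf("frame[0]=%u tail=%zu\n", frame[0], tail);
	return 0;
}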
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
index 57e633d..9015512 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
@@ -456,6 +456,39 @@
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
+static
+bool btc8192e2ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
+{
+ static bool pre_wifi_busy = false, pre_under_4way = false,
+ pre_bt_hs_on = false;
+ bool wifi_busy = false, under_4way = false, bt_hs_on = false;
+ bool wifi_connected = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+ &under_4way);
+
+ if (wifi_connected) {
+ if (wifi_busy != pre_wifi_busy) {
+ pre_wifi_busy = wifi_busy;
+ return true;
+ }
+ if (under_4way != pre_under_4way) {
+ pre_under_4way = under_4way;
+ return true;
+ }
+ if (bt_hs_on != pre_bt_hs_on) {
+ pre_bt_hs_on = bt_hs_on;
+ return true;
+ }
+ }
+
+ return false;
+}
+
static void btc8192e2ant_update_bt_link_info(struct btc_coexist *btcoexist)
{
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
@@ -2886,9 +2919,8 @@
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
"0x774(lp rx[31:16]/tx[15:0])",
coex_sta->low_priority_rx, coex_sta->low_priority_tx);
-#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 1)
- btc8192e2ant_monitor_bt_ctr(btcoexist);
-#endif
+ if (btcoexist->auto_report_2ant)
+ btc8192e2ant_monitor_bt_ctr(btcoexist);
btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
}
@@ -3078,14 +3110,12 @@
*/
}
-#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0)
- if ((coex_sta->bt_info_ext & BIT4)) {
- /* BT auto report already enabled, do nothing */
- } else {
- btc8192e2ant_bt_auto_report(btcoexist, FORCE_EXEC,
- true);
+ if (!btcoexist->auto_report_2ant) {
+ if (!(coex_sta->bt_info_ext & BIT4))
+ btc8192e2ant_bt_auto_report(btcoexist,
+ FORCE_EXEC,
+ true);
}
-#endif
}
/* check BIT2 first ==> check if bt is under inquiry or page scan */
@@ -3207,13 +3237,13 @@
"************************************************\n");
}
-#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0)
- btc8192e2ant_query_bt_info(btcoexist);
- btc8192e2ant_monitor_bt_ctr(btcoexist);
- btc8192e2ant_monitor_bt_enable_disable(btcoexist);
-#else
- if (btc8192e2ant_is_wifi_status_changed(btcoexist) ||
- coex_dm->auto_tdma_adjust)
- btc8192e2ant_run_coexist_mechanism(btcoexist);
-#endif
+ if (!btcoexist->auto_report_2ant) {
+ btc8192e2ant_query_bt_info(btcoexist);
+ btc8192e2ant_monitor_bt_ctr(btcoexist);
+ btc8192e2ant_monitor_bt_enable_disable(btcoexist);
+ } else {
+ if (btc8192e2ant_is_wifi_status_changed(btcoexist) ||
+ coex_dm->auto_tdma_adjust)
+ btc8192e2ant_run_coexist_mechanism(btcoexist);
+ }
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
index fc0fa87..a57d694 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
@@ -25,8 +25,6 @@
/*****************************************************************
* The following is for 8192E 2Ant BT Co-exist definition
*****************************************************************/
-#define BT_AUTO_REPORT_ONLY_8192E_2ANT 0
-
#define BT_INFO_8192E_2ANT_B_FTP BIT7
#define BT_INFO_8192E_2ANT_B_A2DP BIT6
#define BT_INFO_8192E_2ANT_B_HID BIT5
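The 8192e changes above replace the compile-time BT_AUTO_REPORT_ONLY_8192E_2ANT
switch with the runtime btcoexist->auto_report_2ant flag, so both code paths
stay compiled and one binary can serve either behaviour. The shape of that
transformation, reduced to a stand-alone C sketch (the struct and function
names are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct coexist {
	bool auto_report_2ant;	/* runtime flag replacing the #if constant */
};

static void query_bt_info(void)	 { puts("query bt info"); }
static void monitor_bt_ctr(void) { puts("monitor bt counters"); }

static void periodic(struct coexist *c)
{
	/* was: #if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0) ... #else ... */
	if (!c->auto_report_2ant) {
		query_bt_info();
		monitor_bt_ctr();
	} else {
		puts("run coexist mechanism on wifi status change");
	}
}

int main(void)
{
	struct coexist c = { .auto_report_2ant = true };

	periodic(&c);
	return 0;
}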
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
index 2003c8c..a0f3a18 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
@@ -210,11 +210,24 @@
btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL);
}
+static void halbtc8723b1ant_query_bt_info(struct btc_coexist *btcoexist)
+{
+ u8 h2c_parameter[1] = {0};
+
+ coex_sta->c2h_bt_info_req_sent = true;
+
+ /* trigger */
+ h2c_parameter[0] |= BIT(0);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
+}
+
static void halbtc8723b1ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
{
u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
u32 reg_hp_tx = 0, reg_hp_rx = 0;
u32 reg_lp_tx = 0, reg_lp_rx = 0;
+ static u32 num_of_bt_counter_chk;
reg_hp_txrx = 0x770;
reg_lp_txrx = 0x774;
@@ -232,25 +245,122 @@
coex_sta->low_priority_tx = reg_lp_tx;
coex_sta->low_priority_rx = reg_lp_rx;
+ if ((coex_sta->low_priority_tx > 1050) &&
+ (!coex_sta->c2h_bt_inquiry_page))
+ coex_sta->pop_event_cnt++;
+
/* reset counter */
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+
+ /* This part lets the wifi FW and the driver update BT's status as
+ * disabled.
+ *
+ * The flow is as follows:
+ * 1. disable BT
+ * 2. if all BT Tx/Rx counters are 0, query bt info after 6 sec
+ * 3. because BT will not respond via the mailbox, the wifi fw will know
+ * BT is disabled
+ *
+ * 4. the FW will respond with a c2h for BT, so the driver will know BT
+ * is disabled.
+ */
+ if ((reg_hp_tx == 0) && (reg_hp_rx == 0) && (reg_lp_tx == 0) &&
+ (reg_lp_rx == 0)) {
+ num_of_bt_counter_chk++;
+ if (num_of_bt_counter_chk == 3)
+ halbtc8723b1ant_query_bt_info(btcoexist);
+ } else {
+ num_of_bt_counter_chk = 0;
+ }
}
-static void halbtc8723b1ant_query_bt_info(struct btc_coexist *btcoexist)
+static void halbtc8723b1ant_monitor_wifi_ctr(struct btc_coexist *btcoexist)
{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 h2c_parameter[1] = {0};
+ s32 wifi_rssi = 0;
+ bool wifi_busy = false, wifi_under_b_mode = false;
+ static u8 cck_lock_counter;
+ u32 total_cnt;
- coex_sta->c2h_bt_info_req_sent = true;
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_B_MODE,
+ &wifi_under_b_mode);
- /* trigger */
- h2c_parameter[0] |= BIT0;
+ if (coex_sta->under_ips) {
+ coex_sta->crc_ok_cck = 0;
+ coex_sta->crc_ok_11g = 0;
+ coex_sta->crc_ok_11n = 0;
+ coex_sta->crc_ok_11n_agg = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ coex_sta->crc_err_cck = 0;
+ coex_sta->crc_err_11g = 0;
+ coex_sta->crc_err_11n = 0;
+ coex_sta->crc_err_11n_agg = 0;
+ } else {
+ coex_sta->crc_ok_cck =
+ btcoexist->btc_read_4byte(btcoexist, 0xf88);
+ coex_sta->crc_ok_11g =
+ btcoexist->btc_read_2byte(btcoexist, 0xf94);
+ coex_sta->crc_ok_11n =
+ btcoexist->btc_read_2byte(btcoexist, 0xf90);
+ coex_sta->crc_ok_11n_agg =
+ btcoexist->btc_read_2byte(btcoexist, 0xfb8);
- btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
+ coex_sta->crc_err_cck =
+ btcoexist->btc_read_4byte(btcoexist, 0xf84);
+ coex_sta->crc_err_11g =
+ btcoexist->btc_read_2byte(btcoexist, 0xf96);
+ coex_sta->crc_err_11n =
+ btcoexist->btc_read_2byte(btcoexist, 0xf92);
+ coex_sta->crc_err_11n_agg =
+ btcoexist->btc_read_2byte(btcoexist, 0xfba);
+ }
+
+ /* reset counter */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0xf16, 0x1, 0x1);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0xf16, 0x1, 0x0);
+
+ if ((wifi_busy) && (wifi_rssi >= 30) && (!wifi_under_b_mode)) {
+ total_cnt = coex_sta->crc_ok_cck + coex_sta->crc_ok_11g +
+ coex_sta->crc_ok_11n + coex_sta->crc_ok_11n_agg;
+
+ if ((coex_dm->bt_status == BT_8723B_1ANT_BT_STATUS_ACL_BUSY) ||
+ (coex_dm->bt_status ==
+ BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY) ||
+ (coex_dm->bt_status == BT_8723B_1ANT_BT_STATUS_SCO_BUSY)) {
+ if (coex_sta->crc_ok_cck >
+ (total_cnt - coex_sta->crc_ok_cck)) {
+ if (cck_lock_counter < 3)
+ cck_lock_counter++;
+ } else {
+ if (cck_lock_counter > 0)
+ cck_lock_counter--;
+ }
+
+ } else {
+ if (cck_lock_counter > 0)
+ cck_lock_counter--;
+ }
+ } else {
+ if (cck_lock_counter > 0)
+ cck_lock_counter--;
+ }
+
+ if (!coex_sta->pre_ccklock) {
+ if (cck_lock_counter >= 3)
+ coex_sta->cck_lock = true;
+ else
+ coex_sta->cck_lock = false;
+ } else {
+ if (cck_lock_counter == 0)
+ coex_sta->cck_lock = false;
+ else
+ coex_sta->cck_lock = true;
+ }
+
+ if (coex_sta->cck_lock)
+ coex_sta->cck_ever_lock = true;
+
+ coex_sta->pre_ccklock = coex_sta->cck_lock;
}
static bool btc8723b1ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
@@ -297,6 +407,7 @@
bt_link_info->a2dp_exist = coex_sta->a2dp_exist;
bt_link_info->pan_exist = coex_sta->pan_exist;
bt_link_info->hid_exist = coex_sta->hid_exist;
+ bt_link_info->bt_hi_pri_link_exist = coex_sta->bt_hi_pri_link_exist;
/* work around for HS mode. */
if (bt_hs_on) {
@@ -333,6 +444,35 @@
bt_link_info->hid_only = false;
}
+static void halbtc8723b1ant_set_bt_auto_report(struct btc_coexist *btcoexist,
+ bool enable_auto_report)
+{
+ u8 h2c_parameter[1] = {0};
+
+ h2c_parameter[0] = 0;
+
+ if (enable_auto_report)
+ h2c_parameter[0] |= BIT(0);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
+}
+
+static void halbtc8723b1ant_bt_auto_report(struct btc_coexist *btcoexist,
+ bool force_exec,
+ bool enable_auto_report)
+{
+ coex_dm->cur_bt_auto_report = enable_auto_report;
+
+ if (!force_exec) {
+ if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
+ return;
+ }
+ halbtc8723b1ant_set_bt_auto_report(btcoexist,
+ coex_dm->cur_bt_auto_report);
+
+ coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report;
+}
+
static void btc8723b1ant_set_sw_pen_tx_rate_adapt(struct btc_coexist *btcoexist,
bool low_penalty_ra)
{
@@ -430,6 +570,8 @@
static void halbtc8723b1ant_coex_table_with_type(struct btc_coexist *btcoexist,
bool force_exec, u8 type)
{
+ coex_sta->coex_table_type = type;
+
switch (type) {
case 0:
halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555,
@@ -445,24 +587,68 @@
break;
case 3:
halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0xaaaaaaaa, 0xffffff, 0x3);
+ 0x5a5a5a5a, 0xffffff, 0x3);
break;
case 4:
- halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0x5aaa5aaa, 0xffffff, 0x3);
+ if ((coex_sta->cck_ever_lock) && (coex_sta->scan_ap_num <= 5))
+ halbtc8723b1ant_coex_table(btcoexist, force_exec,
+ 0x55555555, 0xaaaa5a5a,
+ 0xffffff, 0x3);
+ else
+ halbtc8723b1ant_coex_table(btcoexist, force_exec,
+ 0x55555555, 0x5a5a5a5a,
+ 0xffffff, 0x3);
break;
case 5:
- halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
- 0xaaaa5a5a, 0xffffff, 0x3);
+ if ((coex_sta->cck_ever_lock) && (coex_sta->scan_ap_num <= 5))
+ halbtc8723b1ant_coex_table(btcoexist, force_exec,
+ 0x5a5a5a5a, 0x5aaa5a5a,
+ 0xffffff, 0x3);
+ else
+ halbtc8723b1ant_coex_table(btcoexist, force_exec,
+ 0x5a5a5a5a, 0x5aaa5a5a,
+ 0xffffff, 0x3);
break;
case 6:
halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0xaaaa5a5a, 0xffffff, 0x3);
+ 0xaaaaaaaa, 0xffffff, 0x3);
break;
case 7:
halbtc8723b1ant_coex_table(btcoexist, force_exec, 0xaaaaaaaa,
0xaaaaaaaa, 0xffffff, 0x3);
break;
+ case 8:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+ 0x5ada5ada, 0xffffff, 0x3);
+ break;
+ case 9:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+ 0x5ada5ada, 0xffffff, 0x3);
+ break;
+ case 10:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+ 0x5ada5ada, 0xffffff, 0x3);
+ break;
+ case 11:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+ 0x5ada5ada, 0xffffff, 0x3);
+ break;
+ case 12:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+ 0x5ada5ada, 0xffffff, 0x3);
+ break;
+ case 13:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
+ 0xaaaaaaaa, 0xffffff, 0x3);
+ break;
+ case 14:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
+ 0x5ada5ada, 0xffffff, 0x3);
+ break;
+ case 15:
+ halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+ 0xaaaaaaaa, 0xffffff, 0x3);
+ break;
default:
break;
}
@@ -611,14 +797,18 @@
}
static void halbtc8723b1ant_set_ant_path(struct btc_coexist *btcoexist,
- u8 ant_pos_type, bool init_hw_cfg,
- bool wifi_off)
+ u8 ant_pos_type, bool force_exec,
+ bool init_hw_cfg, bool wifi_off)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_board_info *board_info = &btcoexist->board_info;
- u32 fw_ver = 0, u32tmp = 0;
+ u32 fw_ver = 0, u32tmp = 0, cnt_bt_cal_chk = 0;
bool pg_ext_switch = false;
bool use_ext_switch = false;
- u8 h2c_parameter[2] = {0};
+ bool is_in_mp_mode = false;
+ u8 h2c_parameter[2] = {0}, u8tmp = 0;
+
+ coex_dm->cur_ant_pos_type = ant_pos_type;
btcoexist->btc_get(btcoexist, BTC_GET_BL_EXT_SWITCH, &pg_ext_switch);
/* [31:16] = fw ver, [15:0] = fw sub ver */
@@ -628,24 +818,103 @@
use_ext_switch = true;
if (init_hw_cfg) {
- /*BT select s0/s1 is controlled by WiFi */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1);
+ /* WiFi TRx Mask on */
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
+ 0x780);
+ /* removed because interrupts are disabled here, so polling c2h
+ * would fail and delay 100ms.
+ */
- /*Force GNT_BT to Normal */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0);
- } else if (wifi_off) {
- /*Force GNT_BT to High */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x3);
- /*BT select s0/s1 is controlled by BT */
+ if (fw_ver >= 0x180000) {
+ /* Use H2C to set GNT_BT to HIGH */
+ h2c_parameter[0] = 1;
+ btcoexist->btc_fill_h2c(btcoexist, 0x6E, 1,
+ h2c_parameter);
+ } else {
+ /* set grant_bt to high */
+ btcoexist->btc_write_1byte(btcoexist, 0x765, 0x18);
+ }
+ /* set wlan_act control by PTA */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
+
+ /* BT select s0/s1 is controlled by BT */
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x0);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x39, 0x8, 0x1);
+ btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x944, 0x3, 0x3);
+ btcoexist->btc_write_1byte(btcoexist, 0x930, 0x77);
+ } else if (wifi_off) {
+ if (fw_ver >= 0x180000) {
+ /* Use H2C to set GNT_BT to HIGH */
+ h2c_parameter[0] = 1;
+ btcoexist->btc_fill_h2c(btcoexist, 0x6E, 1,
+ h2c_parameter);
+ } else {
+ /* set grant_bt to high */
+ btcoexist->btc_write_1byte(btcoexist, 0x765, 0x18);
+ }
+ /* set wlan_act to always low */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
- /* 0x4c[24:23] = 00, Set Antenna control by BT_RFE_CTRL
- * BT Vendor 0xac = 0xf002
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_IS_IN_MP_MODE,
+ &is_in_mp_mode);
+ if (!is_in_mp_mode)
+ /* BT select s0/s1 is controlled by BT */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67,
+ 0x20, 0x0);
+ else
+ /* BT select s0/s1 is controlled by WiFi */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67,
+ 0x20, 0x1);
+
+ /* 0x4c[24:23]=00, Set Antenna control by BT_RFE_CTRL
+ * BT Vendor 0xac=0xf002
*/
u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
u32tmp &= ~BIT23;
u32tmp &= ~BIT24;
btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+ } else {
+ /* Use H2C to set GNT_BT to LOW */
+ if (fw_ver >= 0x180000) {
+ if (btcoexist->btc_read_1byte(btcoexist, 0x765) != 0) {
+ h2c_parameter[0] = 0;
+ btcoexist->btc_fill_h2c(btcoexist, 0x6E, 1,
+ h2c_parameter);
+ }
+ } else {
+ /* BT calibration check */
+ while (cnt_bt_cal_chk <= 20) {
+ u8tmp = btcoexist->btc_read_1byte(btcoexist,
+ 0x49d);
+ cnt_bt_cal_chk++;
+ if (u8tmp & BIT(0)) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], ########### BT is calibrating (wait cnt=%d) ###########\n",
+ cnt_bt_cal_chk);
+ mdelay(50);
+ } else {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], ********** BT is NOT calibrating (wait cnt=%d)**********\n",
+ cnt_bt_cal_chk);
+ break;
+ }
+ }
+
+ /* set grant_bt to PTA */
+ btcoexist->btc_write_1byte(btcoexist, 0x765, 0x0);
+ }
+
+ if (btcoexist->btc_read_1byte(btcoexist, 0x76e) != 0xc) {
+ /* set wlan_act control by PTA */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+ }
+
+ btcoexist->btc_write_1byte_bitmask(
+ btcoexist, 0x67, 0x20,
+ 0x1); /* BT select s0/s1 is controlled by WiFi */
}
if (use_ext_switch) {
@@ -658,216 +927,278 @@
u32tmp |= BIT24;
btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+ /* fixed internal switch S1->WiFi, S0->BT */
+ btcoexist->btc_write_4byte(btcoexist, 0x948, 0x0);
+
if (board_info->btdm_ant_pos ==
BTC_ANTENNA_AT_MAIN_PORT) {
- /* Main Ant to BT for IPS case 0x4c[23] = 1 */
- btcoexist->btc_write_1byte_bitmask(btcoexist,
- 0x64, 0x1,
- 0x1);
-
/* tell firmware "no antenna inverse" */
h2c_parameter[0] = 0;
- h2c_parameter[1] = 1; /*ext switch type*/
+ /* ext switch type */
+ h2c_parameter[1] = 1;
btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
h2c_parameter);
} else {
- /* Aux Ant to BT for IPS case 0x4c[23] = 1 */
- btcoexist->btc_write_1byte_bitmask(btcoexist,
- 0x64, 0x1,
- 0x0);
-
/* tell firmware "antenna inverse" */
h2c_parameter[0] = 1;
- h2c_parameter[1] = 1; /* ext switch type */
+ /* ext switch type */
+ h2c_parameter[1] = 1;
btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
h2c_parameter);
}
}
- /* fixed internal switch first
- * fixed internal switch S1->WiFi, S0->BT
- */
- if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
- btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
- else /* fixed internal switch S0->WiFi, S1->BT */
- btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
-
- /* ext switch setting */
- switch (ant_pos_type) {
- case BTC_ANT_PATH_WIFI:
- if (board_info->btdm_ant_pos ==
- BTC_ANTENNA_AT_MAIN_PORT)
- btcoexist->btc_write_1byte_bitmask(btcoexist,
- 0x92c, 0x3,
- 0x1);
- else
- btcoexist->btc_write_1byte_bitmask(btcoexist,
- 0x92c, 0x3,
- 0x2);
- break;
- case BTC_ANT_PATH_BT:
- if (board_info->btdm_ant_pos ==
- BTC_ANTENNA_AT_MAIN_PORT)
- btcoexist->btc_write_1byte_bitmask(btcoexist,
- 0x92c, 0x3,
- 0x2);
- else
- btcoexist->btc_write_1byte_bitmask(btcoexist,
- 0x92c, 0x3,
- 0x1);
- break;
- default:
- case BTC_ANT_PATH_PTA:
- if (board_info->btdm_ant_pos ==
- BTC_ANTENNA_AT_MAIN_PORT)
- btcoexist->btc_write_1byte_bitmask(btcoexist,
- 0x92c, 0x3,
- 0x1);
- else
- btcoexist->btc_write_1byte_bitmask(btcoexist,
- 0x92c, 0x3,
- 0x2);
- break;
+ if (force_exec ||
+ (coex_dm->cur_ant_pos_type != coex_dm->pre_ant_pos_type)) {
+ /* ext switch setting */
+ switch (ant_pos_type) {
+ case BTC_ANT_PATH_WIFI:
+ if (board_info->btdm_ant_pos ==
+ BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_1byte_bitmask(
+ btcoexist, 0x92c, 0x3, 0x1);
+ else
+ btcoexist->btc_write_1byte_bitmask(
+ btcoexist, 0x92c, 0x3, 0x2);
+ break;
+ case BTC_ANT_PATH_BT:
+ if (board_info->btdm_ant_pos ==
+ BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_1byte_bitmask(
+ btcoexist, 0x92c, 0x3, 0x2);
+ else
+ btcoexist->btc_write_1byte_bitmask(
+ btcoexist, 0x92c, 0x3, 0x1);
+ break;
+ default:
+ case BTC_ANT_PATH_PTA:
+ if (board_info->btdm_ant_pos ==
+ BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_1byte_bitmask(
+ btcoexist, 0x92c, 0x3, 0x1);
+ else
+ btcoexist->btc_write_1byte_bitmask(
+ btcoexist, 0x92c, 0x3, 0x2);
+ break;
+ }
}
-
} else {
if (init_hw_cfg) {
- /* 0x4c[23] = 1, 0x4c[24] = 0 Antenna control by 0x64 */
+ /* 0x4c[23] = 1, 0x4c[24] = 0,
+ * Antenna control by 0x64
+ */
u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
u32tmp |= BIT23;
u32tmp &= ~BIT24;
btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+ /* Fix Ext switch Main->S1, Aux->S0 */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1,
+ 0x0);
+
if (board_info->btdm_ant_pos ==
BTC_ANTENNA_AT_MAIN_PORT) {
- /* Main Ant to WiFi for IPS case 0x4c[23] = 1 */
- btcoexist->btc_write_1byte_bitmask(btcoexist,
- 0x64, 0x1,
- 0x0);
-
/* tell firmware "no antenna inverse" */
h2c_parameter[0] = 0;
- h2c_parameter[1] = 0; /* internal switch type */
+ /* internal switch type */
+ h2c_parameter[1] = 0;
btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
h2c_parameter);
} else {
- /* Aux Ant to BT for IPS case 0x4c[23] = 1 */
- btcoexist->btc_write_1byte_bitmask(btcoexist,
- 0x64, 0x1,
- 0x1);
-
/* tell firmware "antenna inverse" */
h2c_parameter[0] = 1;
- h2c_parameter[1] = 0; /* internal switch type */
+ /* internal switch type */
+ h2c_parameter[1] = 0;
btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
h2c_parameter);
}
}
- /* fixed external switch first
- * Main->WiFi, Aux->BT
- */
- if (board_info->btdm_ant_pos ==
- BTC_ANTENNA_AT_MAIN_PORT)
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
- 0x3, 0x1);
- else /* Main->BT, Aux->WiFi */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
- 0x3, 0x2);
-
- /* internal switch setting */
- switch (ant_pos_type) {
- case BTC_ANT_PATH_WIFI:
- if (board_info->btdm_ant_pos ==
- BTC_ANTENNA_AT_MAIN_PORT)
- btcoexist->btc_write_2byte(btcoexist, 0x948,
- 0x0);
- else
- btcoexist->btc_write_2byte(btcoexist, 0x948,
- 0x280);
- break;
- case BTC_ANT_PATH_BT:
- if (board_info->btdm_ant_pos ==
- BTC_ANTENNA_AT_MAIN_PORT)
- btcoexist->btc_write_2byte(btcoexist, 0x948,
- 0x280);
- else
- btcoexist->btc_write_2byte(btcoexist, 0x948,
- 0x0);
- break;
- default:
- case BTC_ANT_PATH_PTA:
- if (board_info->btdm_ant_pos ==
- BTC_ANTENNA_AT_MAIN_PORT)
- btcoexist->btc_write_2byte(btcoexist, 0x948,
- 0x200);
- else
- btcoexist->btc_write_2byte(btcoexist, 0x948,
- 0x80);
- break;
+ if (force_exec ||
+ (coex_dm->cur_ant_pos_type != coex_dm->pre_ant_pos_type)) {
+ /* internal switch setting */
+ switch (ant_pos_type) {
+ case BTC_ANT_PATH_WIFI:
+ if (board_info->btdm_ant_pos ==
+ BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_4byte(btcoexist,
+ 0x948, 0x0);
+ else
+ btcoexist->btc_write_4byte(btcoexist,
+ 0x948, 0x280);
+ break;
+ case BTC_ANT_PATH_BT:
+ if (board_info->btdm_ant_pos ==
+ BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_4byte(btcoexist,
+ 0x948, 0x280);
+ else
+ btcoexist->btc_write_4byte(btcoexist,
+ 0x948, 0x0);
+ break;
+ default:
+ case BTC_ANT_PATH_PTA:
+ if (board_info->btdm_ant_pos ==
+ BTC_ANTENNA_AT_MAIN_PORT)
+ btcoexist->btc_write_4byte(btcoexist,
+ 0x948, 0x200);
+ else
+ btcoexist->btc_write_4byte(btcoexist,
+ 0x948, 0x80);
+ break;
+ }
}
}
+
+ coex_dm->pre_ant_pos_type = coex_dm->cur_ant_pos_type;
}
static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist,
bool force_exec, bool turn_on, u8 type)
{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
bool wifi_busy = false;
u8 rssi_adjust_val = 0;
+ u8 ps_tdma_byte0_val = 0x51;
+ u8 ps_tdma_byte3_val = 0x10;
+ u8 ps_tdma_byte4_val = 0x50;
+ s8 wifi_duration_adjust = 0x0;
+ static bool pre_wifi_busy;
coex_dm->cur_ps_tdma_on = turn_on;
coex_dm->cur_ps_tdma = type;
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
- if (!force_exec) {
- if (coex_dm->cur_ps_tdma_on)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ******** TDMA(on, %d) *********\n",
- coex_dm->cur_ps_tdma);
- else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ******** TDMA(off, %d) ********\n",
- coex_dm->cur_ps_tdma);
+ if (wifi_busy != pre_wifi_busy) {
+ force_exec = true;
+ pre_wifi_busy = wifi_busy;
+ }
+ if (!force_exec) {
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
return;
}
+
+ if (coex_sta->scan_ap_num <= 5) {
+ wifi_duration_adjust = 5;
+
+ if (coex_sta->a2dp_bit_pool >= 35)
+ wifi_duration_adjust = -10;
+ else if (coex_sta->a2dp_bit_pool >= 45)
+ wifi_duration_adjust = -15;
+ } else if (coex_sta->scan_ap_num >= 40) {
+ wifi_duration_adjust = -15;
+
+ if (coex_sta->a2dp_bit_pool < 35)
+ wifi_duration_adjust = -5;
+ else if (coex_sta->a2dp_bit_pool < 45)
+ wifi_duration_adjust = -10;
+ } else if (coex_sta->scan_ap_num >= 20) {
+ wifi_duration_adjust = -10;
+
+ if (coex_sta->a2dp_bit_pool >= 45)
+ wifi_duration_adjust = -15;
+ } else {
+ wifi_duration_adjust = 0;
+
+ if (coex_sta->a2dp_bit_pool >= 35)
+ wifi_duration_adjust = -10;
+ else if (coex_sta->a2dp_bit_pool >= 45)
+ wifi_duration_adjust = -15;
+ }
+
+ if ((type == 1) || (type == 2) || (type == 9) || (type == 11) ||
+ (type == 101) || (type == 102) || (type == 109) || (type == 101)) {
+ if (!coex_sta->force_lps_on) {
+ /* Native power save TDMA, only for A2DP-only case
+ * 1/2/9/11 while wifi noisy threshold > 30
+ */
+
+ /* no null-pkt */
+ ps_tdma_byte0_val = 0x61;
+ /* no tx-pause at BT-slot */
+ ps_tdma_byte3_val = 0x11;
+ /* 0x778 = d/1 toggle, no dynamic slot */
+ ps_tdma_byte4_val = 0x10;
+ } else {
+ /* null-pkt */
+ ps_tdma_byte0_val = 0x51;
+ /* tx-pause at BT-slot */
+ ps_tdma_byte3_val = 0x10;
+ /* 0x778 = d/1 toggle, dynamic slot */
+ ps_tdma_byte4_val = 0x50;
+ }
+ } else if ((type == 3) || (type == 13) || (type == 14) ||
+ (type == 103) || (type == 113) || (type == 114)) {
+ /* null-pkt */
+ ps_tdma_byte0_val = 0x51;
+ /* tx-pause at BT-slot */
+ ps_tdma_byte3_val = 0x10;
+ /* 0x778 = d/1 toggle, no dynamic slot */
+ ps_tdma_byte4_val = 0x10;
+ } else { /* native power save case */
+ /* no null-pkt */
+ ps_tdma_byte0_val = 0x61;
+ /* no tx-pause at BT-slot */
+ ps_tdma_byte3_val = 0x11;
+ /* 0x778 = d/1 toggle, no dynamic slot */
+ ps_tdma_byte4_val = 0x11;
+ /* ps_tdma_byte4_val is not defined for the 0x778 = d/1, 1/1 case */
+ }
+
+ /* if (bt_link_info->slave_role) */
+ if ((bt_link_info->slave_role) && (bt_link_info->a2dp_exist))
+ /* 0x778 = 0x1 at wifi slot (no blocking BT Low-Pri pkts) */
+ ps_tdma_byte4_val = ps_tdma_byte4_val | 0x1;
+
+ if (type > 100) {
+ /* set antenna control by SW */
+ ps_tdma_byte0_val = ps_tdma_byte0_val | 0x82;
+ /* set antenna no toggle, control by antenna diversity */
+ ps_tdma_byte3_val = ps_tdma_byte3_val | 0x60;
+ }
+
if (turn_on) {
switch (type) {
default:
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1a,
- 0x1a, 0x0, 0x50);
+ 0x1a, 0x0,
+ ps_tdma_byte4_val);
break;
case 1:
- halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x3a,
- 0x03, 0x10, 0x50);
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val,
+ 0x3a + wifi_duration_adjust, 0x03,
+ ps_tdma_byte3_val, ps_tdma_byte4_val);
rssi_adjust_val = 11;
break;
case 2:
- halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x2b,
- 0x03, 0x10, 0x50);
- rssi_adjust_val = 14;
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val,
+ 0x2d + wifi_duration_adjust, 0x03,
+ ps_tdma_byte3_val, ps_tdma_byte4_val);
break;
case 3:
- halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1d,
- 0x1d, 0x0, 0x52);
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x30, 0x03,
+ ps_tdma_byte3_val, ps_tdma_byte4_val);
break;
case 4:
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x15,
- 0x3, 0x14, 0x0);
- rssi_adjust_val = 17;
+ 0x3, 0x14, 0x0);
break;
case 5:
- halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x15,
- 0x3, 0x11, 0x10);
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x1f, 0x3,
+ ps_tdma_byte3_val, 0x11);
break;
case 6:
- halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x20,
- 0x3, 0x11, 0x13);
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x20, 0x3,
+ ps_tdma_byte3_val, 0x11);
break;
case 7:
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xc,
@@ -875,33 +1206,44 @@
break;
case 8:
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x25,
- 0x3, 0x10, 0x0);
+ 0x3, 0x10, 0x0);
break;
case 9:
- halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x21,
- 0x3, 0x10, 0x50);
- rssi_adjust_val = 18;
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x21, 0x3,
+ ps_tdma_byte3_val, ps_tdma_byte4_val);
break;
case 10:
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa,
0xa, 0x0, 0x40);
break;
case 11:
- halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x15,
- 0x03, 0x10, 0x50);
- rssi_adjust_val = 20;
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x21, 0x03,
+ ps_tdma_byte3_val, ps_tdma_byte4_val);
break;
case 12:
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x0a,
- 0x0a, 0x0, 0x50);
+ 0x0a, 0x0, 0x50);
break;
case 13:
- halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x15,
- 0x15, 0x0, 0x50);
+ if (coex_sta->scan_ap_num <= 3)
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x40, 0x3,
+ ps_tdma_byte3_val, ps_tdma_byte4_val);
+ else
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x21, 0x3,
+ ps_tdma_byte3_val, ps_tdma_byte4_val);
break;
case 14:
- halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x21,
- 0x3, 0x10, 0x52);
+ if (coex_sta->scan_ap_num <= 3)
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, 0x51, 0x30, 0x3, 0x10, 0x50);
+ else
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x21, 0x3,
+ ps_tdma_byte3_val, ps_tdma_byte4_val);
break;
case 15:
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa,
@@ -909,103 +1251,173 @@
break;
case 16:
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x15,
- 0x3, 0x10, 0x0);
- rssi_adjust_val = 18;
+ 0x3, 0x10, 0x0);
break;
case 18:
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x25,
- 0x3, 0x10, 0x0);
- rssi_adjust_val = 14;
+ 0x3, 0x10, 0x0);
break;
case 20:
- halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x35,
- 0x03, 0x11, 0x10);
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x3f, 0x03,
+ ps_tdma_byte3_val, 0x10);
break;
case 21:
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x25,
- 0x03, 0x11, 0x11);
+ 0x03, 0x11, 0x11);
break;
case 22:
- halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x25,
- 0x03, 0x11, 0x10);
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x25, 0x03,
+ ps_tdma_byte3_val, 0x10);
break;
case 23:
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
- 0x3, 0x31, 0x18);
- rssi_adjust_val = 22;
+ 0x3, 0x31, 0x18);
break;
case 24:
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
- 0x3, 0x31, 0x18);
- rssi_adjust_val = 22;
+ 0x3, 0x31, 0x18);
break;
case 25:
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
0x3, 0x31, 0x18);
- rssi_adjust_val = 22;
break;
case 26:
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
0x3, 0x31, 0x18);
- rssi_adjust_val = 22;
break;
case 27:
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
- 0x3, 0x31, 0x98);
- rssi_adjust_val = 22;
+ 0x3, 0x31, 0x98);
break;
case 28:
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x69, 0x25,
- 0x3, 0x31, 0x0);
+ 0x3, 0x31, 0x0);
break;
case 29:
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xab, 0x1a,
- 0x1a, 0x1, 0x10);
+ 0x1a, 0x1, 0x10);
break;
case 30:
- halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x14,
- 0x3, 0x10, 0x50);
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x30,
+ 0x3, 0x10, 0x10);
break;
case 31:
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x1a,
- 0x1a, 0, 0x58);
+ 0x1a, 0, 0x58);
break;
case 32:
- halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0xa,
- 0x3, 0x10, 0x0);
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x35, 0x3,
+ ps_tdma_byte3_val, ps_tdma_byte4_val);
break;
case 33:
- halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x25,
- 0x3, 0x30, 0x90);
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x35, 0x3,
+ ps_tdma_byte3_val, 0x10);
break;
case 34:
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x53, 0x1a,
- 0x1a, 0x0, 0x10);
+ 0x1a, 0x0, 0x10);
break;
case 35:
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x63, 0x1a,
- 0x1a, 0x0, 0x10);
+ 0x1a, 0x0, 0x10);
break;
case 36:
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x12,
- 0x3, 0x14, 0x50);
+ 0x3, 0x14, 0x50);
break;
- /* SoftAP only with no sta associated, BT disable,
- * TDMA mode for power saving
- * here softap mode screen off will cost 70-80mA for phone
- */
case 40:
+			/* SoftAP only with no STA associated, BT disabled,
+			 * TDMA mode for power saving
+			 *
+			 * here SoftAP mode with the screen off costs
+			 * 70-80 mA on a phone
+			 */
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x23, 0x18,
- 0x00, 0x10, 0x24);
+ 0x00, 0x10, 0x24);
+ break;
+
+ case 101:
+ /* for 1-Ant translate to 2-Ant */
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val,
+ 0x3a + wifi_duration_adjust, 0x03,
+ ps_tdma_byte3_val, ps_tdma_byte4_val);
+ break;
+ case 102:
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val,
+ 0x2d + wifi_duration_adjust, 0x03,
+ ps_tdma_byte3_val, ps_tdma_byte4_val);
+ break;
+ case 103:
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x3a, 0x03,
+ ps_tdma_byte3_val, ps_tdma_byte4_val);
+ break;
+ case 105:
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x15, 0x3,
+ ps_tdma_byte3_val, 0x11);
+ break;
+ case 106:
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x20, 0x3,
+ ps_tdma_byte3_val, 0x11);
+ break;
+ case 109:
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x21, 0x3,
+ ps_tdma_byte3_val, ps_tdma_byte4_val);
+ break;
+ case 111:
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x21, 0x03,
+ ps_tdma_byte3_val, ps_tdma_byte4_val);
+ break;
+ case 113:
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x21, 0x3,
+ ps_tdma_byte3_val, ps_tdma_byte4_val);
+ break;
+ case 114:
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x21, 0x3,
+ ps_tdma_byte3_val, ps_tdma_byte4_val);
+ break;
+ case 120:
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x3f, 0x03,
+ ps_tdma_byte3_val, 0x10);
+ break;
+ case 122:
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x25, 0x03,
+ ps_tdma_byte3_val, 0x10);
+ break;
+ case 132:
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x25, 0x03,
+ ps_tdma_byte3_val, ps_tdma_byte4_val);
+ break;
+ case 133:
+ halbtc8723b1ant_set_fw_ps_tdma(
+ btcoexist, ps_tdma_byte0_val, 0x25, 0x03,
+ ps_tdma_byte3_val, 0x11);
break;
}
} else {
+ /* disable PS tdma */
switch (type) {
case 8: /* PTA Control */
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x8, 0x0,
0x0, 0x0, 0x0);
halbtc8723b1ant_set_ant_path(btcoexist,
BTC_ANT_PATH_PTA,
+ FORCE_EXEC,
false, false);
break;
case 0:
@@ -1013,17 +1425,10 @@
/* Software control, Antenna at BT side */
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0,
0x0, 0x0, 0x0);
- halbtc8723b1ant_set_ant_path(btcoexist,
- BTC_ANT_PATH_BT,
- false, false);
break;
- case 9:
- /* Software control, Antenna at WiFi side */
- halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0,
- 0x0, 0x0, 0x0);
- halbtc8723b1ant_set_ant_path(btcoexist,
- BTC_ANT_PATH_WIFI,
- false, false);
+ case 1: /* 2-Ant, 0x778=3, antenna control by ant diversity */
+ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+ 0x48, 0x0);
break;
}
}
@@ -1037,8 +1442,191 @@
coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
}
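+
+/* adaptively trade WiFi/BT airtime for BT ACL traffic: sample the BT retry
+ * counter every 2 seconds and walk between PS-TDMA types 1/2/9/11, widening
+ * the WiFi slot after n clean intervals and narrowing it on retries
+ */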
+void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
+ u8 wifi_status)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ static s32 up, dn, m, n, wait_count;
+ /* 0: no change, +1: increase WiFi duration,
+ * -1: decrease WiFi duration
+ */
+ s32 result;
+ u8 retry_count = 0, bt_info_ext;
+ bool wifi_busy = false;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TdmaDurationAdjustForAcl()\n");
+
+ if (wifi_status == BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY)
+ wifi_busy = true;
+ else
+ wifi_busy = false;
+
+ if ((wifi_status ==
+ BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN) ||
+ (wifi_status == BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN) ||
+ (wifi_status == BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT)) {
+ if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 &&
+ coex_dm->cur_ps_tdma != 3 && coex_dm->cur_ps_tdma != 9) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+
+ up = 0;
+ dn = 0;
+ m = 1;
+ n = 3;
+ result = 0;
+ wait_count = 0;
+ }
+ return;
+ }
+
+ if (!coex_dm->auto_tdma_adjust) {
+ coex_dm->auto_tdma_adjust = true;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
+
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+
+ up = 0;
+ dn = 0;
+ m = 1;
+ n = 3;
+ result = 0;
+ wait_count = 0;
+ } else {
+ /* acquire the BT TRx retry count from BT_Info byte2 */
+ retry_count = coex_sta->bt_retry_cnt;
+ bt_info_ext = coex_sta->bt_info_ext;
+
+ if ((coex_sta->low_priority_tx) > 1050 ||
+ (coex_sta->low_priority_rx) > 1250)
+ retry_count++;
+
+ result = 0;
+ wait_count++;
+ /* no retry in the last 2-second duration */
+ if (retry_count == 0) {
+ up++;
+ dn--;
+
+ if (dn <= 0)
+ dn = 0;
+
+ if (up >= n) {
+ /* if retry count during continuous n*2 seconds
+ * is 0, enlarge WiFi duration
+ */
+ wait_count = 0;
+ n = 3;
+ up = 0;
+ dn = 0;
+ result = 1;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Increase wifi duration!!\n");
+ }
+ } else if (retry_count <= 3) {
+ /* <=3 retry in the last 2-second duration */
+ up--;
+ dn++;
+
+ if (up <= 0)
+ up = 0;
+
+ if (dn == 2) {
+				/* if the retry count (sampled every 2
+				 * seconds) is > 0 and < 3 for two
+				 * consecutive intervals, reduce the WiFi
+				 * duration
+				 */
+ if (wait_count <= 2)
+ /* avoid loop between the two levels */
+ m++;
+ else
+ m = 1;
+
+ if (m >= 20)
+				/* cap m at 20: recheck whether the wifi
+				 * duration needs adjusting at most every
+				 * 120 seconds
+				 */
+ m = 20;
+
+ n = 3 * m;
+ up = 0;
+ dn = 0;
+ wait_count = 0;
+ result = -1;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+ }
+ } else {
+			/* retry count > 3: reduce the WiFi duration
+			 * immediately
+			 */
+ if (wait_count == 1)
+ /* to avoid loop between the two levels */
+ m++;
+ else
+ m = 1;
+
+ if (m >= 20)
+			/* cap m at 20: recheck whether the wifi duration
+			 * needs adjusting at most every 120 seconds
+			 */
+ m = 20;
+
+ n = 3 * m;
+ up = 0;
+ dn = 0;
+ wait_count = 0;
+ result = -1;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+ }
+
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 1) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 11) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 1);
+ coex_dm->ps_tdma_du_adj_type = 1;
+ }
+ }
+
+ if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 &&
+ coex_dm->cur_ps_tdma != 9 && coex_dm->cur_ps_tdma != 11) {
+ /* recover to previous adjust type */
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
+ coex_dm->ps_tdma_du_adj_type);
+ }
+ }
+}
+
static void halbtc8723b1ant_ps_tdma_chk_pwr_save(struct btc_coexist *btcoexist,
- bool new_ps_state)
+ bool new_ps_state)
{
u8 lps_mode = 0x0;
@@ -1078,6 +1666,7 @@
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+ coex_sta->force_lps_on = false;
break;
case BTC_PS_LPS_ON:
halbtc8723b1ant_ps_tdma_chk_pwr_save(btcoexist, true);
@@ -1089,27 +1678,95 @@
&low_pwr_disable);
/* power save must executed before psTdma */
btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ coex_sta->force_lps_on = true;
break;
case BTC_PS_LPS_OFF:
halbtc8723b1ant_ps_tdma_chk_pwr_save(btcoexist, false);
btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ coex_sta->force_lps_on = false;
break;
default:
break;
}
}
+static void halbtc8723b1ant_action_wifi_only(struct btc_coexist *btcoexist)
+{
+ halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+ halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
+ FORCE_EXEC, false, false);
+}
+
+/* check if BT is disabled */
+static void halbtc8723b1ant_monitor_bt_enable_disable(struct btc_coexist
+ *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ static u32 bt_disable_cnt;
+ bool bt_active = true, bt_disabled = false;
+
+ if (coex_sta->high_priority_tx == 0 &&
+ coex_sta->high_priority_rx == 0 && coex_sta->low_priority_tx == 0 &&
+ coex_sta->low_priority_rx == 0)
+ bt_active = false;
+ if (coex_sta->high_priority_tx == 0xffff &&
+ coex_sta->high_priority_rx == 0xffff &&
+ coex_sta->low_priority_tx == 0xffff &&
+ coex_sta->low_priority_rx == 0xffff)
+ bt_active = false;
+ if (bt_active) {
+ bt_disable_cnt = 0;
+ bt_disabled = false;
+ } else {
+ bt_disable_cnt++;
+ if (bt_disable_cnt >= 2)
+ bt_disabled = true;
+ }
+ if (coex_sta->bt_disabled != bt_disabled) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is from %s to %s!!\n",
+ (coex_sta->bt_disabled ? "disabled" : "enabled"),
+ (bt_disabled ? "disabled" : "enabled"));
+
+ coex_sta->bt_disabled = bt_disabled;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
+ &bt_disabled);
+ if (bt_disabled) {
+ halbtc8723b1ant_action_wifi_only(btcoexist);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS,
+ NULL);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS,
+ NULL);
+ }
+ }
+}
+
/*****************************************************
*
* Non-Software Coex Mechanism start
*
*****************************************************/
+
+static void halbtc8723b1ant_action_bt_whck_test(struct btc_coexist *btcoexist)
+{
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0,
+ 0x0);
+
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA, NORMAL_EXEC,
+ false, false);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+}
+
static void halbtc8723b1ant_action_wifi_multiport(struct btc_coexist *btcoexist)
{
halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
0x0, 0x0);
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA, NORMAL_EXEC,
+ false, false);
halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
}
@@ -1123,35 +1780,56 @@
{
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
bool wifi_connected = false, ap_enable = false;
+ bool wifi_busy = false, bt_busy = false;
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
&ap_enable);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
- if (!wifi_connected) {
- halbtc8723b1ant_power_save_state(btcoexist,
- BTC_PS_WIFI_NATIVE, 0x0, 0x0);
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
- halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
- } else if (bt_link_info->sco_exist || bt_link_info->hid_only) {
- /* SCO/HID-only busy */
+ if (coex_sta->bt_abnormal_scan) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 33);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+ } else if (!wifi_connected && !coex_sta->wifi_is_high_pri_task) {
halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
0x0, 0x0);
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32);
- halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
- } else {
- if (ap_enable)
- halbtc8723b1ant_power_save_state(btcoexist,
- BTC_PS_WIFI_NATIVE,
- 0x0, 0x0);
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
+ NORMAL_EXEC, false, false);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+ } else if (bt_link_info->sco_exist || bt_link_info->hid_exist ||
+ bt_link_info->a2dp_exist) {
+ /* SCO/HID/A2DP busy */
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ if (coex_sta->c2h_bt_remote_name_req)
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
+ 33);
else
- halbtc8723b1ant_power_save_state(btcoexist,
- BTC_PS_LPS_ON,
- 0x50, 0x4);
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
+ 32);
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 30);
- halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
+ } else if (bt_link_info->pan_exist || wifi_busy) {
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ if (coex_sta->c2h_bt_remote_name_req)
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
+ 33);
+ else
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
+ 32);
+
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
+ } else {
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
+ NORMAL_EXEC, false, false);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
}
}
@@ -1167,7 +1845,7 @@
/* tdma and coex table */
if (bt_link_info->sco_exist) {
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
- halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5);
} else {
/* HID */
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
@@ -1181,6 +1859,10 @@
{
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
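+
+	/* sustained low-priority RX while not in IPS implies that BT is the
+	 * slave of the link
+	 */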
+ if ((coex_sta->low_priority_rx >= 950) && (!coex_sta->under_ips))
+ bt_link_info->slave_role = true;
+ else
+ bt_link_info->slave_role = false;
if (bt_link_info->hid_only) { /* HID */
btc8723b1ant_act_bt_sco_hid_only_busy(btcoexist, wifi_status);
@@ -1189,39 +1871,40 @@
} else if (bt_link_info->a2dp_only) { /* A2DP */
if (wifi_status == BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE) {
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- false, 8);
+ true, 32);
halbtc8723b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 2);
+ NORMAL_EXEC, 4);
coex_dm->auto_tdma_adjust = false;
- } else { /* for low BT RSSI */
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
+ } else {
+ btc8723b1ant_tdma_dur_adj_for_acl(btcoexist,
+ wifi_status);
halbtc8723b1ant_coex_table_with_type(btcoexist,
NORMAL_EXEC, 1);
- coex_dm->auto_tdma_adjust = false;
+ coex_dm->auto_tdma_adjust = true;
}
- } else if (bt_link_info->hid_exist &&
- bt_link_info->a2dp_exist) { /* HID + A2DP */
+ } else if (((bt_link_info->a2dp_exist) && (bt_link_info->pan_exist)) ||
+ (bt_link_info->hid_exist && bt_link_info->a2dp_exist &&
+ bt_link_info->pan_exist)) {
+ /* A2DP + PAN(OPP,FTP), HID + A2DP + PAN(OPP,FTP) */
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
+ coex_dm->auto_tdma_adjust = false;
+ } else if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) {
+ /* HID + A2DP */
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
coex_dm->auto_tdma_adjust = false;
- halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6);
- /* PAN(OPP,FTP), HID + PAN(OPP,FTP) */
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
} else if (bt_link_info->pan_only ||
- (bt_link_info->hid_exist && bt_link_info->pan_exist)) {
+ (bt_link_info->hid_exist && bt_link_info->pan_exist)) {
+ /* PAN(OPP,FTP), HID + PAN(OPP,FTP) */
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
- halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6);
- coex_dm->auto_tdma_adjust = false;
- /* A2DP + PAN(OPP,FTP), HID + A2DP + PAN(OPP,FTP) */
- } else if ((bt_link_info->a2dp_exist && bt_link_info->pan_exist) ||
- (bt_link_info->hid_exist && bt_link_info->a2dp_exist &&
- bt_link_info->pan_exist)) {
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
- halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
coex_dm->auto_tdma_adjust = false;
} else {
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
- halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ /* BT no-profile busy (0x9) */
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 33);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
coex_dm->auto_tdma_adjust = false;
}
}
@@ -1233,7 +1916,9 @@
0x0, 0x0);
/* tdma and coex table */
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA, NORMAL_EXEC,
+ false, false);
halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
}
@@ -1246,30 +1931,31 @@
0x0, 0x0);
/* tdma and coex table */
- if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
- if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) {
+ if (coex_dm->bt_status == BT_8723B_1ANT_BT_STATUS_ACL_BUSY) {
+ if (bt_link_info->a2dp_exist) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 32);
+ halbtc8723b1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 4);
+		} else if (bt_link_info->pan_exist) {
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 22);
halbtc8723b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 1);
- } else if (bt_link_info->pan_only) {
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 20);
- halbtc8723b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 2);
+ NORMAL_EXEC, 4);
} else {
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 20);
halbtc8723b1ant_coex_table_with_type(btcoexist,
NORMAL_EXEC, 1);
}
- } else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
- (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY ==
- coex_dm->bt_status)){
+ } else if (coex_dm->bt_status == BT_8723B_1ANT_BT_STATUS_SCO_BUSY ||
+		   coex_dm->bt_status == BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY) {
btc8723b1ant_act_bt_sco_hid_only_busy(btcoexist,
BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN);
} else {
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
+ NORMAL_EXEC, false, false);
halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
}
}
@@ -1282,14 +1968,19 @@
halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
0x0, 0x0);
- if ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) ||
- (bt_link_info->sco_exist) || (bt_link_info->hid_only) ||
- (bt_link_info->a2dp_only) || (bt_link_info->pan_only)) {
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
- halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
- } else {
+ /* tdma and coex table */
+ if ((bt_link_info->sco_exist) || (bt_link_info->hid_exist) ||
+ (bt_link_info->a2dp_exist)) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 4);
+ } else if (bt_link_info->pan_exist) {
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
- halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 4);
+ } else {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
+ NORMAL_EXEC, false, false);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 2);
}
}
@@ -1301,30 +1992,32 @@
0x0, 0x0);
/* tdma and coex table */
- if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
- if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) {
+ if (coex_dm->bt_status == BT_8723B_1ANT_BT_STATUS_ACL_BUSY) {
+ if (bt_link_info->a2dp_exist) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 32);
+ halbtc8723b1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 4);
+		} else if (bt_link_info->pan_exist) {
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 22);
halbtc8723b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 1);
- } else if (bt_link_info->pan_only) {
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 20);
- halbtc8723b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 2);
+ NORMAL_EXEC, 4);
} else {
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 20);
halbtc8723b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 1);
+ NORMAL_EXEC, 4);
}
- } else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
- (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY ==
- coex_dm->bt_status)) {
+ } else if (coex_dm->bt_status == BT_8723B_1ANT_BT_STATUS_SCO_BUSY ||
+ coex_dm->bt_status == BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY) {
btc8723b1ant_act_bt_sco_hid_only_busy(btcoexist,
BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN);
} else {
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
+ NORMAL_EXEC, false, false);
halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
}
}
@@ -1332,23 +2025,34 @@
static void halbtc8723b1ant_action_wifi_connected_special_packet(
struct btc_coexist *btcoexist)
{
- bool hs_connecting = false;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool wifi_busy = false;
- btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+	/* no special-packet processing when both WiFi and BT are very busy */
+ if ((wifi_busy) &&
+ ((bt_link_info->pan_exist) || (coex_sta->num_of_profile >= 2)))
+ return;
halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
0x0, 0x0);
/* tdma and coex table */
- if ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) ||
- (bt_link_info->sco_exist) || (bt_link_info->hid_only) ||
- (bt_link_info->a2dp_only) || (bt_link_info->pan_only)) {
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
- halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
- } else {
+ if ((bt_link_info->sco_exist) || (bt_link_info->hid_exist)) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5);
+ } else if (bt_link_info->a2dp_exist) {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
+ } else if (bt_link_info->pan_exist) {
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
- halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
+ } else {
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
+ NORMAL_EXEC, false, false);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
}
}
@@ -1391,12 +2095,31 @@
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
/* power save state */
if (!ap_enable &&
- BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status &&
+ coex_dm->bt_status == BT_8723B_1ANT_BT_STATUS_ACL_BUSY &&
!btcoexist->bt_link_info.hid_only) {
- if (!wifi_busy && btcoexist->bt_link_info.a2dp_only)
- halbtc8723b1ant_power_save_state(btcoexist,
+ if (btcoexist->bt_link_info.a2dp_only) {
+ if (!wifi_busy) {
+ halbtc8723b1ant_power_save_state(btcoexist,
BTC_PS_WIFI_NATIVE,
0x0, 0x0);
+ } else { /* busy */
+ if (coex_sta->scan_ap_num >=
+ BT_8723B_1ANT_WIFI_NOISY_THRESH)
+ /* no force LPS, no PS-TDMA,
+ * use pure TDMA
+ */
+ halbtc8723b1ant_power_save_state(
+ btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ else
+ halbtc8723b1ant_power_save_state(
+ btcoexist, BTC_PS_LPS_ON, 0x50,
+ 0x4);
+ }
+ } else if ((!coex_sta->pan_exist) && (!coex_sta->a2dp_exist) &&
+ (!coex_sta->hid_exist))
+ halbtc8723b1ant_power_save_state(
+ btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
else
halbtc8723b1ant_power_save_state(btcoexist,
BTC_PS_LPS_ON,
@@ -1407,36 +2130,44 @@
}
/* tdma and coex table */
if (!wifi_busy) {
- if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
- halbtc8723b1ant_action_wifi_connected_bt_acl_busy(btcoexist,
- BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE);
- } else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY ==
- coex_dm->bt_status) ||
- (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY ==
- coex_dm->bt_status)) {
+ if (coex_dm->bt_status == BT_8723B_1ANT_BT_STATUS_ACL_BUSY) {
+ halbtc8723b1ant_action_wifi_connected_bt_acl_busy(
+ btcoexist,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE);
+ } else if (coex_dm->bt_status ==
+ BT_8723B_1ANT_BT_STATUS_SCO_BUSY ||
+ coex_dm->bt_status ==
+ BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY) {
btc8723b1ant_act_bt_sco_hid_only_busy(btcoexist,
BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE);
} else {
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
false, 8);
+ halbtc8723b1ant_set_ant_path(btcoexist,
+ BTC_ANT_PATH_PTA,
+ NORMAL_EXEC, false, false);
halbtc8723b1ant_coex_table_with_type(btcoexist,
NORMAL_EXEC, 2);
}
} else {
- if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
- halbtc8723b1ant_action_wifi_connected_bt_acl_busy(btcoexist,
- BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY);
- } else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY ==
- coex_dm->bt_status) ||
- (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY ==
- coex_dm->bt_status)) {
+ if (coex_dm->bt_status == BT_8723B_1ANT_BT_STATUS_ACL_BUSY) {
+ halbtc8723b1ant_action_wifi_connected_bt_acl_busy(
+ btcoexist,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY);
+ } else if (coex_dm->bt_status ==
+ BT_8723B_1ANT_BT_STATUS_SCO_BUSY ||
+ coex_dm->bt_status ==
+ BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY) {
btc8723b1ant_act_bt_sco_hid_only_busy(btcoexist,
BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY);
} else {
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- false, 8);
+ true, 32);
+ halbtc8723b1ant_set_ant_path(btcoexist,
+ BTC_ANT_PATH_PTA,
+ NORMAL_EXEC, false, false);
halbtc8723b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 2);
+ NORMAL_EXEC, 4);
}
}
}
@@ -1445,12 +2176,15 @@
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool wifi_connected = false, bt_hs_on = false;
+ bool wifi_connected = false, bt_hs_on = false, wifi_busy = false;
bool increase_scan_dev_num = false;
bool bt_ctrl_agg_buf_size = false;
+ bool miracast_plus_bt = false;
u8 agg_buf_size = 5;
+ u8 iot_peer = BTC_IOT_PEER_UNKNOWN;
u32 wifi_link_status = 0;
u32 num_of_wifi_link = 0;
+ u32 wifi_bw;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], RunCoexistMechanism()===>\n");
@@ -1473,54 +2207,99 @@
return;
}
- if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
- (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
- (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) {
- increase_scan_dev_num = true;
+ if (coex_sta->bt_whck_test) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+			 "[BTCoex], BT is under WHCK test!!!\n");
+ halbtc8723b1ant_action_bt_whck_test(btcoexist);
+ return;
}
+ if (coex_dm->bt_status == BT_8723B_1ANT_BT_STATUS_ACL_BUSY ||
+ coex_dm->bt_status == BT_8723B_1ANT_BT_STATUS_SCO_BUSY ||
+ coex_dm->bt_status == BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY)
+ increase_scan_dev_num = true;
+
btcoexist->btc_set(btcoexist, BTC_SET_BL_INC_SCAN_DEV_NUM,
&increase_scan_dev_num);
-
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
&wifi_link_status);
num_of_wifi_link = wifi_link_status >> 16;
- if (num_of_wifi_link >= 2) {
- halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+
+ if (num_of_wifi_link >= 2 ||
+ wifi_link_status & WIFI_P2P_GO_CONNECTED) {
+ if (bt_link_info->bt_link_exist) {
+ halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 1, 1,
+ 0, 1);
+ miracast_plus_bt = true;
+ } else {
+ halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0,
+ 0, 0);
+ miracast_plus_bt = false;
+ }
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_MIRACAST_PLUS_BT,
+ &miracast_plus_bt);
halbtc8723b1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
- bt_ctrl_agg_buf_size,
- agg_buf_size);
- halbtc8723b1ant_action_wifi_multiport(btcoexist);
+ bt_ctrl_agg_buf_size, agg_buf_size);
+
+ if ((bt_link_info->a2dp_exist || wifi_busy) &&
+ (coex_sta->c2h_bt_inquiry_page))
+ halbtc8723b1ant_action_bt_inquiry(btcoexist);
+ else
+ halbtc8723b1ant_action_wifi_multiport(btcoexist);
+
return;
}
- if (!bt_link_info->sco_exist && !bt_link_info->hid_exist) {
- halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+ miracast_plus_bt = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_MIRACAST_PLUS_BT,
+ &miracast_plus_bt);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (bt_link_info->bt_link_exist && wifi_connected) {
+ halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 1, 1, 0, 1);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_IOT_PEER, &iot_peer);
+
+ if (iot_peer != BTC_IOT_PEER_CISCO &&
+ iot_peer != BTC_IOT_PEER_BROADCOM) {
+			/* identical RX aggregation limit whether or not SCO
+			 * exists, so a single call suffices
+			 */
+			halbtc8723b1ant_limited_rx(btcoexist, NORMAL_EXEC,
+						   false, false, 0x5);
+ } else {
+ if (bt_link_info->sco_exist) {
+ halbtc8723b1ant_limited_rx(btcoexist,
+ NORMAL_EXEC, true,
+ false, 0x5);
+ } else {
+ if (wifi_bw == BTC_WIFI_BW_HT40)
+ halbtc8723b1ant_limited_rx(
+ btcoexist, NORMAL_EXEC, false,
+ true, 0x10);
+ else
+ halbtc8723b1ant_limited_rx(
+ btcoexist, NORMAL_EXEC, false,
+ true, 0x8);
+ }
+ }
+
+ halbtc8723b1ant_sw_mechanism(btcoexist, true);
} else {
- if (wifi_connected)
- halbtc8723b1ant_limited_tx(btcoexist,
- NORMAL_EXEC, 1, 1, 1, 1);
- else
- halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC,
- 0, 0, 0, 0);
- }
+ halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
- if (bt_link_info->sco_exist) {
- bt_ctrl_agg_buf_size = true;
- agg_buf_size = 0x3;
- } else if (bt_link_info->hid_exist) {
- bt_ctrl_agg_buf_size = true;
- agg_buf_size = 0x5;
- } else if (bt_link_info->a2dp_exist || bt_link_info->pan_exist) {
- bt_ctrl_agg_buf_size = true;
- agg_buf_size = 0x8;
- }
- halbtc8723b1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
- bt_ctrl_agg_buf_size, agg_buf_size);
+ halbtc8723b1ant_limited_rx(btcoexist, NORMAL_EXEC, false, false,
+ 0x5);
+ halbtc8723b1ant_sw_mechanism(btcoexist, false);
+ }
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (coex_sta->c2h_bt_inquiry_page) {
@@ -1556,88 +2335,138 @@
}
}
+/* force coex mechanism to reset */
static void halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
{
/* sw all off */
halbtc8723b1ant_sw_mechanism(btcoexist, false);
- halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
- halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+ coex_sta->pop_event_cnt = 0;
}
static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist,
- bool backup)
+ bool backup, bool wifi_only)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u32 u32tmp = 0;
- u8 u8tmp = 0;
- u32 cnt_bt_cal_chk = 0;
+ u8 u8tmpa = 0, u8tmpb = 0;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], 1Ant Init HW Config!!\n");
- if (backup) {/* backup rf 0x1e value */
- coex_dm->backup_arfr_cnt1 =
- btcoexist->btc_read_4byte(btcoexist, 0x430);
- coex_dm->backup_arfr_cnt2 =
- btcoexist->btc_read_4byte(btcoexist, 0x434);
- coex_dm->backup_retry_limit =
- btcoexist->btc_read_2byte(btcoexist, 0x42a);
- coex_dm->backup_ampdu_max_time =
- btcoexist->btc_read_1byte(btcoexist, 0x456);
- }
-
- /* WiFi goto standby while GNT_BT 0-->1 */
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x780);
- /* BT goto standby while GNT_BT 1-->0 */
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x2, 0xfffff, 0x500);
-
- btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x944, 0x3, 0x3);
- btcoexist->btc_write_1byte(btcoexist, 0x930, 0x77);
-
- /* BT calibration check */
- while (cnt_bt_cal_chk <= 20) {
- u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x49d);
- cnt_bt_cal_chk++;
- if (u32tmp & BIT0) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ########### BT calibration(cnt=%d) ###########\n",
- cnt_bt_cal_chk);
- mdelay(50);
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** BT NOT calibration (cnt=%d)**********\n",
- cnt_bt_cal_chk);
- break;
- }
- }
+ /* 0xf0[15:12] --> Chip Cut information */
+ coex_sta->cut_version =
+ (btcoexist->btc_read_1byte(btcoexist, 0xf1) & 0xf0) >> 4;
+ /* enable TBTT interrupt */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x550, 0x8, 0x1);
/* 0x790[5:0] = 0x5 */
- u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790);
- u8tmp &= 0xc0;
- u8tmp |= 0x5;
- btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp);
+ btcoexist->btc_write_1byte(btcoexist, 0x790, 0x5);
/* Enable counter statistics */
- /*0x76e[3] = 1, WLAN_Act control by PTA */
- btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
btcoexist->btc_write_1byte(btcoexist, 0x778, 0x1);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
+ halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
+
/* Antenna config */
- halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA, true, false);
+ if (wifi_only)
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_WIFI,
+ FORCE_EXEC, true, false);
+ else
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
+ FORCE_EXEC, true, false);
+
/* PTA parameter */
halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x948);
+ u8tmpa = btcoexist->btc_read_1byte(btcoexist, 0x765);
+ u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x67);
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "############# [BTCoex], 0x948=0x%x, 0x765=0x%x, 0x67=0x%x\n",
+ u32tmp, u8tmpa, u8tmpb);
}
/**************************************************************
* extern function start with ex_halbtc8723b1ant_
**************************************************************/
-
-void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist)
+void ex_halbtc8723b1ant_power_on_setting(struct btc_coexist *btcoexist)
{
- halbtc8723b1ant_init_hw_config(btcoexist, true);
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ u8 u8tmp = 0x0;
+ u16 u16tmp = 0x0;
+ u32 value;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "xxxxxxxxxxxxxxxx Execute 8723b 1-Ant PowerOn Setting xxxxxxxxxxxxxxxx!!\n");
+
+ btcoexist->stop_coex_dm = true;
+
+ btcoexist->btc_write_1byte(btcoexist, 0x67, 0x20);
+
+	/* enable the BB via REG_SYS_FUNC_EN so that 0x948 can be written
+	 * correctly
+	 */
+ u16tmp = btcoexist->btc_read_2byte(btcoexist, 0x2);
+ btcoexist->btc_write_2byte(btcoexist, 0x2, u16tmp | BIT0 | BIT1);
+
+ /* set GRAN_BT = 1 */
+ btcoexist->btc_write_1byte(btcoexist, 0x765, 0x18);
+ /* set WLAN_ACT = 0 */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
+
+	/* S0 or S1 setting and local register setting (from this setting the
+	 * FW can get the antenna number, S0/S1, ... info)
+	 *
+	 * Local setting bit definition:
+	 * BIT0: "0" for no antenna inverse; "1" for antenna inverse
+	 * BIT1: "0" for internal switch; "1" for external switch
+	 * BIT2: "0" for one antenna; "1" for two antennas
+	 * NOTE: the default here is internal switch and one antenna, i.e.
+	 * BIT1 = 0 and BIT2 = 0
+	 */
+ if (btcoexist->chip_interface == BTC_INTF_USB) {
+ /* fixed at S0 for USB interface */
+ btcoexist->btc_write_4byte(btcoexist, 0x948, 0x0);
+
+ u8tmp |= 0x1; /* antenna inverse */
+ btcoexist->btc_write_local_reg_1byte(btcoexist, 0xfe08, u8tmp);
+
+ board_info->btdm_ant_pos = BTC_ANTENNA_AT_AUX_PORT;
+ } else {
+ /* for PCIE and SDIO interface, we check efuse 0xc3[6] */
+ if (board_info->single_ant_path == 0) {
+ /* set to S1 */
+ btcoexist->btc_write_4byte(btcoexist, 0x948, 0x280);
+ board_info->btdm_ant_pos = BTC_ANTENNA_AT_MAIN_PORT;
+ value = 1;
+ } else if (board_info->single_ant_path == 1) {
+ /* set to S0 */
+ btcoexist->btc_write_4byte(btcoexist, 0x948, 0x0);
+ u8tmp |= 0x1; /* antenna inverse */
+ board_info->btdm_ant_pos = BTC_ANTENNA_AT_AUX_PORT;
+ value = 0;
+ }
+
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ANTPOSREGRISTRY_CTRL,
+ &value);
+
+ if (btcoexist->chip_interface == BTC_INTF_PCI)
+ btcoexist->btc_write_local_reg_1byte(btcoexist, 0x384,
+ u8tmp);
+ else if (btcoexist->chip_interface == BTC_INTF_SDIO)
+ btcoexist->btc_write_local_reg_1byte(btcoexist, 0x60,
+ u8tmp);
+ }
+}
+
+void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist,
+ bool wifi_only)
+{
+ halbtc8723b1ant_init_hw_config(btcoexist, true, wifi_only);
+ btcoexist->stop_coex_dm = false;
}
void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
@@ -1687,11 +2516,6 @@
"\r\n ==========================================");
}
- if (!board_info->bt_exist) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!");
- return;
- }
-
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d",
"Ant PG Num/ Ant Mech/ Ant Pos:",
board_info->pg_ant_num, board_info->btdm_ant_num,
@@ -1760,7 +2584,7 @@
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = [%s/ %d/ %d] ",
"BT [status/ rssi/ retryCnt]",
- ((btcoexist->bt_info.bt_disabled) ? ("disabled") :
+ ((coex_sta->bt_disabled) ? ("disabled") :
((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
coex_dm->bt_status) ?
@@ -1835,6 +2659,9 @@
coex_dm->error_condition);
}
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d",
+ "Coex Table Type", coex_sta->coex_table_type);
+
/* Hw setting */
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
"============[Hw setting]============");
@@ -1926,9 +2753,8 @@
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
"0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
coex_sta->low_priority_tx);
-#if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 1)
- halbtc8723b1ant_monitor_bt_ctr(btcoexist);
-#endif
+ if (btcoexist->auto_report_1ant)
+ halbtc8723b1ant_monitor_bt_ctr(btcoexist);
btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
}
@@ -1945,7 +2771,7 @@
coex_sta->under_ips = true;
halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
- false, true);
+ FORCE_EXEC, false, true);
/* set PTA control */
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
halbtc8723b1ant_coex_table_with_type(btcoexist,
@@ -1955,7 +2781,7 @@
"[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
- halbtc8723b1ant_init_hw_config(btcoexist, false);
+ halbtc8723b1ant_init_hw_config(btcoexist, false, false);
halbtc8723b1ant_init_coex_dm(btcoexist);
halbtc8723b1ant_query_bt_info(btcoexist);
}
@@ -1983,13 +2809,41 @@
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
bool wifi_connected = false, bt_hs_on = false;
+ u8 u8tmpa, u8tmpb;
+ u32 u32tmp;
u32 wifi_link_status = 0;
u32 num_of_wifi_link = 0;
bool bt_ctrl_agg_buf_size = false;
u8 agg_buf_size = 5;
- if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
- btcoexist->bt_info.bt_disabled)
+ if (btcoexist->manual_control || btcoexist->stop_coex_dm)
+ return;
+
+ if (type == BTC_SCAN_START) {
+ coex_sta->wifi_is_high_pri_task = true;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN START notify\n");
+ /* Force antenna setup for no scan result issue */
+ halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
+ FORCE_EXEC, false, false);
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x948);
+ u8tmpa = btcoexist->btc_read_1byte(btcoexist, 0x765);
+ u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x67);
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 0x948=0x%x, 0x765=0x%x, 0x67=0x%x\n",
+ u32tmp, u8tmpa, u8tmpb);
+ } else {
+ coex_sta->wifi_is_high_pri_task = false;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN FINISH notify\n");
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM,
+ &coex_sta->scan_ap_num);
+ }
+
+ if (coex_sta->bt_disabled)
return;
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
@@ -2043,13 +2897,32 @@
bool wifi_connected = false, bt_hs_on = false;
u32 wifi_link_status = 0;
u32 num_of_wifi_link = 0;
- bool bt_ctrl_agg_buf_size = false;
+ bool bt_ctrl_agg_buf_size = false, under_4way = false;
u8 agg_buf_size = 5;
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+ &under_4way);
+
if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
- btcoexist->bt_info.bt_disabled)
+ coex_sta->bt_disabled)
return;
+ if (type == BTC_ASSOCIATE_START) {
+ coex_sta->wifi_is_high_pri_task = true;
+
+ /* Force antenna setup for no scan result issue */
+ halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
+ FORCE_EXEC, false, false);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT START notify\n");
+ coex_dm->arp_cnt = 0;
+ } else {
+ coex_sta->wifi_is_high_pri_task = false;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT FINISH notify\n");
+ }
+
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
&wifi_link_status);
num_of_wifi_link = wifi_link_status>>16;
@@ -2094,27 +2967,62 @@
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[3] = {0};
u32 wifi_bw;
- u8 wifiCentralChnl;
+ u8 wifi_central_chnl;
+ bool wifi_under_b_mode = false;
if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
- btcoexist->bt_info.bt_disabled)
+ coex_sta->bt_disabled)
return;
- if (BTC_MEDIA_CONNECT == type)
+ if (type == BTC_MEDIA_CONNECT) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], MEDIA connect notify\n");
- else
+ /* Force antenna setup for no scan result issue */
+ halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
+ FORCE_EXEC, false, false);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_B_MODE,
+ &wifi_under_b_mode);
+
+ /* Set CCK Tx/Rx high Pri except 11b mode */
+ if (wifi_under_b_mode) {
+ btcoexist->btc_write_1byte(btcoexist, 0x6cd,
+ 0x00); /* CCK Tx */
+ btcoexist->btc_write_1byte(btcoexist, 0x6cf,
+ 0x00); /* CCK Rx */
+ } else {
+ btcoexist->btc_write_1byte(btcoexist, 0x6cd,
+ 0x00); /* CCK Tx */
+ btcoexist->btc_write_1byte(btcoexist, 0x6cf,
+ 0x10); /* CCK Rx */
+ }
+
+ coex_dm->backup_arfr_cnt1 =
+ btcoexist->btc_read_4byte(btcoexist, 0x430);
+ coex_dm->backup_arfr_cnt2 =
+ btcoexist->btc_read_4byte(btcoexist, 0x434);
+ coex_dm->backup_retry_limit =
+ btcoexist->btc_read_2byte(btcoexist, 0x42a);
+ coex_dm->backup_ampdu_max_time =
+ btcoexist->btc_read_1byte(btcoexist, 0x456);
+ } else {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], MEDIA disconnect notify\n");
+ coex_dm->arp_cnt = 0;
+
+ btcoexist->btc_write_1byte(btcoexist, 0x6cd, 0x0); /* CCK Tx */
+ btcoexist->btc_write_1byte(btcoexist, 0x6cf, 0x0); /* CCK Rx */
+
+ coex_sta->cck_ever_lock = false;
+ }
/* only 2.4G we need to inform bt the chnl mask */
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
- &wifiCentralChnl);
+ &wifi_central_chnl);
- if ((BTC_MEDIA_CONNECT == type) &&
- (wifiCentralChnl <= 14)) {
+ if (type == BTC_MEDIA_CONNECT && wifi_central_chnl <= 14) {
h2c_parameter[0] = 0x0;
- h2c_parameter[1] = wifiCentralChnl;
+ h2c_parameter[1] = wifi_central_chnl;
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if (BTC_WIFI_BW_HT40 == wifi_bw)
h2c_parameter[2] = 0x30;
@@ -2141,13 +3049,46 @@
bool bt_hs_on = false;
u32 wifi_link_status = 0;
u32 num_of_wifi_link = 0;
- bool bt_ctrl_agg_buf_size = false;
+ bool bt_ctrl_agg_buf_size = false, under_4way = false;
u8 agg_buf_size = 5;
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+ &under_4way);
+
if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
- btcoexist->bt_info.bt_disabled)
+ coex_sta->bt_disabled)
return;
+ if (type == BTC_PACKET_DHCP || type == BTC_PACKET_EAPOL ||
+ type == BTC_PACKET_ARP) {
+ if (type == BTC_PACKET_ARP) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], special Packet ARP notify\n");
+
+ coex_dm->arp_cnt++;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ARP Packet Count = %d\n",
+ coex_dm->arp_cnt);
+
+ if ((coex_dm->arp_cnt >= 10) && (!under_4way))
+				/* if ARP packet count >= 10 after connecting,
+				 * do not go to
+				 * ActionWifiConnectedSpecificPacket(btcoexist)
+				 */
+ coex_sta->wifi_is_high_pri_task = false;
+ else
+ coex_sta->wifi_is_high_pri_task = true;
+ } else {
+ coex_sta->wifi_is_high_pri_task = true;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], special Packet DHCP or EAPOL notify\n");
+ }
+ } else {
+ coex_sta->wifi_is_high_pri_task = false;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], special Packet [Type = %d] notify\n",
+ type);
+ }
+
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
&wifi_link_status);
num_of_wifi_link = wifi_link_status >> 16;
@@ -2209,16 +3150,58 @@
"0x%02x, ", tmp_buf[i]);
}
- if (BT_INFO_SRC_8723B_1ANT_WIFI_FW != rsp_source) {
- coex_sta->bt_retry_cnt = /* [3:0] */
+ /* if 0xff, it means BT is under WHCK test */
+ if (bt_info == 0xff)
+ coex_sta->bt_whck_test = true;
+ else
+ coex_sta->bt_whck_test = false;
+
+ if (rsp_source != BT_INFO_SRC_8723B_1ANT_WIFI_FW) {
+ coex_sta->bt_retry_cnt = /* [3:0] */
coex_sta->bt_info_c2h[rsp_source][2] & 0xf;
+ if (coex_sta->bt_retry_cnt >= 1)
+ coex_sta->pop_event_cnt++;
+
+ if (coex_sta->bt_info_c2h[rsp_source][2] & 0x20)
+ coex_sta->c2h_bt_remote_name_req = true;
+ else
+ coex_sta->c2h_bt_remote_name_req = false;
+
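+		/* remap the raw BT RSSI byte (value * 2 - 90, roughly dBm) */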
coex_sta->bt_rssi =
- coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10;
+ coex_sta->bt_info_c2h[rsp_source][3] * 2 - 90;
coex_sta->bt_info_ext =
coex_sta->bt_info_c2h[rsp_source][4];
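+
+		/* C2H report type 0x49 carries the A2DP bit pool in byte 6 */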
+ if (coex_sta->bt_info_c2h[rsp_source][1] == 0x49) {
+ coex_sta->a2dp_bit_pool =
+ coex_sta->bt_info_c2h[rsp_source][6];
+ } else {
+ coex_sta->a2dp_bit_pool = 0;
+ }
+
+ coex_sta->bt_tx_rx_mask =
+ (coex_sta->bt_info_c2h[rsp_source][2] & 0x40);
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TX_RX_MASK,
+ &coex_sta->bt_tx_rx_mask);
+
+ if (!coex_sta->bt_tx_rx_mask) {
+			/* BT info is responded by the BT FW and BT RF REG
+			 * 0x3C != 0x15 => need to switch the BT TRx mask
+			 */
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Switch BT TRx Mask since BT RF REG 0x3C != 0x15\n");
+ btcoexist->btc_set_bt_reg(btcoexist, BTC_BT_REG_RF,
+ 0x3c, 0x15);
+
+ /* BT TRx Mask lock 0x2c[0], 0x30[0] = 0 */
+ btcoexist->btc_set_bt_reg(btcoexist, BTC_BT_REG_RF,
+ 0x2c, 0x7c44);
+ btcoexist->btc_set_bt_reg(btcoexist, BTC_BT_REG_RF,
+ 0x30, 0x7c44);
+ }
+
		/* Here we need to resend some wifi info to BT
		 * because BT was reset and lost the info.
		 */
@@ -2247,14 +3230,15 @@
} else {
/* BT already NOT ignore Wlan active, do nothing here.*/
}
-#if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 0)
- if (coex_sta->bt_info_ext & BIT4) {
- /* BT auto report already enabled, do nothing */
- } else {
- halbtc8723b1ant_bt_auto_report(btcoexist, FORCE_EXEC,
- true);
+ if (!btcoexist->auto_report_1ant) {
+ if (coex_sta->bt_info_ext & BIT4) {
+ /* BT auto report already enabled, do nothing */
+ } else {
+ halbtc8723b1ant_bt_auto_report(btcoexist,
+ FORCE_EXEC,
+ true);
+ }
}
-#endif
}
/* check BIT2 first ==> check if bt is under inquiry or page scan */
@@ -2263,6 +3247,8 @@
else
coex_sta->c2h_bt_inquiry_page = false;
+ coex_sta->num_of_profile = 0;
+
/* set link exist status */
if (!(bt_info & BT_INFO_8723B_1ANT_B_CONNECTION)) {
coex_sta->bt_link_exist = false;
@@ -2270,30 +3256,77 @@
coex_sta->a2dp_exist = false;
coex_sta->hid_exist = false;
coex_sta->sco_exist = false;
+
+ coex_sta->bt_hi_pri_link_exist = false;
} else {
/* connection exists */
coex_sta->bt_link_exist = true;
- if (bt_info & BT_INFO_8723B_1ANT_B_FTP)
+ if (bt_info & BT_INFO_8723B_1ANT_B_FTP) {
coex_sta->pan_exist = true;
- else
+ coex_sta->num_of_profile++;
+ } else {
coex_sta->pan_exist = false;
- if (bt_info & BT_INFO_8723B_1ANT_B_A2DP)
+ }
+ if (bt_info & BT_INFO_8723B_1ANT_B_A2DP) {
coex_sta->a2dp_exist = true;
- else
+ coex_sta->num_of_profile++;
+ } else {
coex_sta->a2dp_exist = false;
- if (bt_info & BT_INFO_8723B_1ANT_B_HID)
+ }
+ if (bt_info & BT_INFO_8723B_1ANT_B_HID) {
coex_sta->hid_exist = true;
- else
+ coex_sta->num_of_profile++;
+ } else {
coex_sta->hid_exist = false;
- if (bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO)
+ }
+ if (bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO) {
coex_sta->sco_exist = true;
- else
+ coex_sta->num_of_profile++;
+ } else {
coex_sta->sco_exist = false;
+ }
+
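+		/* the BT firmware sometimes omits the HID bit; infer HID from
+		 * a burst of high-priority traffic and count it as a wrong
+		 * profile notification
+		 */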
+ if ((!coex_sta->hid_exist) &&
+ (!coex_sta->c2h_bt_inquiry_page) &&
+ (!coex_sta->sco_exist)) {
+ if (coex_sta->high_priority_tx +
+ coex_sta->high_priority_rx >=
+ 160) {
+ coex_sta->hid_exist = true;
+ coex_sta->wrong_profile_notification++;
+ coex_sta->num_of_profile++;
+ bt_info = bt_info | 0x28;
+ }
+ }
+
+ /* Add Hi-Pri Tx/Rx counter to avoid false detection */
+ if (((coex_sta->hid_exist) || (coex_sta->sco_exist)) &&
+ (coex_sta->high_priority_tx + coex_sta->high_priority_rx >=
+ 160) &&
+ (!coex_sta->c2h_bt_inquiry_page))
+ coex_sta->bt_hi_pri_link_exist = true;
+
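+		/* ACL busy with no profile reported: heavy low-priority
+		 * traffic implies an unreported PAN link
+		 */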
+ if ((bt_info & BT_INFO_8723B_1ANT_B_ACL_BUSY) &&
+ (coex_sta->num_of_profile == 0)) {
+ if (coex_sta->low_priority_tx +
+ coex_sta->low_priority_rx >=
+ 160) {
+ coex_sta->pan_exist = true;
+ coex_sta->num_of_profile++;
+ coex_sta->wrong_profile_notification++;
+ bt_info = bt_info | 0x88;
+ }
+ }
}
halbtc8723b1ant_update_bt_link_info(btcoexist);
- if (!(bt_info&BT_INFO_8723B_1ANT_B_CONNECTION)) {
+	/* mask the profile bits for connect-idle identification
+	 * (for the CSR case: A2DP idle --> 0x41)
+	 */
+ bt_info = bt_info & 0x1f;
+
+ if (!(bt_info & BT_INFO_8723B_1ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n");
@@ -2315,8 +3348,7 @@
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
} else {
- coex_dm->bt_status =
- BT_8723B_1ANT_BT_STATUS_MAX;
+ coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_MAX;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
}
@@ -2332,6 +3364,43 @@
halbtc8723b1ant_run_coexist_mechanism(btcoexist);
}
+void ex_halbtc8723b1ant_rf_status_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ u32 u32tmp;
+ u8 u8tmpa, u8tmpb, u8tmpc;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RF Status notify\n");
+
+ if (type == BTC_RF_ON) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RF is turned ON!!\n");
+ btcoexist->stop_coex_dm = false;
+ } else if (type == BTC_RF_OFF) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RF is turned OFF!!\n");
+
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0);
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
+ FORCE_EXEC, false, true);
+
+ halbtc8723b1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+ btcoexist->stop_coex_dm = true;
+
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x948);
+ u8tmpa = btcoexist->btc_read_1byte(btcoexist, 0x765);
+ u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x67);
+ u8tmpc = btcoexist->btc_read_1byte(btcoexist, 0x76e);
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "############# [BTCoex], 0x948=0x%x, 0x765=0x%x, 0x67=0x%x, 0x76e=0x%x\n",
+ u32tmp, u8tmpa, u8tmpb, u8tmpc);
+ }
+}
+
void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -2340,7 +3409,8 @@
btcoexist->stop_coex_dm = true;
- halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, false, true);
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, FORCE_EXEC,
+ false, true);
halbtc8723b1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
@@ -2349,6 +3419,8 @@
halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0);
ex_halbtc8723b1ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
+
+ btcoexist->stop_coex_dm = true;
}
void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
@@ -2360,18 +3432,27 @@
if (BTC_WIFI_PNP_SLEEP == pnp_state) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Pnp notify to SLEEP\n");
- btcoexist->stop_coex_dm = true;
- halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, false,
- true);
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
+ FORCE_EXEC, false, true);
halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
0x0, 0x0);
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+
+ /* The driver does not leave IPS/LPS when it is going to sleep, so
+ * BTCoexistence thinks wifi is still under IPS/LPS.
+ *
+ * BT should clear the UnderIPS/UnderLPS state to avoid a mismatched
+ * state after wakeup.
+ */
+ coex_sta->under_ips = false;
+ coex_sta->under_lps = false;
+ btcoexist->stop_coex_dm = true;
} else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Pnp notify to WAKE UP\n");
btcoexist->stop_coex_dm = false;
- halbtc8723b1ant_init_hw_config(btcoexist, false);
+ halbtc8723b1ant_init_hw_config(btcoexist, false, false);
halbtc8723b1ant_init_coex_dm(btcoexist);
halbtc8723b1ant_query_bt_info(btcoexist);
}
@@ -2384,57 +3465,33 @@
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], *****************Coex DM Reset****************\n");
- halbtc8723b1ant_init_hw_config(btcoexist, false);
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x2, 0xfffff, 0x0);
+ halbtc8723b1ant_init_hw_config(btcoexist, false, false);
halbtc8723b1ant_init_coex_dm(btcoexist);
}
void ex_halbtc8723b1ant_periodical(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct btc_board_info *board_info = &btcoexist->board_info;
- struct btc_stack_info *stack_info = &btcoexist->stack_info;
- static u8 dis_ver_info_cnt;
- u32 fw_ver = 0, bt_patch_ver = 0;
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], ==========================Periodical===========================\n");
- if (dis_ver_info_cnt <= 5) {
- dis_ver_info_cnt += 1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ****************************************************************\n");
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
- board_info->pg_ant_num, board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
- stack_info->profile_notified ? "Yes" : "No",
- stack_info->hci_version);
- btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
- &bt_patch_ver);
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
- glcoex_ver_date_8723b_1ant,
- glcoex_ver_8723b_1ant, fw_ver,
- bt_patch_ver, bt_patch_ver);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ****************************************************************\n");
- }
+ if (!btcoexist->auto_report_1ant) {
+ halbtc8723b1ant_query_bt_info(btcoexist);
+ halbtc8723b1ant_monitor_bt_enable_disable(btcoexist);
+ } else {
+ halbtc8723b1ant_monitor_bt_ctr(btcoexist);
+ halbtc8723b1ant_monitor_wifi_ctr(btcoexist);
-#if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 0)
- halbtc8723b1ant_query_bt_info(btcoexist);
- halbtc8723b1ant_monitor_bt_ctr(btcoexist);
- halbtc8723b1ant_monitor_bt_enable_disable(btcoexist);
-#else
- if (btc8723b1ant_is_wifi_status_changed(btcoexist) ||
- coex_dm->auto_tdma_adjust) {
- halbtc8723b1ant_run_coexist_mechanism(btcoexist);
- }
+ if ((coex_sta->high_priority_tx + coex_sta->high_priority_rx < 50) &&
+ bt_link_info->hid_exist)
+ bt_link_info->hid_exist = false;
- coex_sta->special_pkt_period_cnt++;
-#endif
+ if (btc8723b1ant_is_wifi_status_changed(btcoexist) ||
+ coex_dm->auto_tdma_adjust) {
+ halbtc8723b1ant_run_coexist_mechanism(btcoexist);
+ }
+ coex_sta->special_pkt_period_cnt++;
+ }
}
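
The bt_info fix-up added above infers unreported profiles from the hardware
traffic counters: heavy high-priority traffic with no HID/SCO reported is
treated as a hidden HID link, and ACL-busy with no profile plus heavy
low-priority traffic as a hidden PAN link, bumping wrong_profile_notification
each time. Below is a minimal standalone sketch of that decision logic with
illustrative names; the 160 threshold and the 0x28/0x88 masks come from the
hunk, and reading the extra bit in each mask as the ACL-busy flag is an
assumption.

    #include <stdbool.h>
    #include <stdint.h>

    struct coex_model {
            bool hid_exist, sco_exist, pan_exist, inquiry_page;
            uint32_t hi_tx, hi_rx, lo_tx, lo_rx;
            uint8_t num_of_profile;
    };

    static uint8_t fixup_profiles(struct coex_model *c, uint8_t bt_info,
                                  bool acl_busy)
    {
            /* no HID/SCO reported and not scanning, yet heavy
             * high-priority traffic: assume a hidden HID link
             */
            if (!c->hid_exist && !c->inquiry_page && !c->sco_exist &&
                c->hi_tx + c->hi_rx >= 160) {
                    c->hid_exist = true;
                    c->num_of_profile++;
                    bt_info |= 0x28; /* HID bit plus (assumed) busy bit */
            }

            /* ACL busy with no profile yet heavy low-priority traffic:
             * assume a hidden PAN link
             */
            if (acl_busy && c->num_of_profile == 0 &&
                c->lo_tx + c->lo_rx >= 160) {
                    c->pan_exist = true;
                    c->num_of_profile++;
                    bt_info |= 0x88; /* FTP/PAN bit plus (assumed) busy bit */
            }

            /* mask the profile bits so idle detection sees only the
             * connection/state bits (the CSR A2DP-idle 0x41 case)
             */
            return bt_info & 0x1f;
    }

    int main(void)
    {
            struct coex_model c = { .hi_tx = 100, .hi_rx = 80 };

            return fixup_profiles(&c, 0x09, false); /* HID inferred */
    }
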
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
index 75f8094..506961a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
@@ -25,8 +25,6 @@
/**********************************************************************
* The following is for 8723B 1ANT BT Co-exist definition
**********************************************************************/
-#define BT_AUTO_REPORT_ONLY_8723B_1ANT 1
-
#define BT_INFO_8723B_1ANT_B_FTP BIT7
#define BT_INFO_8723B_1ANT_B_A2DP BIT6
#define BT_INFO_8723B_1ANT_B_HID BIT5
@@ -41,6 +39,8 @@
#define BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT 2
+#define BT_8723B_1ANT_WIFI_NOISY_THRESH 50
+
enum _BT_INFO_SRC_8723B_1ANT {
BT_INFO_SRC_8723B_1ANT_WIFI_FW = 0x0,
BT_INFO_SRC_8723B_1ANT_BT_RSP = 0x1,
@@ -84,13 +84,16 @@
};
struct coex_dm_8723b_1ant {
+ /* hw setting */
+ u8 pre_ant_pos_type;
+ u8 cur_ant_pos_type;
/* fw mechanism */
bool cur_ignore_wlan_act;
bool pre_ignore_wlan_act;
u8 pre_ps_tdma;
u8 cur_ps_tdma;
u8 ps_tdma_para[5];
- u8 tdma_adj_type;
+ u8 ps_tdma_du_adj_type;
bool auto_tdma_adjust;
bool pre_ps_tdma_on;
bool cur_ps_tdma_on;
@@ -133,16 +136,21 @@
u8 cur_retry_limit_type;
u8 pre_ampdu_time_type;
u8 cur_ampdu_time_type;
+ u32 arp_cnt;
u8 error_condition;
};
struct coex_sta_8723b_1ant {
+ bool bt_disabled;
bool bt_link_exist;
bool sco_exist;
bool a2dp_exist;
bool hid_exist;
bool pan_exist;
+ bool bt_hi_pri_link_exist;
+ u8 num_of_profile;
+ bool bt_abnormal_scan;
bool under_lps;
bool under_ips;
@@ -154,18 +162,47 @@
u8 bt_rssi;
u8 pre_bt_rssi_state;
u8 pre_wifi_rssi_state[4];
+ bool bt_tx_rx_mask;
bool c2h_bt_info_req_sent;
u8 bt_info_c2h[BT_INFO_SRC_8723B_1ANT_MAX][10];
u32 bt_info_c2h_cnt[BT_INFO_SRC_8723B_1ANT_MAX];
+ bool bt_whck_test;
bool c2h_bt_inquiry_page;
+ bool c2h_bt_remote_name_req;
+ bool wifi_is_high_pri_task;
u8 bt_retry_cnt;
u8 bt_info_ext;
+ u8 scan_ap_num;
+ bool cck_ever_lock;
+ u8 coex_table_type;
+ bool force_lps_on;
+ u32 pop_event_cnt;
+
+ u32 crc_ok_cck;
+ u32 crc_ok_11g;
+ u32 crc_ok_11n;
+ u32 crc_ok_11n_agg;
+
+ u32 crc_err_cck;
+ u32 crc_err_11g;
+ u32 crc_err_11n;
+ u32 crc_err_11n_agg;
+
+ bool cck_lock;
+ bool pre_ccklock;
+
+ u32 wrong_profile_notification;
+
+ u8 a2dp_bit_pool;
+ u8 cut_version;
};
/*************************************************************************
* The following is interface which will notify coex module.
*************************************************************************/
-void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist);
+void ex_halbtc8723b1ant_power_on_setting(struct btc_coexist *btcoexist);
+void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist,
+ bool wifi_only);
void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist);
void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
@@ -177,6 +214,8 @@
u8 type);
void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
u8 *tmpbuf, u8 length);
+void ex_halbtc8723b1ant_rf_status_notify(struct btc_coexist *btcoexist,
+ u8 type);
void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist);
void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnpstate);
void ex_halbtc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist);
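
With BT_AUTO_REPORT_ONLY_8723B_1ANT removed from this header, the periodical
handler in the .c file above picks its polling strategy through the new
btcoexist->auto_report_1ant flag at run time rather than at preprocessing
time, so one binary serves both firmware behaviours. A small self-contained
sketch of the before/after pattern, with call names abbreviated:

    #include <stdbool.h>
    #include <stdio.h>

    /* Before, the branch was fixed at build time:
     *
     *   #if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 0)
     *           halbtc8723b1ant_query_bt_info(btcoexist);
     *   #else
     *           halbtc8723b1ant_monitor_bt_ctr(btcoexist);
     *   #endif
     *
     * After, the same decision is an ordinary run-time branch:
     */
    struct btc_sketch {
            bool auto_report_1ant;
    };

    static void periodical(const struct btc_sketch *btc)
    {
            if (!btc->auto_report_1ant)
                    puts("poll: query_bt_info + monitor_bt_enable_disable");
            else
                    puts("auto-report: monitor_bt_ctr + monitor_wifi_ctr");
    }

    int main(void)
    {
            struct btc_sketch no_ar = { .auto_report_1ant = false };
            struct btc_sketch ar = { .auto_report_1ant = true };

            periodical(&no_ar);
            periodical(&ar);
            return 0;
    }
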
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
index 2f3946b..31965f0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -707,6 +707,36 @@
coex_dm->pre_dec_bt_pwr_lvl = coex_dm->cur_dec_bt_pwr_lvl;
}
+static
+void halbtc8723b2ant_set_bt_auto_report(struct btc_coexist *btcoexist,
+ bool enable_auto_report)
+{
+ u8 h2c_parameter[1] = {0};
+
+ h2c_parameter[0] = 0;
+
+ if (enable_auto_report)
+ h2c_parameter[0] |= BIT(0);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
+}
+
+static
+void btc8723b2ant_bt_auto_report(struct btc_coexist *btcoexist,
+ bool force_exec, bool enable_auto_report)
+{
+ coex_dm->cur_bt_auto_report = enable_auto_report;
+
+ if (!force_exec) {
+ if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
+ return;
+ }
+ halbtc8723b2ant_set_bt_auto_report(btcoexist,
+ coex_dm->cur_bt_auto_report);
+
+ coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report;
+}
+
static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
bool force_exec, u8 fw_dac_swing_lvl)
{
@@ -3666,6 +3696,7 @@
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
+ btcoexist->auto_report_2ant = true;
}
void ex_btc8723b2ant_power_on_setting(struct btc_coexist *btcoexist)
@@ -3966,9 +3997,8 @@
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
"0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
coex_sta->low_priority_tx);
-#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1)
- btc8723b2ant_monitor_bt_ctr(btcoexist);
-#endif
+ if (btcoexist->auto_report_2ant)
+ btc8723b2ant_monitor_bt_ctr(btcoexist);
btcoexist->btc_disp_dbg_msg(btcoexist,
BTC_DBG_DISP_COEX_STATISTICS);
}
@@ -4190,14 +4220,11 @@
} else {
/* BT already NOT ignore Wlan active, do nothing here.*/
}
-#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
- if ((coex_sta->bt_info_ext & BIT4)) {
- /* BT auto report already enabled, do nothing*/
- } else {
- btc8723b2ant_bt_auto_report(btcoexist, FORCE_EXEC,
- true);
+ if (!btcoexist->auto_report_2ant) {
+ if (!(coex_sta->bt_info_ext & BIT4))
+ btc8723b2ant_bt_auto_report(btcoexist,
+ FORCE_EXEC, true);
}
-#endif
}
/* check BIT2 first ==> check if bt is under inquiry or page scan */
@@ -4347,21 +4374,22 @@
}
}
-#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
- btc8723b2ant_query_bt_info(btcoexist);
-#else
- btc8723b2ant_monitor_bt_ctr(btcoexist);
- btc8723b2ant_monitor_wifi_ctr(btcoexist);
+ if (!btcoexist->auto_report_2ant) {
+ btc8723b2ant_query_bt_info(btcoexist);
+ } else {
+ btc8723b2ant_monitor_bt_ctr(btcoexist);
+ btc8723b2ant_monitor_wifi_ctr(btcoexist);
- /* for some BT speakers that High-Priority pkts appear before
- * playing, this will cause HID exist
- */
- if ((coex_sta->high_priority_tx + coex_sta->high_priority_rx < 50) &&
- (bt_link_info->hid_exist))
- bt_link_info->hid_exist = false;
+ /* for some BT speakers, High-Priority pkts appear before
+ * playing starts; this causes a false HID detection
+ */
+ if ((coex_sta->high_priority_tx +
+ coex_sta->high_priority_rx < 50) &&
+ (bt_link_info->hid_exist))
+ bt_link_info->hid_exist = false;
- if (btc8723b2ant_is_wifi_status_changed(btcoexist) ||
- coex_dm->auto_tdma_adjust)
- btc8723b2ant_run_coexist_mechanism(btcoexist);
-#endif
+ if (btc8723b2ant_is_wifi_status_changed(btcoexist) ||
+ coex_dm->auto_tdma_adjust)
+ btc8723b2ant_run_coexist_mechanism(btcoexist);
+ }
}
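
btc8723b2ant_bt_auto_report() above follows the driver's pre/cur caching
idiom: record the requested value in cur, skip the firmware write when the
value is unchanged unless force_exec is set, then latch cur into pre. A
standalone sketch of that idiom; the H2C write is stubbed out with printf,
while the command id 0x68 and the BIT(0) enable flag come from the hunk:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct dm_sketch {
            bool pre_bt_auto_report;
            bool cur_bt_auto_report;
    };

    static void send_h2c_0x68(bool enable)
    {
            uint8_t h2c_parameter[1] = { 0 };

            if (enable)
                    h2c_parameter[0] |= 0x01; /* BIT(0): enable auto report */
            printf("H2C 0x68 <- 0x%02x\n", h2c_parameter[0]);
    }

    static void bt_auto_report(struct dm_sketch *dm, bool force_exec,
                               bool enable)
    {
            dm->cur_bt_auto_report = enable;
            if (!force_exec &&
                dm->pre_bt_auto_report == dm->cur_bt_auto_report)
                    return; /* unchanged: skip the redundant write */
            send_h2c_0x68(dm->cur_bt_auto_report);
            dm->pre_bt_auto_report = dm->cur_bt_auto_report;
    }

    int main(void)
    {
            struct dm_sketch dm = { 0 };

            bt_auto_report(&dm, false, true);  /* writes */
            bt_auto_report(&dm, false, true);  /* skipped, no change */
            bt_auto_report(&dm, true, true);   /* forced write */
            return 0;
    }
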
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
index 18a35c7..50726be 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
@@ -28,8 +28,6 @@
/************************************************************************
* The following is for 8723B 2Ant BT Co-exist definition
************************************************************************/
-#define BT_AUTO_REPORT_ONLY_8723B_2ANT 1
-
#define BT_INFO_8723B_2ANT_B_FTP BIT7
#define BT_INFO_8723B_2ANT_B_A2DP BIT6
#define BT_INFO_8723B_2ANT_B_HID BIT5
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
index 5e9f3b0..4efac5f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
@@ -1107,8 +1107,8 @@
0x3, 0x11, 0x10);
break;
case 6:
- btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa,
- 0x3, 0x0, 0x0);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x20,
+ 0x3, 0x11, 0x13);
break;
case 7:
btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xc,
@@ -1128,8 +1128,8 @@
0xa, 0x0, 0x40);
break;
case 11:
- btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x14,
- 0x03, 0x10, 0x10);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x15,
+ 0x03, 0x10, 0x50);
rssi_adjust_val = 20;
break;
case 12:
@@ -1137,8 +1137,8 @@
0x0a, 0x0, 0x50);
break;
case 13:
- btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x18,
- 0x18, 0x0, 0x10);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x12,
+ 0x12, 0x0, 0x50);
break;
case 14:
btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1e,
@@ -1163,8 +1163,8 @@
0x03, 0x11, 0x10);
break;
case 21:
- btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x15,
- 0x03, 0x11, 0x10);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x25,
+ 0x03, 0x11, 0x11);
break;
case 22:
btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x25,
@@ -1204,16 +1204,16 @@
0x1a, 0x1, 0x10);
break;
case 30:
- btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x14,
- 0x3, 0x10, 0x50);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x30,
+ 0x3, 0x10, 0x10);
break;
case 31:
btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x1a,
0x1a, 0, 0x58);
break;
case 32:
- btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x61, 0xa,
- 0x3, 0x10, 0x0);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x35,
+ 0x3, 0x11, 0x11);
break;
case 33:
btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x25,
@@ -1231,6 +1231,28 @@
btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x12,
0x3, 0x14, 0x50);
break;
+ case 40:
+ /* SoftAP only with no STA associated and BT disabled:
+ * use TDMA mode for power saving
+ *
+ * otherwise, SoftAP mode with the screen off costs
+ * 70-80 mA on a phone
+ */
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x23, 0x18,
+ 0x00, 0x10, 0x24);
+ break;
+ case 41:
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x15,
+ 0x3, 0x11, 0x11);
+ break;
+ case 42:
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x20,
+ 0x3, 0x11, 0x11);
+ break;
+ case 43:
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x30,
+ 0x3, 0x10, 0x11);
+ break;
}
} else {
/* disable PS tdma */
@@ -1619,15 +1641,23 @@
return;
} else if (bt_link_info->a2dp_only) {
/* A2DP */
- if ((bt_rssi_state != BTC_RSSI_STATE_HIGH) &&
- (bt_rssi_state != BTC_RSSI_STATE_STAY_HIGH)) {
+ if (wifi_status == BT_8821A_1ANT_WIFI_STATUS_CONNECTED_IDLE) {
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32);
+ btc8821a1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
+ coex_dm->auto_tdma_adjust = false;
+ } else if ((bt_rssi_state != BTC_RSSI_STATE_HIGH) &&
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+ btc8821a1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
+ } else {
/* for low BT RSSI */
- btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+ btc8821a1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
coex_dm->auto_tdma_adjust = false;
}
-
- btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
} else if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) {
/* HID+A2DP */
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
@@ -1638,7 +1668,7 @@
} else {
/*for low BT RSSI*/
btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
+ true, 14);
coex_dm->auto_tdma_adjust = false;
}
@@ -1647,13 +1677,13 @@
(bt_link_info->hid_exist && bt_link_info->pan_exist)) {
/* PAN(OPP, FTP), HID+PAN(OPP, FTP) */
btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
- btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6);
coex_dm->auto_tdma_adjust = false;
} else if (((bt_link_info->a2dp_exist) && (bt_link_info->pan_exist)) ||
(bt_link_info->hid_exist && bt_link_info->a2dp_exist &&
bt_link_info->pan_exist)) {
/* A2DP+PAN(OPP, FTP), HID+A2DP+PAN(OPP, FTP) */
- btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 43);
btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
coex_dm->auto_tdma_adjust = false;
} else {
@@ -1718,52 +1748,49 @@
/* tdma and coex table */
if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
- if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) {
- btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
+ if (bt_link_info->a2dp_exist) {
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
btc8821a1ant_coex_table_with_type(btcoexist,
NORMAL_EXEC, 1);
} else {
- btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
- btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
- }
- } else if ((BT_8821A_1ANT_BT_STATUS_SCO_BUSY ==
- coex_dm->bt_status) ||
- (BT_8821A_1ANT_BT_STATUS_ACL_SCO_BUSY ==
- coex_dm->bt_status)) {
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+ btc8821a1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 4);
+ }
+ } else if ((coex_dm->bt_status == BT_8821A_1ANT_BT_STATUS_SCO_BUSY) ||
+ (coex_dm->bt_status ==
+ BT_8821A_1ANT_BT_STATUS_ACL_SCO_BUSY)) {
btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist,
BT_8821A_1ANT_WIFI_STATUS_CONNECTED_SCAN);
} else {
- btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
- btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
}
}
static void btc8821a1ant_act_wifi_conn_sp_pkt(struct btc_coexist *btcoexist)
{
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool hs_connecting = false;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting);
btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
0x0, 0x0);
/* tdma and coex table */
- if (coex_dm->bt_status == BT_8821A_1ANT_BT_STATUS_ACL_BUSY) {
- if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) {
- btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 22);
- btc8821a1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 1);
- } else {
- btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 20);
- btc8821a1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 1);
- }
- } else {
- btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+ if ((bt_link_info->sco_exist) || (bt_link_info->hid_exist) ||
+ (bt_link_info->a2dp_exist)) {
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
+ }
+
+ if ((bt_link_info->hid_exist) && (bt_link_info->a2dp_exist)) {
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ } else if (bt_link_info->pan_exist) {
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
+ } else {
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
}
}
@@ -1773,6 +1800,7 @@
bool wifi_busy = false;
bool scan = false, link = false, roam = false;
bool under_4way = false;
+ bool ap_enable = false;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], CoexForWifiConnect()===>\n");
@@ -1790,24 +1818,37 @@
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
if (scan || link || roam) {
- btc8821a1ant_action_wifi_connected_scan(btcoexist);
+ if (scan)
+ btc8821a1ant_action_wifi_connected_scan(btcoexist);
+ else
+ btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
+
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
return;
}
/* power save state*/
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ &ap_enable);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY ==
- coex_dm->bt_status && !btcoexist->bt_link_info.hid_only)
- btc8821a1ant_power_save_state(btcoexist,
- BTC_PS_LPS_ON, 0x50, 0x4);
- else
+ coex_dm->bt_status && !ap_enable &&
+ !btcoexist->bt_link_info.hid_only) {
+ if (!wifi_busy && btcoexist->bt_link_info.a2dp_only)
+ /* A2DP */
+ btc8821a1ant_power_save_state(btcoexist,
+ BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ else
+ btc8821a1ant_power_save_state(btcoexist, BTC_PS_LPS_ON,
+ 0x50, 0x4);
+ } else {
btc8821a1ant_power_save_state(btcoexist,
BTC_PS_WIFI_NATIVE,
0x0, 0x0);
+ }
/* tdma and coex table */
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
if (!wifi_busy) {
if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
btc8821a1ant_act_wifi_con_bt_acl_busy(btcoexist,
@@ -1819,8 +1860,7 @@
btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist,
BT_8821A_1ANT_WIFI_STATUS_CONNECTED_IDLE);
} else {
- btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 5);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
btc8821a1ant_coex_table_with_type(btcoexist,
NORMAL_EXEC, 2);
}
@@ -1835,7 +1875,7 @@
btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist,
BT_8821A_1ANT_WIFI_STATUS_CONNECTED_BUSY);
} else {
- btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
btc8821a1ant_coex_table_with_type(btcoexist,
NORMAL_EXEC, 2);
}
@@ -1988,11 +2028,11 @@
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
btc8821a1ant_limited_tx(btcoexist,
NORMAL_EXEC, 1, 1,
- 1, 1);
+ 0, 1);
} else {
btc8821a1ant_limited_tx(btcoexist,
NORMAL_EXEC, 1, 1,
- 1, 1);
+ 0, 1);
}
} else {
btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC,
@@ -2056,7 +2096,6 @@
*/
btc8821a1ant_sw_mechanism(btcoexist, false);
- btc8821a1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
btc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
}
@@ -2116,6 +2155,7 @@
void ex_btc8821a1ant_init_hwconfig(struct btc_coexist *btcoexist, bool wifionly)
{
btc8821a1ant_init_hw_config(btcoexist, true, wifionly);
+ btcoexist->auto_report_1ant = true;
}
void ex_btc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
@@ -2406,9 +2446,8 @@
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)",
coex_sta->low_priority_rx, coex_sta->low_priority_tx);
-#if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 1)
- btc8821a1ant_monitor_bt_ctr(btcoexist);
-#endif
+ if (btcoexist->auto_report_1ant)
+ btc8821a1ant_monitor_bt_ctr(btcoexist);
btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
}
@@ -2434,7 +2473,7 @@
btc8821a1ant_set_ant_path(btcoexist,
BTC_ANT_PATH_BT, false, true);
/* set PTA control */
- btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
btc8821a1ant_coex_table_with_type(btcoexist,
NORMAL_EXEC, 0);
} else if (BTC_IPS_LEAVE == type) {
@@ -2442,7 +2481,9 @@
"[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
- btc8821a1ant_run_coexist_mechanism(btcoexist);
+ btc8821a1ant_init_hw_config(btcoexist, false, false);
+ btc8821a1ant_init_coex_dm(btcoexist);
+ btc8821a1ant_query_bt_info(btcoexist);
}
}
@@ -2484,6 +2525,19 @@
return;
}
+ if (type == BTC_SCAN_START) {
+ coex_sta->wifi_is_high_pri_task = true;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN START notify\n");
+
+ /* Force antenna setup for no scan result issue */
+ btc8821a1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
+ } else {
+ coex_sta->wifi_is_high_pri_task = false;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN FINISH notify\n");
+ }
+
if (coex_sta->bt_disabled)
return;
@@ -2538,7 +2592,7 @@
void ex_btc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool wifi_connected = false, bt_hs_on = false;
+ bool wifi_connected = false, bt_hs_on = false;
u32 wifi_link_status = 0;
u32 num_of_wifi_link = 0;
bool bt_ctrl_agg_buf_size = false;
@@ -2556,6 +2610,18 @@
return;
}
+ if (type == BTC_ASSOCIATE_START) {
+ coex_sta->wifi_is_high_pri_task = true;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT START notify\n");
+ coex_dm->arp_cnt = 0;
+ } else {
+ coex_sta->wifi_is_high_pri_task = false;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT FINISH notify\n");
+ coex_dm->arp_cnt = 0;
+ }
+
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
&wifi_link_status);
num_of_wifi_link = wifi_link_status >> 16;
@@ -2621,6 +2687,7 @@
} else {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], MEDIA disconnect notify\n");
+ coex_dm->arp_cnt = 0;
}
/* only 2.4G we need to inform bt the chnl mask */
@@ -2674,6 +2741,24 @@
return;
}
+ if (type == BTC_PACKET_DHCP || type == BTC_PACKET_EAPOL ||
+ type == BTC_PACKET_ARP) {
+ coex_sta->wifi_is_high_pri_task = true;
+
+ if (type == BTC_PACKET_ARP) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], specific Packet ARP notify\n");
+ } else {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], specific Packet DHCP or EAPOL notify\n");
+ }
+ } else {
+ coex_sta->wifi_is_high_pri_task = false;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], specific Packet [Type = %d] notify\n",
+ type);
+ }
+
coex_sta->special_pkt_period_cnt = 0;
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
@@ -2696,8 +2781,20 @@
return;
}
- if (BTC_PACKET_DHCP == type ||
- BTC_PACKET_EAPOL == type) {
+ if (type == BTC_PACKET_DHCP || type == BTC_PACKET_EAPOL ||
+ type == BTC_PACKET_ARP) {
+ if (type == BTC_PACKET_ARP) {
+ coex_dm->arp_cnt++;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ARP Packet Count = %d\n",
+ coex_dm->arp_cnt);
+ if (coex_dm->arp_cnt >= 10)
+ /* if ARP pkt count > 10 after connect, do not go to
+ * btc8821a1ant_act_wifi_conn_sp_pkt
+ */
+ return;
+ }
+
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], special Packet(%d) notify\n", type);
btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
@@ -2742,14 +2839,28 @@
}
if (BT_INFO_SRC_8821A_1ANT_WIFI_FW != rsp_source) {
- coex_sta->bt_retry_cnt = /* [3:0]*/
- coex_sta->bt_info_c2h[rsp_source][2]&0xf;
+ /* [3:0] */
+ coex_sta->bt_retry_cnt =
+ coex_sta->bt_info_c2h[rsp_source][2] & 0xf;
coex_sta->bt_rssi =
- coex_sta->bt_info_c2h[rsp_source][3]*2+10;
+ coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10;
- coex_sta->bt_info_ext =
- coex_sta->bt_info_c2h[rsp_source][4];
+ coex_sta->bt_info_ext = coex_sta->bt_info_c2h[rsp_source][4];
+
+ coex_sta->bt_tx_rx_mask =
+ (coex_sta->bt_info_c2h[rsp_source][2] & 0x40);
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TX_RX_MASK,
+ &coex_sta->bt_tx_rx_mask);
+ if (!coex_sta->bt_tx_rx_mask) {
+ /* BT info is responded by BT FW and BT RF REG 0x3C !=
+ * 0x15 => need to switch BT TRx Mask
+ */
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Switch BT TRx Mask since BT RF REG 0x3C != 0x15\n");
+ btcoexist->btc_set_bt_reg(btcoexist, BTC_BT_REG_RF,
+ 0x3c, 0x15);
+ }
/* Here we need to resend some wifi info to BT
* because bt is reset and lost the info
@@ -2831,11 +2942,11 @@
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info&BT_INFO_8821A_1ANT_B_SCO_ESCO) ||
- (bt_info&BT_INFO_8821A_1ANT_B_SCO_BUSY)) {
+ (bt_info & BT_INFO_8821A_1ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_SCO_BUSY;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
- } else if (bt_info&BT_INFO_8821A_1ANT_B_ACL_BUSY) {
+ } else if (bt_info & BT_INFO_8821A_1ANT_B_ACL_BUSY) {
if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
coex_dm->auto_tdma_adjust = false;
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_ACL_BUSY;
@@ -2964,10 +3075,10 @@
"[BTCoex], ****************************************************************\n");
}
-#if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0)
- btc8821a1ant_query_bt_info(btcoexist);
- btc8821a1ant_monitor_bt_ctr(btcoexist);
-#else
- coex_sta->special_pkt_period_cnt++;
-#endif
+ if (!btcoexist->auto_report_1ant) {
+ btc8821a1ant_query_bt_info(btcoexist);
+ btc8821a1ant_monitor_bt_ctr(btcoexist);
+ } else {
+ coex_sta->special_pkt_period_cnt++;
+ }
}
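
The connect/special-packet changes above count ARP frames after association
and stop invoking btc8821a1ant_act_wifi_conn_sp_pkt() once ten have been
seen, so steady-state ARP chatter cannot keep wifi pinned as a high-priority
task. A standalone sketch of that throttle, with illustrative names:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum pkt_type { PKT_DHCP, PKT_EAPOL, PKT_ARP, PKT_OTHER };

    /* Returns true when the special-packet coexistence action runs. */
    static bool special_pkt_action(enum pkt_type type, uint32_t *arp_cnt)
    {
            if (type == PKT_ARP && ++(*arp_cnt) >= 10)
                    return false; /* ARP flood after connect: ignore */
            return type == PKT_DHCP || type == PKT_EAPOL ||
                   type == PKT_ARP;
    }

    int main(void)
    {
            uint32_t arp_cnt = 0; /* reset on connect/disconnect */
            int i;

            for (i = 0; i < 12; i++)
                    printf("ARP %2d -> act=%d\n", i + 1,
                           (int)special_pkt_action(PKT_ARP, &arp_cnt));
            return 0;
    }
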
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
index 1bd1ebe..cb32e7a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
@@ -27,8 +27,6 @@
* The following is for 8821A 1ANT BT Co-exist definition
*===========================================
*/
-#define BT_AUTO_REPORT_ONLY_8821A_1ANT 0
-
#define BT_INFO_8821A_1ANT_B_FTP BIT7
#define BT_INFO_8821A_1ANT_B_A2DP BIT6
#define BT_INFO_8821A_1ANT_B_HID BIT5
@@ -135,6 +133,7 @@
u8 cur_retry_limit_type;
u8 pre_ampdu_time_type;
u8 cur_ampdu_time_type;
+ u32 arp_cnt;
u8 error_condition;
};
@@ -155,6 +154,7 @@
u32 low_priority_tx;
u32 low_priority_rx;
u8 bt_rssi;
+ bool bt_tx_rx_mask;
u8 pre_bt_rssi_state;
u8 pre_wifi_rssi_state[4];
bool c2h_bt_info_req_sent;
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
index 841b4a8..41943c3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
@@ -3220,12 +3220,16 @@
/* HID+A2DP+PAN(EDR) */
static void btc8821a2ant_act_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
{
- u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
+ u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
u32 wifi_bw;
- bt_info_ext = coex_sta->bt_info_ext;
wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
- bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+ wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
+ BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
+ bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
+ 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -3235,44 +3239,32 @@
else
btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
-
- if (wifi_bw == BTC_WIFI_BW_LEGACY) {
- /* for HID at 11b/g mode */
- btc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
- 0x5a5a5a5a, 0xffff, 0x3);
+ if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) {
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
} else {
- /* for HID quality & wifi performance balance at 11n mode */
- btc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
- 0x5a5a5a5a, 0xffff, 0x3);
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 14);
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+ 0x4);
}
- if (BTC_WIFI_BW_HT40 == wifi_bw) {
- /* fw mechanism */
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- if (bt_info_ext&BIT0) {
- /* a2dp basic rate */
- btc8821a2ant_tdma_duration_adjust(btcoexist,
- true, true, 3);
- } else {
- /* a2dp edr rate */
- btc8821a2ant_tdma_duration_adjust(btcoexist,
- true, true, 3);
- }
- } else {
- if (bt_info_ext&BIT0) {
- /* a2dp basic rate */
- btc8821a2ant_tdma_duration_adjust(btcoexist,
- true, true, 3);
- } else {
- /* a2dp edr rate */
- btc8821a2ant_tdma_duration_adjust(btcoexist,
- true, true, 3);
- }
- }
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- /* sw mechanism */
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ if (wifi_bw == BTC_WIFI_BW_HT40)
+ btc8821a2ant_tdma_duration_adjust(btcoexist, true,
+ true, 3);
+ else
+ btc8821a2ant_tdma_duration_adjust(btcoexist, true,
+ false, 3);
+ } else {
+ btc8821a2ant_tdma_duration_adjust(btcoexist, true, true, 3);
+ }
+
+ /* sw mechanism */
+ if (wifi_bw == BTC_WIFI_BW_HT40) {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
btc8821a2ant_sw_mechanism1(btcoexist, true, true,
@@ -3286,33 +3278,6 @@
false, 0x18);
}
} else {
- /* fw mechanism */
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- if (bt_info_ext&BIT0) {
- /* a2dp basic rate */
- btc8821a2ant_tdma_duration_adjust(btcoexist,
- true, false, 3);
- } else {
- /* a2dp edr rate */
- btc8821a2ant_tdma_duration_adjust(btcoexist,
- true, false, 3);
- }
- } else {
- if (bt_info_ext&BIT0) {
- /* a2dp basic rate */
- btc8821a2ant_tdma_duration_adjust(btcoexist,
- true, true,
- 3);
- } else {
- /* a2dp edr rate */
- btc8821a2ant_tdma_duration_adjust(btcoexist,
- true, true,
- 3);
- }
- }
-
- /* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
btc8821a2ant_sw_mechanism1(btcoexist, false, true,
@@ -3330,19 +3295,46 @@
static void btc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
{
- u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
u32 wifi_bw;
+ u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
+ u8 ap_num = 0;
- bt_info_ext = coex_sta->bt_info_ext;
wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
- bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+ wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
+ BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
+ bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
+ 3, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 37);
- if (BTC_RSSI_HIGH(bt_rssi_state))
- btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
- else
- btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, true, 0x5);
+ btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (wifi_bw == BTC_WIFI_BW_LEGACY) {
+ if (BTC_RSSI_HIGH(bt_rssi_state))
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+ else if (BTC_RSSI_MEDIUM(bt_rssi_state))
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+ else
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+ } else {
+ /* only in 802.11N mode do we have to dec bt power to level 4 */
+ if (BTC_RSSI_HIGH(bt_rssi_state)) {
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM,
+ &ap_num);
+ if (ap_num < 10)
+ btc8821a2ant_dec_bt_pwr(btcoexist,
+ NORMAL_EXEC, 4);
+ else
+ btc8821a2ant_dec_bt_pwr(btcoexist,
+ NORMAL_EXEC, 2);
+ } else if (BTC_RSSI_MEDIUM(bt_rssi_state)) {
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+ } else {
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+ }
+ }
if (wifi_bw == BTC_WIFI_BW_LEGACY) {
btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
@@ -3354,36 +3346,15 @@
0x4);
}
- if (BTC_WIFI_BW_HT40 == wifi_bw) {
- /* fw mechanism */
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- if (bt_info_ext & BIT0) {
- /* a2dp basic rate */
- btc8821a2ant_tdma_duration_adjust(btcoexist,
- true, true,
- 2);
- } else {
- /* a2dp edr rate */
- btc8821a2ant_tdma_duration_adjust(btcoexist,
- true, true,
- 2);
- }
- } else {
- if (bt_info_ext & BIT0) {
- /* a2dp basic rate */
- btc8821a2ant_tdma_duration_adjust(btcoexist,
- true, true,
- 2);
- } else {
- /* a2dp edr rate */
- btc8821a2ant_tdma_duration_adjust(btcoexist,
- true, true,
- 2);
- }
- }
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23);
+ } else {
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23);
+ }
- /* sw mechanism */
+ /* sw mechanism */
+ if (wifi_bw == BTC_WIFI_BW_HT40) {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
btc8821a2ant_sw_mechanism1(btcoexist, true, true,
@@ -3397,36 +3368,6 @@
false, 0x18);
}
} else {
- /* fw mechanism */
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- if (bt_info_ext & BIT0) {
- /* a2dp basic rate */
- btc8821a2ant_tdma_duration_adjust(btcoexist,
- true, true,
- 2);
-
- } else {
- /* a2dp edr rate */
- btc8821a2ant_tdma_duration_adjust(btcoexist,
- true, true,
- 2);
- }
- } else {
- if (bt_info_ext & BIT0) {
- /*a2dp basic rate*/
- btc8821a2ant_tdma_duration_adjust(btcoexist,
- true, true,
- 2);
- } else {
- /*a2dp edr rate*/
- btc8821a2ant_tdma_duration_adjust(btcoexist,
- true, true,
- 2);
- }
- }
-
- /* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
btc8821a2ant_sw_mechanism1(btcoexist, false, true,
@@ -3544,14 +3485,14 @@
if (btc8821a2ant_is_common_action(btcoexist)) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action 2-Ant common\n");
- coex_dm->reset_tdma_adjust = true;
+ coex_dm->auto_tdma_adjust = true;
} else {
if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n",
coex_dm->pre_algorithm,
coex_dm->cur_algorithm);
- coex_dm->reset_tdma_adjust = true;
+ coex_dm->auto_tdma_adjust = false;
}
switch (coex_dm->cur_algorithm) {
case BT_8821A_2ANT_COEX_ALGO_SCO:
@@ -3614,6 +3555,26 @@
}
}
+static void btc8821a2ant_wifi_off_hw_cfg(struct btc_coexist *btcoexist)
+{
+ u8 h2c_parameter[2] = {0};
+ u32 fw_ver = 0;
+
+ /* set wlan_act to low */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
+
+ /* WiFi goes to standby while GNT_BT 0-->1 */
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x780);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+ if (fw_ver >= 0x180000) {
+ /* Use H2C to set GNT_BT to HIGH */
+ h2c_parameter[0] = 1;
+ btcoexist->btc_fill_h2c(btcoexist, 0x6E, 1, h2c_parameter);
+ } else {
+ btcoexist->btc_write_1byte(btcoexist, 0x765, 0x18);
+ }
+}
+
/**************************************************************
* extern function start with ex_btc8821a2ant_
**************************************************************/
@@ -3637,6 +3598,7 @@
/* Antenna config */
btc8821a2ant_set_ant_path(btcoexist, BTC_ANT_WIFI_AT_MAIN, true, false);
+ coex_sta->dis_ver_info_cnt = 0;
/* PTA parameter */
btc8821a2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
@@ -3648,6 +3610,43 @@
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
}
+void ex_btc8821a2ant_pre_load_firmware(struct btc_coexist *btcoexist)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ u8 u8tmp = 0x4; /* Set BIT2 by default since it's 2ant case */
+
+ /* S0 or S1 setting and local register setting (from this setting, fw
+ * can get the antenna number, S0/S1, ... info)
+ *
+ * Local setting bit definitions
+ * BIT0: "0" for no antenna inverse; "1" for antenna inverse
+ * BIT1: "0" for internal switch; "1" for external switch
+ * BIT2: "0" for one antenna; "1" for two antennas
+ * NOTE: the default here is all internal switch and 1 antenna ==>
+ * BIT1=0 and BIT2=0
+ */
+ if (btcoexist->chip_interface == BTC_INTF_USB) {
+ /* fixed at S0 for USB interface */
+ u8tmp |= 0x1; /* antenna inverse */
+ btcoexist->btc_write_local_reg_1byte(btcoexist, 0xfe08, u8tmp);
+ } else {
+ /* for PCIE and SDIO interface, we check efuse 0xc3[6] */
+ if (board_info->single_ant_path == 0) {
+ } else if (board_info->single_ant_path == 1) {
+ /* set to S0 */
+ u8tmp |= 0x1; /* antenna inverse */
+ }
+
+ if (btcoexist->chip_interface == BTC_INTF_PCI)
+ btcoexist->btc_write_local_reg_1byte(btcoexist, 0x384,
+ u8tmp);
+ else if (btcoexist->chip_interface == BTC_INTF_SDIO)
+ btcoexist->btc_write_local_reg_1byte(btcoexist, 0x60,
+ u8tmp);
+ }
+}
+
void ex_btc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -3748,14 +3747,6 @@
((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ?
"uplink" : "downlink")));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]",
- ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
- ((BT_8821A_2ANT_BT_STATUS_IDLE == coex_dm->bt_status)
- ? "idle" : ((BT_8821A_2ANT_BT_STATUS_CON_IDLE ==
- coex_dm->bt_status) ? "connected-idle" : "busy"))),
- coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
-
if (stack_info->profile_notified) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
@@ -3791,11 +3782,6 @@
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
"============[Sw mechanism]============");
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d/ %d/ %d ",
- "SM1[ShRf/ LpRA/ LimDig/ btLna]",
- coex_dm->cur_rf_rx_lpf_shrink, coex_dm->cur_low_penalty_ra,
- coex_dm->limited_dig, coex_dm->cur_bt_lna_constrain);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"\r\n %-35s = %d/ %d/ %d(0x%x) ",
"SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
@@ -3900,11 +3886,16 @@
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
+ btc8821a2ant_wifi_off_hw_cfg(btcoexist);
+ btc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
btc8821a2ant_coex_all_off(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
+ ex_btc8821a2ant_init_hwconfig(btcoexist);
+ btc8821a2ant_init_coex_dm(btcoexist);
+ btc8821a2ant_query_bt_info(btcoexist);
}
}
@@ -4016,9 +4007,12 @@
u8 bt_info = 0;
u8 i, rsp_source = 0;
bool bt_busy = false, limited_dig = false;
- bool wifi_connected = false, bt_hs_on = false;
+ bool wifi_connected = false, wifi_under_5g = false;
coex_sta->c2h_bt_info_req_sent = false;
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
rsp_source = tmp_buf[0] & 0xf;
if (rsp_source >= BT_INFO_SRC_8821A_2ANT_MAX)
@@ -4041,16 +4035,35 @@
}
}
+ if (btcoexist->manual_control) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n");
+ return;
+ }
+
if (BT_INFO_SRC_8821A_2ANT_WIFI_FW != rsp_source) {
/* [3:0] */
coex_sta->bt_retry_cnt =
coex_sta->bt_info_c2h[rsp_source][2]&0xf;
coex_sta->bt_rssi =
- coex_sta->bt_info_c2h[rsp_source][3]*2+10;
+ coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10;
- coex_sta->bt_info_ext =
- coex_sta->bt_info_c2h[rsp_source][4];
+ coex_sta->bt_info_ext = coex_sta->bt_info_c2h[rsp_source][4];
+
+ coex_sta->bt_tx_rx_mask =
+ (coex_sta->bt_info_c2h[rsp_source][2] & 0x40);
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TX_RX_MASK,
+ &coex_sta->bt_tx_rx_mask);
+ if (coex_sta->bt_tx_rx_mask) {
+ /* BT info is responded by BT FW and BT RF REG 0x3C !=
+ * 0x01 => need to switch BT TRx Mask
+ */
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Switch BT TRx Mask since BT RF REG 0x3C != 0x01\n");
+ btcoexist->btc_set_bt_reg(btcoexist, BTC_BT_REG_RF,
+ 0x3c, 0x01);
+ }
/* Here we need to resend some wifi info to BT
* because bt is reset and lost the info
@@ -4068,70 +4081,121 @@
}
- if ((coex_sta->bt_info_ext & BIT3)) {
- btc8821a2ant_ignore_wlan_act(btcoexist,
- FORCE_EXEC, false);
- } else {
- /* BT already NOT ignore Wlan active, do nothing here.*/
+ if (!btcoexist->manual_control && !wifi_under_5g) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info = 0x%x!!\n",
+ coex_sta->bt_info_ext);
+ if ((coex_sta->bt_info_ext & BIT(3))) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit3=1, wifi_connected=%d\n",
+ wifi_connected);
+ if (wifi_connected) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
+ btc8821a2ant_ignore_wlan_act(btcoexist,
+ FORCE_EXEC,
+ false);
+ }
+ } else {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit3=0, wifi_connected=%d\n",
+ wifi_connected);
+ /* BT already NOT ignore Wlan active, do nothing
+ * here.
+ */
+ if (!wifi_connected) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT ext info bit3 check, set BT to ignore Wlan active!!\n");
+ btc8821a2ant_ignore_wlan_act(
+ btcoexist, FORCE_EXEC, true);
+ }
+ }
}
}
- btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
/* check BIT2 first ==> check if bt is under inquiry or page scan*/
if (bt_info & BT_INFO_8821A_2ANT_B_INQ_PAGE) {
coex_sta->c2h_bt_inquiry_page = true;
- coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_NON_IDLE;
} else {
coex_sta->c2h_bt_inquiry_page = false;
- if (bt_info == 0x1) {
- /* connection exists but not busy*/
- coex_sta->bt_link_exist = true;
- coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_CON_IDLE;
- } else if (bt_info & BT_INFO_8821A_2ANT_B_CONNECTION) {
- /* connection exists and some link is busy*/
- coex_sta->bt_link_exist = true;
- if (bt_info & BT_INFO_8821A_2ANT_B_FTP)
- coex_sta->pan_exist = true;
- else
- coex_sta->pan_exist = false;
- if (bt_info & BT_INFO_8821A_2ANT_B_A2DP)
- coex_sta->a2dp_exist = true;
- else
- coex_sta->a2dp_exist = false;
- if (bt_info & BT_INFO_8821A_2ANT_B_HID)
- coex_sta->hid_exist = true;
- else
- coex_sta->hid_exist = false;
- if (bt_info & BT_INFO_8821A_2ANT_B_SCO_ESCO)
- coex_sta->sco_exist = true;
- else
- coex_sta->sco_exist = false;
- coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_NON_IDLE;
- } else {
- coex_sta->bt_link_exist = false;
+ }
+ /* set link exist status */
+ if (!(bt_info & BT_INFO_8821A_2ANT_B_CONNECTION)) {
+ coex_sta->bt_link_exist = false;
+ coex_sta->pan_exist = false;
+ coex_sta->a2dp_exist = false;
+ coex_sta->hid_exist = false;
+ coex_sta->sco_exist = false;
+ } else { /* connection exists */
+ coex_sta->bt_link_exist = true;
+ if (bt_info & BT_INFO_8821A_2ANT_B_FTP)
+ coex_sta->pan_exist = true;
+ else
coex_sta->pan_exist = false;
+ if (bt_info & BT_INFO_8821A_2ANT_B_A2DP)
+ coex_sta->a2dp_exist = true;
+ else
coex_sta->a2dp_exist = false;
+ if (bt_info & BT_INFO_8821A_2ANT_B_HID)
+ coex_sta->hid_exist = true;
+ else
coex_sta->hid_exist = false;
+ if (bt_info & BT_INFO_8821A_2ANT_B_SCO_ESCO)
+ coex_sta->sco_exist = true;
+ else
coex_sta->sco_exist = false;
- coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_IDLE;
- }
- btc8821a2ant_update_bt_link_info(btcoexist);
+ if ((!coex_sta->hid_exist) &&
+ (!coex_sta->c2h_bt_inquiry_page) &&
+ (!coex_sta->sco_exist)) {
+ if (coex_sta->high_priority_tx +
+ coex_sta->high_priority_rx >= 160)
+ coex_sta->hid_exist = true;
+ }
}
- if (BT_8821A_2ANT_BT_STATUS_NON_IDLE == coex_dm->bt_status)
+ btc8821a2ant_update_bt_link_info(btcoexist);
+
+ if (!(bt_info & BT_INFO_8821A_2ANT_B_CONNECTION)) {
+ coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_IDLE;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
+ } else if (bt_info == BT_INFO_8821A_2ANT_B_CONNECTION) {
+ /* connection exists but not busy */
+ coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_CON_IDLE;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ } else if ((bt_info & BT_INFO_8821A_2ANT_B_SCO_ESCO) ||
+ (bt_info & BT_INFO_8821A_2ANT_B_SCO_BUSY)) {
+ coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_SCO_BUSY;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+ } else if (bt_info & BT_INFO_8821A_2ANT_B_ACL_BUSY) {
+ coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_ACL_BUSY;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ } else {
+ coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_MAX;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
+ }
+
+ if ((coex_dm->bt_status == BT_8821A_2ANT_BT_STATUS_ACL_BUSY) ||
+ (coex_dm->bt_status == BT_8821A_2ANT_BT_STATUS_SCO_BUSY) ||
+ (coex_dm->bt_status == BT_8821A_2ANT_BT_STATUS_ACL_SCO_BUSY)) {
bt_busy = true;
- else
+ limited_dig = true;
+ } else {
bt_busy = false;
+ limited_dig = false;
+ }
+
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
- if (BT_8821A_2ANT_BT_STATUS_IDLE != coex_dm->bt_status)
- limited_dig = true;
- else
- limited_dig = false;
coex_dm->limited_dig = limited_dig;
- btcoexist->btc_set(btcoexist,
- BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
btc8821a2ant_run_coexist_mechanism(btcoexist);
}
@@ -4143,46 +4207,57 @@
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Halt notify\n");
+ btc8821a2ant_wifi_off_hw_cfg(btcoexist);
btc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
ex_btc8821a2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
}
+void ex_btc8821a2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n");
+
+ if (pnp_state == BTC_WIFI_PNP_SLEEP) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to SLEEP\n");
+ } else if (pnp_state == BTC_WIFI_PNP_WAKE_UP) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to WAKE UP\n");
+ ex_btc8821a2ant_init_hwconfig(btcoexist);
+ btc8821a2ant_init_coex_dm(btcoexist);
+ btc8821a2ant_query_bt_info(btcoexist);
+ }
+}
+
void ex_btc8821a2ant_periodical(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- static u8 dis_ver_info_cnt;
- struct btc_board_info *board_info = &btcoexist->board_info;
- struct btc_stack_info *stack_info = &btcoexist->stack_info;
- u32 fw_ver = 0, bt_patch_ver = 0;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], ==========================Periodical===========================\n");
- if (dis_ver_info_cnt <= 5) {
- dis_ver_info_cnt += 1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ****************************************************************\n");
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
- board_info->pg_ant_num,
- board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
- stack_info->profile_notified ? "Yes" : "No",
- stack_info->hci_version);
- btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
- &bt_patch_ver);
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
- glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant,
- fw_ver, bt_patch_ver, bt_patch_ver);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ****************************************************************\n");
+ if (coex_sta->dis_ver_info_cnt <= 5) {
+ coex_sta->dis_ver_info_cnt += 1;
+ if (coex_sta->dis_ver_info_cnt == 3) {
+ /* Antenna config to set 0x765 = 0x0 (GNT_BT controlled by
+ * PTA) after initialization
+ */
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Set GNT_BT control by PTA\n");
+ btc8821a2ant_set_ant_path(btcoexist,
+ BTC_ANT_WIFI_AT_MAIN, false, false);
+ }
}
- btc8821a2ant_query_bt_info(btcoexist);
- btc8821a2ant_monitor_bt_ctr(btcoexist);
- btc8821a2ant_monitor_wifi_ctr(btcoexist);
+ if (btcoexist->auto_report_2ant) {
+ btc8821a2ant_query_bt_info(btcoexist);
+ } else {
+ btc8821a2ant_monitor_bt_ctr(btcoexist);
+ btc8821a2ant_monitor_wifi_ctr(btcoexist);
+
+ if (btc8821a2ant_is_wifi_status_changed(btcoexist) ||
+ coex_dm->auto_tdma_adjust)
+ btc8821a2ant_run_coexist_mechanism(btcoexist);
+ }
}
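
The bt_info_notify rework above decodes the C2H status byte into the new
IDLE/CON_IDLE/SCO_BUSY/ACL_BUSY states in strict priority order. A standalone
sketch of that decode follows; the bit positions are assumptions for
illustration, the real BT_INFO_8821A_2ANT_B_* masks live in
halbtc8821a2ant.h:

    #include <stdint.h>
    #include <stdio.h>

    #define B_CONNECTION    0x01 /* assumed bit layout, see header */
    #define B_SCO_ESCO      0x02
    #define B_ACL_BUSY      0x08
    #define B_SCO_BUSY      0x10

    static const char *decode_bt_status(uint8_t bt_info)
    {
            if (!(bt_info & B_CONNECTION))
                    return "non-connected idle";
            if (bt_info == B_CONNECTION)
                    return "connected-idle"; /* connected, not busy */
            if (bt_info & (B_SCO_ESCO | B_SCO_BUSY))
                    return "SCO busy";
            if (bt_info & B_ACL_BUSY)
                    return "ACL busy";
            return "non-defined";
    }

    int main(void)
    {
            const uint8_t samples[] = { 0x00, 0x01, 0x03, 0x09 };
            unsigned int i;

            for (i = 0; i < sizeof(samples); i++)
                    printf("bt_info=0x%02x -> %s\n", samples[i],
                           decode_bt_status(samples[i]));
            return 0;
    }
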
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
index 535ca10..a1603e2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
@@ -54,6 +54,9 @@
BT_8821A_2ANT_BT_STATUS_IDLE = 0x0,
BT_8821A_2ANT_BT_STATUS_CON_IDLE = 0x1,
BT_8821A_2ANT_BT_STATUS_NON_IDLE = 0x2,
+ BT_8821A_2ANT_BT_STATUS_ACL_BUSY = 0x3,
+ BT_8821A_2ANT_BT_STATUS_SCO_BUSY = 0x4,
+ BT_8821A_2ANT_BT_STATUS_ACL_SCO_BUSY = 0x5,
BT_8821A_2ANT_BT_STATUS_MAX
};
@@ -76,10 +79,6 @@
/* fw mechanism */
bool pre_dec_bt_pwr_lvl;
bool cur_dec_bt_pwr_lvl;
- bool pre_bt_lna_constrain;
- bool cur_bt_lna_constrain;
- u8 pre_bt_psd_mode;
- u8 cur_bt_psd_mode;
u8 pre_fw_dac_swing_lvl;
u8 cur_fw_dac_swing_lvl;
bool cur_ignore_wlan_act;
@@ -143,6 +142,7 @@
u32 low_priority_tx;
u32 low_priority_rx;
u8 bt_rssi;
+ bool bt_tx_rx_mask;
u8 pre_bt_rssi_state;
u8 pre_wifi_rssi_state[4];
bool c2h_bt_info_req_sent;
@@ -164,6 +164,8 @@
u8 coex_table_type;
bool force_lps_on;
+
+ u8 dis_ver_info_cnt;
};
/*===========================================
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
index c827113..c5c360e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
@@ -30,6 +30,9 @@
#define NORMAL_EXEC false
#define FORCE_EXEC true
+#define BTC_RF_OFF 0x0
+#define BTC_RF_ON 0x1
+
#define BTC_RF_A RF90_PATH_A
#define BTC_RF_B RF90_PATH_B
#define BTC_RF_C RF90_PATH_C
@@ -196,6 +199,24 @@
BTC_WIFI_PNP_MAX
};
+enum btc_iot_peer {
+ BTC_IOT_PEER_UNKNOWN = 0,
+ BTC_IOT_PEER_REALTEK = 1,
+ BTC_IOT_PEER_REALTEK_92SE = 2,
+ BTC_IOT_PEER_BROADCOM = 3,
+ BTC_IOT_PEER_RALINK = 4,
+ BTC_IOT_PEER_ATHEROS = 5,
+ BTC_IOT_PEER_CISCO = 6,
+ BTC_IOT_PEER_MERU = 7,
+ BTC_IOT_PEER_MARVELL = 8,
+ BTC_IOT_PEER_REALTEK_SOFTAP = 9,
+ BTC_IOT_PEER_SELF_SOFTAP = 10, /* Self is SoftAP */
+ BTC_IOT_PEER_AIRGO = 11,
+ BTC_IOT_PEER_REALTEK_JAGUAR_BCUTAP = 12,
+ BTC_IOT_PEER_REALTEK_JAGUAR_CCUTAP = 13,
+ BTC_IOT_PEER_MAX,
+};
+
enum btc_get_type {
/* type bool */
BTC_GET_BL_HS_OPERATION,
@@ -235,6 +256,7 @@
BTC_GET_U1_WIFI_HS_CHNL,
BTC_GET_U1_MAC_PHY_MODE,
BTC_GET_U1_AP_NUM,
+ BTC_GET_U1_IOT_PEER,
/* for 1Ant */
BTC_GET_U1_LPS_MODE,
@@ -468,6 +490,7 @@
struct btc_bt_link_info {
bool bt_link_exist;
+ bool bt_hi_pri_link_exist;
bool sco_exist;
bool sco_only;
bool a2dp_exist;
@@ -496,6 +519,11 @@
enum btc_chip_interface chip_interface;
struct btc_bt_link_info bt_link_info;
+ /* boolean variables to replace BT_AUTO_REPORT_ONLY_XXXXY_ZANT
+ * configuration parameters
+ */
+ bool auto_report_1ant;
+ bool auto_report_2ant;
bool initilized;
bool stop_coex_dm;
bool manual_control;
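
BTC_GET_U1_IOT_PEER plugs into the existing btc_get accessor exactly the way
BTC_GET_U1_AP_NUM is queried in the 8821a code above. A hypothetical consumer
would look like the fragment below (driver context assumed; the workaround
branch is invented purely for illustration):

    u8 iot_peer = BTC_IOT_PEER_UNKNOWN;

    btcoexist->btc_get(btcoexist, BTC_GET_U1_IOT_PEER, &iot_peer);
    if (iot_peer == BTC_IOT_PEER_SELF_SOFTAP) {
            /* hypothetical: pick a SoftAP-friendly TDMA case here */
    }
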
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 2e6b888..df5f679 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -729,14 +729,12 @@
dev_kfree_skb_any(skb);
} else {
struct sk_buff *uskb = NULL;
- u8 *pdata;
uskb = dev_alloc_skb(skb->len + 128);
if (likely(uskb)) {
memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
sizeof(rx_status));
- pdata = (u8 *)skb_put(uskb, skb->len);
- memcpy(pdata, skb->data, skb->len);
+ skb_put_data(uskb, skb->data, skb->len);
dev_kfree_skb_any(skb);
ieee80211_rx_irqsafe(hw, uskb);
} else {
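
This pci.c change is one of several conversions in this merge to
skb_put_data(), which folds the skb_put()-then-memcpy pattern into a single
call that reserves len bytes at the skb tail and copies the data into them.
Side by side, using the variables from the hunk:

    /* old: grow the tail, then copy into the returned area */
    u8 *pdata = skb_put(uskb, skb->len);
    memcpy(pdata, skb->data, skb->len);

    /* new: one helper does both, returning the destination pointer */
    skb_put_data(uskb, skb->data, skb->len);
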
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
index 21ed9ad..a2eca66 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
@@ -620,8 +620,7 @@
u1rsvdpageloc, 3);
skb = dev_alloc_skb(totalpacketlen);
- memcpy(skb_put(skb, totalpacketlen),
- &reserved_page_packet, totalpacketlen);
+ skb_put_data(skb, &reserved_page_packet, totalpacketlen);
rtstatus = rtl_cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
index c7a7746..015476e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
@@ -647,8 +647,7 @@
skb = dev_alloc_skb(totalpacketlen);
- memcpy((u8 *)skb_put(skb, totalpacketlen),
- &reserved_page_packet, totalpacketlen);
+ skb_put_data(skb, &reserved_page_packet, totalpacketlen);
if (cmd_send_packet)
rtstatus = cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
index 41422e4..de6c342 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
@@ -512,7 +512,7 @@
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
rtl_get_tcb_desc(hw, info, sta, skb, tcb_desc);
- txdesc = (u8 *)skb_push(skb, RTL_TX_HEADER_SIZE);
+ txdesc = skb_push(skb, RTL_TX_HEADER_SIZE);
memset(txdesc, 0, RTL_TX_HEADER_SIZE);
SET_TX_DESC_PKT_SIZE(txdesc, pktlen);
SET_TX_DESC_LINIP(txdesc, 0);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
index 88faeab..f4129cf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
@@ -668,8 +668,7 @@
if (!skb) {
dlok = false;
} else {
- memcpy((u8 *) skb_put(skb, totalpacketlen),
- &reserved_page_packet, totalpacketlen);
+ skb_put_data(skb, &reserved_page_packet, totalpacketlen);
rtstatus = _rtl92d_cmd_send_packet(hw, skb);
if (rtstatus)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
index 1f42ce5..6f8f75a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
@@ -708,8 +708,7 @@
u1rsvdpageloc, 3);
skb = dev_alloc_skb(totalpacketlen);
- memcpy((u8 *)skb_put(skb, totalpacketlen),
- &reserved_page_packet, totalpacketlen);
+ skb_put_data(skb, &reserved_page_packet, totalpacketlen);
b_dlok = true;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index 6f5098a..11d97fa 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -2506,7 +2506,7 @@
"add one entry\n");
if (is_pairwise) {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwiase key\n");
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
index 89a0a28..e7b1d7c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
@@ -188,10 +188,9 @@
if (!skb)
return false;
skb_reserve(skb, extra_descoffset);
- seg_ptr = (u8 *)skb_put(skb, (u32)(frag_length -
- extra_descoffset));
- memcpy(seg_ptr, code_virtual_address + frag_offset,
- (u32)(frag_length - extra_descoffset));
+ seg_ptr = skb_put_data(skb,
+ code_virtual_address + frag_offset,
+ (u32)(frag_length - extra_descoffset));
tcb_desc = (struct rtl_tcb_desc *)(skb->cb);
tcb_desc->queue_index = TXCMD_QUEUE;
@@ -463,7 +462,7 @@
break;
/* Clear content */
- ph2c_buffer = (u8 *)skb_put(skb, (u32)len);
+ ph2c_buffer = skb_put(skb, (u32)len);
memset((ph2c_buffer + totallen + tx_desclen), 0, len);
/* CMD len */
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
index a954a87b..bf9859f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
@@ -470,8 +470,7 @@
u1rsvdpageloc, 3);
skb = dev_alloc_skb(totalpacketlen);
- memcpy((u8 *)skb_put(skb, totalpacketlen),
- &reserved_page_packet, totalpacketlen);
+ skb_put_data(skb, &reserved_page_packet, totalpacketlen);
rtstatus = rtl_cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index 859c045..5ac7b81 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -2264,7 +2264,7 @@
"add one entry\n");
if (is_pairwise) {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwiase key\n");
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
@@ -2313,7 +2313,7 @@
rtlpriv->btcoexist.eeprom_bt_radio_shared;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BT Coexistance = 0x%x\n",
+ "BT Coexistence = 0x%x\n",
rtlpriv->btcoexist.bt_coexistence);
if (rtlpriv->btcoexist.bt_coexistence) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
index 4fc839b..01205578 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
@@ -527,8 +527,7 @@
u1rsvdpageloc, sizeof(u1rsvdpageloc));
skb = dev_alloc_skb(totalpacketlen);
- memcpy((u8 *)skb_put(skb, totalpacketlen),
- &reserved_page_packet, totalpacketlen);
+ skb_put_data(skb, &reserved_page_packet, totalpacketlen);
rtstatus = rtl_cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 1acbfb8..a79f936 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -2637,7 +2637,7 @@
"add one entry\n");
if (is_pairwise) {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwiase key\n");
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
index 7335010..67aec20 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
@@ -1588,8 +1588,7 @@
&reserved_page_packet_8812[0], totalpacketlen);
skb = dev_alloc_skb(totalpacketlen);
- memcpy((u8 *)skb_put(skb, totalpacketlen),
- &reserved_page_packet_8812, totalpacketlen);
+ skb_put_data(skb, &reserved_page_packet_8812, totalpacketlen);
rtstatus = rtl_cmd_send_packet(hw, skb);
@@ -1725,8 +1724,7 @@
&reserved_page_packet_8821[0], totalpacketlen);
skb = dev_alloc_skb(totalpacketlen);
- memcpy((u8 *)skb_put(skb, totalpacketlen),
- &reserved_page_packet_8821, totalpacketlen);
+ skb_put_data(skb, &reserved_page_packet_8821, totalpacketlen);
rtstatus = rtl_cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 4d989b8..5590d07 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -653,7 +653,7 @@
/* reserve some space for mac80211's radiotap */
skb_reserve(skb, __RADIO_TAP_SIZE_RSV);
- memcpy(skb_put(skb, size), _urb->transfer_buffer, size);
+ skb_put_data(skb, _urb->transfer_buffer, size);
skb_queue_tail(&rtlusb->rx_queue, skb);
tasklet_schedule(&rtlusb->rx_work_tasklet);
diff --git a/drivers/net/wireless/rsi/Makefile b/drivers/net/wireless/rsi/Makefile
index 25828b6..a475c81 100644
--- a/drivers/net/wireless/rsi/Makefile
+++ b/drivers/net/wireless/rsi/Makefile
@@ -2,7 +2,7 @@
rsi_91x-y += rsi_91x_core.o
rsi_91x-y += rsi_91x_mac80211.o
rsi_91x-y += rsi_91x_mgmt.o
-rsi_91x-y += rsi_91x_pkt.o
+rsi_91x-y += rsi_91x_hal.o
rsi_91x-$(CONFIG_RSI_DEBUGFS) += rsi_91x_debugfs.o
rsi_usb-y += rsi_91x_usb.o rsi_91x_usb_ops.o
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
index f3d3995..68f04a7 100644
--- a/drivers/net/wireless/rsi/rsi_91x_core.c
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -306,7 +306,7 @@
tstamp_2 = jiffies;
mutex_unlock(&common->tx_rxlock);
- if (tstamp_2 > tstamp_1 + (300 * HZ / 1000))
+ if (time_after(tstamp_2, tstamp_1 + (300 * HZ) / 1000))
schedule();
}
}
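The time_after() change above is not cosmetic: jiffies wraps around, and a plain comparison misorders timestamps across the wrap. A short sketch of the difference (time_after() comes from <linux/jiffies.h>):

	unsigned long deadline = tstamp_1 + (300 * HZ) / 1000;

	if (tstamp_2 > deadline)		/* breaks once jiffies wraps */
		schedule();

	if (time_after(tstamp_2, deadline))	/* wrap-safe signed compare */
		schedule();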
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
new file mode 100644
index 0000000..3d24e8e
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -0,0 +1,740 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/firmware.h>
+#include "rsi_mgmt.h"
+#include "rsi_hal.h"
+#include "rsi_sdio.h"
+
+/* FLASH Firmware */
+static struct ta_metadata metadata_flash_content[] = {
+ {"flash_content", 0x00010000},
+ {"rsi/rs9113_wlan_qspi.rps", 0x00010000},
+};
+
+/**
+ * rsi_send_data_pkt() - This function sends the received data packet from
+ * driver to device.
+ * @common: Pointer to the driver private structure.
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct ieee80211_hdr *tmp_hdr;
+ struct ieee80211_tx_info *info;
+ struct skb_info *tx_params;
+ struct ieee80211_bss_conf *bss;
+ int status;
+ u8 ieee80211_size = MIN_802_11_HDR_LEN;
+ u8 extnd_size;
+ __le16 *frame_desc;
+ u16 seq_num;
+
+ info = IEEE80211_SKB_CB(skb);
+ bss = &info->control.vif->bss_conf;
+ tx_params = (struct skb_info *)info->driver_data;
+
+ if (!bss->assoc) {
+ status = -EINVAL;
+ goto err;
+ }
+
+ tmp_hdr = (struct ieee80211_hdr *)&skb->data[0];
+ seq_num = (le16_to_cpu(tmp_hdr->seq_ctrl) >> 4);
+
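+ /* The low two bits of skb->data give the padding that, pushed along
+ * with the frame descriptor, keeps the descriptor 4-byte aligned.
+ */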
+ extnd_size = ((uintptr_t)skb->data & 0x3);
+
+ if ((FRAME_DESC_SZ + extnd_size) > skb_headroom(skb)) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__);
+ status = -ENOSPC;
+ goto err;
+ }
+
+ skb_push(skb, (FRAME_DESC_SZ + extnd_size));
+ frame_desc = (__le16 *)&skb->data[0];
+ memset((u8 *)frame_desc, 0, FRAME_DESC_SZ);
+
+ if (ieee80211_is_data_qos(tmp_hdr->frame_control)) {
+ ieee80211_size += 2;
+ frame_desc[6] |= cpu_to_le16(BIT(12));
+ }
+
+ if ((!(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) &&
+ (common->secinfo.security_enable)) {
+ if (rsi_is_cipher_wep(common))
+ ieee80211_size += 4;
+ else
+ ieee80211_size += 8;
+ frame_desc[6] |= cpu_to_le16(BIT(15));
+ }
+
+ frame_desc[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) |
+ (RSI_WIFI_DATA_Q << 12));
+ frame_desc[2] = cpu_to_le16((extnd_size) | (ieee80211_size) << 8);
+
+ if (common->min_rate != 0xffff) {
+ /* Send fixed rate */
+ frame_desc[3] = cpu_to_le16(RATE_INFO_ENABLE);
+ frame_desc[4] = cpu_to_le16(common->min_rate);
+
+ if (conf_is_ht40(&common->priv->hw->conf))
+ frame_desc[5] = cpu_to_le16(FULL40M_ENABLE);
+
+ if (common->vif_info[0].sgi) {
+ if (common->min_rate & 0x100) /* Only MCS rates */
+ frame_desc[4] |=
+ cpu_to_le16(ENABLE_SHORTGI_RATE);
+ }
+
+ }
+
+ frame_desc[6] |= cpu_to_le16(seq_num & 0xfff);
+ frame_desc[7] = cpu_to_le16(((tx_params->tid & 0xf) << 4) |
+ (skb->priority & 0xf) |
+ (tx_params->sta_id << 8));
+
+ status = adapter->host_intf_ops->write_pkt(common->priv, skb->data,
+ skb->len);
+ if (status)
+ rsi_dbg(ERR_ZONE, "%s: Failed to write pkt\n",
+ __func__);
+
+err:
+ ++common->tx_stats.total_tx_pkt_freed[skb->priority];
+ rsi_indicate_tx_status(common->priv, skb, status);
+ return status;
+}
+
+/**
+ * rsi_send_mgmt_pkt() - This function sends the received management packet
+ * from driver to device.
+ * @common: Pointer to the driver private structure.
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+int rsi_send_mgmt_pkt(struct rsi_common *common,
+ struct sk_buff *skb)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct ieee80211_hdr *wh;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_bss_conf *bss;
+ struct ieee80211_hw *hw = adapter->hw;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct skb_info *tx_params;
+ int status = -E2BIG;
+ __le16 *msg;
+ u8 extnd_size;
+ u8 vap_id = 0;
+
+ info = IEEE80211_SKB_CB(skb);
+ tx_params = (struct skb_info *)info->driver_data;
+ extnd_size = ((uintptr_t)skb->data & 0x3);
+
+ if (tx_params->flags & INTERNAL_MGMT_PKT) {
+ if ((extnd_size) > skb_headroom(skb)) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__);
+ dev_kfree_skb(skb);
+ return -ENOSPC;
+ }
+ skb_push(skb, extnd_size);
+ skb->data[extnd_size + 4] = extnd_size;
+ status = adapter->host_intf_ops->write_pkt(common->priv,
+ (u8 *)skb->data,
+ skb->len);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to write the packet\n", __func__);
+ }
+ dev_kfree_skb(skb);
+ return status;
+ }
+
+ bss = &info->control.vif->bss_conf;
+ wh = (struct ieee80211_hdr *)&skb->data[0];
+
+ if (FRAME_DESC_SZ > skb_headroom(skb))
+ goto err;
+
+ skb_push(skb, FRAME_DESC_SZ);
+ memset(skb->data, 0, FRAME_DESC_SZ);
+ msg = (__le16 *)skb->data;
+
+ if (skb->len > MAX_MGMT_PKT_SIZE) {
+ rsi_dbg(INFO_ZONE, "%s: Dropping mgmt pkt > 512\n", __func__);
+ goto err;
+ }
+
+ msg[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) |
+ (RSI_WIFI_MGMT_Q << 12));
+ msg[1] = cpu_to_le16(TX_DOT11_MGMT);
+ msg[2] = cpu_to_le16(MIN_802_11_HDR_LEN << 8);
+ msg[3] = cpu_to_le16(RATE_INFO_ENABLE);
+ msg[6] = cpu_to_le16(le16_to_cpu(wh->seq_ctrl) >> 4);
+
+ if (wh->addr1[0] & BIT(0))
+ msg[3] |= cpu_to_le16(RSI_BROADCAST_PKT);
+
+ if (common->band == NL80211_BAND_2GHZ)
+ msg[4] = cpu_to_le16(RSI_11B_MODE);
+ else
+ msg[4] = cpu_to_le16((RSI_RATE_6 & 0x0f) | RSI_11G_MODE);
+
+ if (conf_is_ht40(conf)) {
+ msg[4] = cpu_to_le16(0xB | RSI_11G_MODE);
+ msg[5] = cpu_to_le16(0x6);
+ }
+
+ /* Indicate to firmware to give cfm */
+ if ((skb->data[16] == IEEE80211_STYPE_PROBE_REQ) && (!bss->assoc)) {
+ msg[1] |= cpu_to_le16(BIT(10));
+ msg[7] = cpu_to_le16(PROBEREQ_CONFIRM);
+ common->mgmt_q_block = true;
+ }
+
+ msg[7] |= cpu_to_le16(vap_id << 8);
+
+ status = adapter->host_intf_ops->write_pkt(common->priv, (u8 *)msg,
+ skb->len);
+ if (status)
+ rsi_dbg(ERR_ZONE, "%s: Failed to write the packet\n", __func__);
+
+err:
+ rsi_indicate_tx_status(common->priv, skb, status);
+ return status;
+}
+
+static void bl_cmd_timeout(unsigned long priv)
+{
+ struct rsi_hw *adapter = (struct rsi_hw *)priv;
+
+ adapter->blcmd_timer_expired = true;
+ del_timer(&adapter->bl_cmd_timer);
+}
+
+static int bl_start_cmd_timer(struct rsi_hw *adapter, u32 timeout)
+{
+ init_timer(&adapter->bl_cmd_timer);
+ adapter->bl_cmd_timer.data = (unsigned long)adapter;
+ adapter->bl_cmd_timer.function = (void *)&bl_cmd_timeout;
+ adapter->bl_cmd_timer.expires = (msecs_to_jiffies(timeout) + jiffies);
+
+ adapter->blcmd_timer_expired = false;
+ add_timer(&adapter->bl_cmd_timer);
+
+ return 0;
+}
+
+static int bl_stop_cmd_timer(struct rsi_hw *adapter)
+{
+ adapter->blcmd_timer_expired = false;
+ if (timer_pending(&adapter->bl_cmd_timer))
+ del_timer(&adapter->bl_cmd_timer);
+
+ return 0;
+}
+
+static int bl_write_cmd(struct rsi_hw *adapter, u8 cmd, u8 exp_resp,
+ u16 *cmd_resp)
+{
+ struct rsi_host_intf_ops *hif_ops = adapter->host_intf_ops;
+ u32 regin_val = 0, regout_val = 0;
+ u32 regin_input = 0;
+ u8 output = 0;
+ int status;
+
+ regin_input = (REGIN_INPUT | adapter->priv->coex_mode);
+
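+ /* Wait for the boot loader to consume (invalidate) the previous REGIN
+ * value before issuing the next command.
+ */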
+ while (!adapter->blcmd_timer_expired) {
+ regin_val = 0;
+ status = hif_ops->master_reg_read(adapter, SWBL_REGIN,
+ &regin_val, 2);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Command %0x REGIN reading failed..\n",
+ __func__, cmd);
+ return status;
+ }
+ mdelay(1);
+ if ((regin_val >> 12) != REGIN_VALID)
+ break;
+ }
+ if (adapter->blcmd_timer_expired) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Command %0x REGIN reading timed out..\n",
+ __func__, cmd);
+ return -ETIMEDOUT;
+ }
+
+ rsi_dbg(INFO_ZONE,
+ "Issuing write to Regin val:%0x sending cmd:%0x\n",
+ regin_val, (cmd | regin_input << 8));
+ status = hif_ops->master_reg_write(adapter, SWBL_REGIN,
+ (cmd | regin_input << 8), 2);
+ if (status < 0)
+ return status;
+ mdelay(1);
+
+ if (cmd == LOAD_HOSTED_FW || cmd == JUMP_TO_ZERO_PC) {
+ /* JUMP_TO_ZERO_PC doesn't expect
+ * any response. So return from here
+ */
+ return 0;
+ }
+
+ while (!adapter->blcmd_timer_expired) {
+ regout_val = 0;
+ status = hif_ops->master_reg_read(adapter, SWBL_REGOUT,
+ &regout_val, 2);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Command %0x REGOUT reading failed..\n",
+ __func__, cmd);
+ return status;
+ }
+ mdelay(1);
+ if ((regout_val >> 8) == REGOUT_VALID)
+ break;
+ }
+ if (adapter->blcmd_timer_expired) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Command %0x REGOUT reading timed out..\n",
+ __func__, cmd);
+ return -ETIMEDOUT;
+ }
+
+ *cmd_resp = ((u16 *)&regout_val)[0] & 0xffff;
+
+ output = ((u8 *)&regout_val)[0] & 0xff;
+
+ status = hif_ops->master_reg_write(adapter, SWBL_REGOUT,
+ (cmd | REGOUT_INVALID << 8), 2);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Command %0x REGOUT writing failed..\n",
+ __func__, cmd);
+ return status;
+ }
+ mdelay(1);
+
+ if (output != exp_resp) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Recvd resp %x for cmd %0x\n",
+ __func__, output, cmd);
+ return -EINVAL;
+ }
+ rsi_dbg(INFO_ZONE,
+ "%s: Recvd Expected resp %x for cmd %0x\n",
+ __func__, output, cmd);
+
+ return 0;
+}
+
+static int bl_cmd(struct rsi_hw *adapter, u8 cmd, u8 exp_resp, char *str)
+{
+ u16 regout_val = 0;
+ u32 timeout;
+ int status;
+
+ if ((cmd == EOF_REACHED) || (cmd == PING_VALID) || (cmd == PONG_VALID))
+ timeout = BL_BURN_TIMEOUT;
+ else
+ timeout = BL_CMD_TIMEOUT;
+
+ bl_start_cmd_timer(adapter, timeout);
+ status = bl_write_cmd(adapter, cmd, exp_resp, &regout_val);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Command %s (%0x) writing failed..\n",
+ __func__, str, cmd);
+ return status;
+ }
+ bl_stop_cmd_timer(adapter);
+ return 0;
+}
+
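+/* Byte offsets of the checksum, image length and flash start address
+ * fields within the RPS firmware image header.
+ */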
+#define CHECK_SUM_OFFSET 20
+#define LEN_OFFSET 8
+#define ADDR_OFFSET 16
+static int bl_write_header(struct rsi_hw *adapter, u8 *flash_content,
+ u32 content_size)
+{
+ struct rsi_host_intf_ops *hif_ops = adapter->host_intf_ops;
+ struct bl_header bl_hdr;
+ u32 write_addr, write_len;
+ int status;
+
+ bl_hdr.flags = 0;
+ bl_hdr.image_no = cpu_to_le32(adapter->priv->coex_mode);
+ bl_hdr.check_sum = cpu_to_le32(
+ *(u32 *)&flash_content[CHECK_SUM_OFFSET]);
+ bl_hdr.flash_start_address = cpu_to_le32(
+ *(u32 *)&flash_content[ADDR_OFFSET]);
+ bl_hdr.flash_len = cpu_to_le32(*(u32 *)&flash_content[LEN_OFFSET]);
+ write_len = sizeof(struct bl_header);
+
+ if (adapter->rsi_host_intf == RSI_HOST_INTF_USB) {
+ write_addr = PING_BUFFER_ADDRESS;
+ status = hif_ops->write_reg_multiple(adapter, write_addr,
+ (u8 *)&bl_hdr, write_len);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to load Version/CRC structure\n",
+ __func__);
+ return status;
+ }
+ } else {
+ write_addr = PING_BUFFER_ADDRESS >> 16;
+ status = hif_ops->master_access_msword(adapter, write_addr);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to set ms word to common reg\n",
+ __func__);
+ return status;
+ }
+ write_addr = RSI_SD_REQUEST_MASTER |
+ (PING_BUFFER_ADDRESS & 0xFFFF);
+ status = hif_ops->write_reg_multiple(adapter, write_addr,
+ (u8 *)&bl_hdr, write_len);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to load Version/CRC structure\n",
+ __func__);
+ return status;
+ }
+ }
+ return 0;
+}
+
+static u32 read_flash_capacity(struct rsi_hw *adapter)
+{
+ u32 flash_sz = 0;
+
+ if ((adapter->host_intf_ops->master_reg_read(adapter, FLASH_SIZE_ADDR,
+ &flash_sz, 2)) < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Flash size reading failed..\n",
+ __func__);
+ return 0;
+ }
+ rsi_dbg(INIT_ZONE, "Flash capacity: %d KiloBytes\n", flash_sz);
+
+ return (flash_sz * 1024); /* Return size in kbytes */
+}
+
+static int ping_pong_write(struct rsi_hw *adapter, u8 cmd, u8 *addr, u32 size)
+{
+ struct rsi_host_intf_ops *hif_ops = adapter->host_intf_ops;
+ u32 block_size = adapter->block_size;
+ u32 cmd_addr;
+ u16 cmd_resp, cmd_req;
+ u8 *str;
+ int status;
+
+ if (cmd == PING_WRITE) {
+ cmd_addr = PING_BUFFER_ADDRESS;
+ cmd_resp = PONG_AVAIL;
+ cmd_req = PING_VALID;
+ str = "PING_VALID";
+ } else {
+ cmd_addr = PONG_BUFFER_ADDRESS;
+ cmd_resp = PING_AVAIL;
+ cmd_req = PONG_VALID;
+ str = "PONG_VALID";
+ }
+
+ status = hif_ops->load_data_master_write(adapter, cmd_addr, size,
+ block_size, addr);
+ if (status) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to write blk at addr %0x\n",
+ __func__, *addr);
+ return status;
+ }
+
+ status = bl_cmd(adapter, cmd_req, cmd_resp, str);
+ if (status) {
+ bl_stop_cmd_timer(adapter);
+ return status;
+ }
+ return 0;
+}
+
+static int auto_fw_upgrade(struct rsi_hw *adapter, u8 *flash_content,
+ u32 content_size)
+{
+ u8 cmd, *temp_flash_content;
+ u32 temp_content_size, num_flash, index;
+ u32 flash_start_address;
+ int status;
+
+ temp_flash_content = flash_content;
+
+ if (content_size > MAX_FLASH_FILE_SIZE) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Flash Content size is more than 400K %u\n",
+ __func__, MAX_FLASH_FILE_SIZE);
+ return -EINVAL;
+ }
+
+ flash_start_address = *(u32 *)&flash_content[FLASH_START_ADDRESS];
+ rsi_dbg(INFO_ZONE, "flash start address: %08x\n", flash_start_address);
+
+ if (flash_start_address < FW_IMAGE_MIN_ADDRESS) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Fw image Flash Start Address is less than 64K\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (flash_start_address % FLASH_SECTOR_SIZE) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Flash Start Address is not multiple of 4K\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((flash_start_address + content_size) > adapter->flash_capacity) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Flash Content will cross max flash size\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ temp_content_size = content_size;
+ num_flash = content_size / FLASH_WRITE_CHUNK_SIZE;
+
+ rsi_dbg(INFO_ZONE, "content_size: %d, num_flash: %d\n",
+ content_size, num_flash);
+
+ for (index = 0; index <= num_flash; index++) {
+ rsi_dbg(INFO_ZONE, "flash index: %d\n", index);
+ if (index != num_flash) {
+ content_size = FLASH_WRITE_CHUNK_SIZE;
+ rsi_dbg(INFO_ZONE, "QSPI content_size:%d\n",
+ content_size);
+ } else {
+ content_size =
+ temp_content_size % FLASH_WRITE_CHUNK_SIZE;
+ rsi_dbg(INFO_ZONE,
+ "Writing last sector content_size:%d\n",
+ content_size);
+ if (!content_size) {
+ rsi_dbg(INFO_ZONE, "instruction size zero\n");
+ break;
+ }
+ }
+
+ if (index % 2)
+ cmd = PING_WRITE;
+ else
+ cmd = PONG_WRITE;
+
+ status = ping_pong_write(adapter, cmd, flash_content,
+ content_size);
+ if (status) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to load %d block\n",
+ __func__, index);
+ return status;
+ }
+
+ rsi_dbg(INFO_ZONE,
+ "%s: Successfully loaded %d instructions\n",
+ __func__, index);
+ flash_content += content_size;
+ }
+
+ status = bl_cmd(adapter, EOF_REACHED, FW_LOADING_SUCCESSFUL,
+ "EOF_REACHED");
+ if (status) {
+ bl_stop_cmd_timer(adapter);
+ return status;
+ }
+ rsi_dbg(INFO_ZONE, "FW loading is done and FW is running..\n");
+ return 0;
+}
+
+static int rsi_load_firmware(struct rsi_hw *adapter)
+{
+ struct rsi_host_intf_ops *hif_ops = adapter->host_intf_ops;
+ const struct firmware *fw_entry = NULL;
+ u32 regout_val = 0, content_size;
+ u16 tmp_regout_val = 0;
+ u8 *flash_content = NULL;
+ struct ta_metadata *metadata_p;
+ int status;
+
+ bl_start_cmd_timer(adapter, BL_CMD_TIMEOUT);
+
+ while (!adapter->blcmd_timer_expired) {
+ status = hif_ops->master_reg_read(adapter, SWBL_REGOUT,
+ &regout_val, 2);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: REGOUT read failed\n", __func__);
+ return status;
+ }
+ mdelay(1);
+ if ((regout_val >> 8) == REGOUT_VALID)
+ break;
+ }
+ if (adapter->blcmd_timer_expired) {
+ rsi_dbg(ERR_ZONE, "%s: REGOUT read timedout\n", __func__);
+ rsi_dbg(ERR_ZONE,
+ "%s: Soft boot loader not present\n", __func__);
+ return -ETIMEDOUT;
+ }
+ bl_stop_cmd_timer(adapter);
+
+ rsi_dbg(INFO_ZONE, "Received Board Version Number: %x\n",
+ (regout_val & 0xff));
+
+ status = hif_ops->master_reg_write(adapter, SWBL_REGOUT,
+ (REGOUT_INVALID | REGOUT_INVALID << 8),
+ 2);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "%s: REGOUT writing failed..\n", __func__);
+ return status;
+ }
+ mdelay(1);
+
+ status = bl_cmd(adapter, CONFIG_AUTO_READ_MODE, CMD_PASS,
+ "AUTO_READ_CMD");
+ if (status < 0)
+ return status;
+
+ adapter->flash_capacity = read_flash_capacity(adapter);
+ if (adapter->flash_capacity <= 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to read flash size from EEPROM\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ metadata_p = &metadata_flash_content[adapter->priv->coex_mode];
+
+ rsi_dbg(INIT_ZONE, "%s: Loading file %s\n", __func__, metadata_p->name);
+ adapter->fw_file_name = metadata_p->name;
+
+ status = request_firmware(&fw_entry, metadata_p->name, adapter->device);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to open file %s\n",
+ __func__, metadata_p->name);
+ return status;
+ }
+ flash_content = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+ if (!flash_content) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to copy firmware\n", __func__);
+ status = -ENOMEM;
+ goto fail;
+ }
+ content_size = fw_entry->size;
+ rsi_dbg(INFO_ZONE, "FW Length = %d bytes\n", content_size);
+
+ status = bl_write_header(adapter, flash_content, content_size);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: RPS Image header loading failed\n",
+ __func__);
+ goto fail;
+ }
+
+ bl_start_cmd_timer(adapter, BL_CMD_TIMEOUT);
+ status = bl_write_cmd(adapter, CHECK_CRC, CMD_PASS, &tmp_regout_val);
+ if (status) {
+ bl_stop_cmd_timer(adapter);
+ rsi_dbg(ERR_ZONE,
+ "%s: CHECK_CRC Command writing failed..\n",
+ __func__);
+ if ((tmp_regout_val & 0xff) == CMD_FAIL) {
+ rsi_dbg(ERR_ZONE,
+ "CRC Fail.. Proceeding to Upgrade mode\n");
+ goto fw_upgrade;
+ }
+ }
+ bl_stop_cmd_timer(adapter);
+
+ status = bl_cmd(adapter, POLLING_MODE, CMD_PASS, "POLLING_MODE");
+ if (status)
+ goto fail;
+
+load_image_cmd:
+ status = bl_cmd(adapter, LOAD_HOSTED_FW, LOADING_INITIATED,
+ "LOAD_HOSTED_FW");
+ if (status)
+ goto fail;
+ rsi_dbg(INFO_ZONE, "Load Image command passed..\n");
+ goto success;
+
+fw_upgrade:
+ status = bl_cmd(adapter, BURN_HOSTED_FW, SEND_RPS_FILE, "FW_UPGRADE");
+ if (status)
+ goto fail;
+
+ rsi_dbg(INFO_ZONE, "Burn Command Pass.. Upgrading the firmware\n");
+
+ status = auto_fw_upgrade(adapter, flash_content, content_size);
+ if (status == 0) {
+ rsi_dbg(ERR_ZONE, "Firmware upgradation Done\n");
+ goto load_image_cmd;
+ }
+ rsi_dbg(ERR_ZONE, "Firmware upgrade failed\n");
+
+ status = bl_cmd(adapter, CONFIG_AUTO_READ_MODE, CMD_PASS,
+ "AUTO_READ_MODE");
+ if (status)
+ goto fail;
+
+success:
+ rsi_dbg(ERR_ZONE, "***** Firmware Loading successful *****\n");
+ kfree(flash_content);
+ release_firmware(fw_entry);
+ return 0;
+
+fail:
+ rsi_dbg(ERR_ZONE, "##### Firmware loading failed #####\n");
+ kfree(flash_content);
+ release_firmware(fw_entry);
+ return status;
+}
+
+int rsi_hal_device_init(struct rsi_hw *adapter)
+{
+ struct rsi_common *common = adapter->priv;
+
+ common->coex_mode = 1;
+ adapter->device_model = RSI_DEV_9113;
+
+ switch (adapter->device_model) {
+ case RSI_DEV_9113:
+ if (rsi_load_firmware(adapter)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to load TA instructions\n",
+ __func__);
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rsi_hal_device_init);
+
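rsi_hal_device_init() above is the single firmware-load entry point shared by both buses; a sketch of how a bus probe wires it up, mirroring the SDIO changes later in this series (error handling abbreviated):

	adapter->rsi_host_intf = RSI_HOST_INTF_SDIO;
	adapter->host_intf_ops = &sdio_host_intf_ops;
	if (rsi_hal_device_init(adapter)) {
		rsi_dbg(ERR_ZONE, "%s: Failed in device init\n", __func__);
		goto fail;
	}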
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index fac87c0..a0f0437 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -389,9 +389,7 @@
struct ieee80211_tx_info *info;
struct skb_info *rx_params;
u8 pad_bytes = msg[4];
- u8 pkt_recv;
struct sk_buff *skb;
- char *buffer;
if (type == RX_DOT11_MGMT) {
if (!adapter->sc_nvifs)
@@ -412,13 +410,9 @@
return -ENOMEM;
}
- buffer = skb_put(skb, msg_len);
-
- memcpy(buffer,
- (u8 *)(msg + FRAME_DESC_SZ + pad_bytes),
- msg_len);
-
- pkt_recv = buffer[0];
+ skb_put_data(skb,
+ (u8 *)(msg + FRAME_DESC_SZ + pad_bytes),
+ msg_len);
info = IEEE80211_SKB_CB(skb);
rx_params = (struct skb_info *)info->driver_data;
diff --git a/drivers/net/wireless/rsi/rsi_91x_pkt.c b/drivers/net/wireless/rsi/rsi_91x_pkt.c
deleted file mode 100644
index 02920c9..0000000
--- a/drivers/net/wireless/rsi/rsi_91x_pkt.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/**
- * Copyright (c) 2014 Redpine Signals Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "rsi_mgmt.h"
-
-/**
- * rsi_send_data_pkt() - This function sends the recieved data packet from
- * driver to device.
- * @common: Pointer to the driver private structure.
- * @skb: Pointer to the socket buffer structure.
- *
- * Return: status: 0 on success, -1 on failure.
- */
-int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
-{
- struct rsi_hw *adapter = common->priv;
- struct ieee80211_hdr *tmp_hdr;
- struct ieee80211_tx_info *info;
- struct skb_info *tx_params;
- struct ieee80211_bss_conf *bss;
- int status;
- u8 ieee80211_size = MIN_802_11_HDR_LEN;
- u8 extnd_size;
- __le16 *frame_desc;
- u16 seq_num;
-
- info = IEEE80211_SKB_CB(skb);
- bss = &info->control.vif->bss_conf;
- tx_params = (struct skb_info *)info->driver_data;
-
- if (!bss->assoc) {
- status = -EINVAL;
- goto err;
- }
-
- tmp_hdr = (struct ieee80211_hdr *)&skb->data[0];
- seq_num = (le16_to_cpu(tmp_hdr->seq_ctrl) >> 4);
-
- extnd_size = ((uintptr_t)skb->data & 0x3);
-
- if ((FRAME_DESC_SZ + extnd_size) > skb_headroom(skb)) {
- rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__);
- status = -ENOSPC;
- goto err;
- }
-
- skb_push(skb, (FRAME_DESC_SZ + extnd_size));
- frame_desc = (__le16 *)&skb->data[0];
- memset((u8 *)frame_desc, 0, FRAME_DESC_SZ);
-
- if (ieee80211_is_data_qos(tmp_hdr->frame_control)) {
- ieee80211_size += 2;
- frame_desc[6] |= cpu_to_le16(BIT(12));
- }
-
- if ((!(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) &&
- (common->secinfo.security_enable)) {
- if (rsi_is_cipher_wep(common))
- ieee80211_size += 4;
- else
- ieee80211_size += 8;
- frame_desc[6] |= cpu_to_le16(BIT(15));
- }
-
- frame_desc[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) |
- (RSI_WIFI_DATA_Q << 12));
- frame_desc[2] = cpu_to_le16((extnd_size) | (ieee80211_size) << 8);
-
- if (common->min_rate != 0xffff) {
- /* Send fixed rate */
- frame_desc[3] = cpu_to_le16(RATE_INFO_ENABLE);
- frame_desc[4] = cpu_to_le16(common->min_rate);
-
- if (conf_is_ht40(&common->priv->hw->conf))
- frame_desc[5] = cpu_to_le16(FULL40M_ENABLE);
-
- if (common->vif_info[0].sgi) {
- if (common->min_rate & 0x100) /* Only MCS rates */
- frame_desc[4] |=
- cpu_to_le16(ENABLE_SHORTGI_RATE);
- }
-
- }
-
- frame_desc[6] |= cpu_to_le16(seq_num & 0xfff);
- frame_desc[7] = cpu_to_le16(((tx_params->tid & 0xf) << 4) |
- (skb->priority & 0xf) |
- (tx_params->sta_id << 8));
-
- status = adapter->host_intf_write_pkt(common->priv,
- skb->data,
- skb->len);
- if (status)
- rsi_dbg(ERR_ZONE, "%s: Failed to write pkt\n",
- __func__);
-
-err:
- ++common->tx_stats.total_tx_pkt_freed[skb->priority];
- rsi_indicate_tx_status(common->priv, skb, status);
- return status;
-}
-
-/**
- * rsi_send_mgmt_pkt() - This functions sends the received management packet
- * from driver to device.
- * @common: Pointer to the driver private structure.
- * @skb: Pointer to the socket buffer structure.
- *
- * Return: status: 0 on success, -1 on failure.
- */
-int rsi_send_mgmt_pkt(struct rsi_common *common,
- struct sk_buff *skb)
-{
- struct rsi_hw *adapter = common->priv;
- struct ieee80211_hdr *wh;
- struct ieee80211_tx_info *info;
- struct ieee80211_bss_conf *bss;
- struct ieee80211_hw *hw = adapter->hw;
- struct ieee80211_conf *conf = &hw->conf;
- struct skb_info *tx_params;
- int status = -E2BIG;
- __le16 *msg;
- u8 extnd_size;
- u8 vap_id = 0;
-
- info = IEEE80211_SKB_CB(skb);
- tx_params = (struct skb_info *)info->driver_data;
- extnd_size = ((uintptr_t)skb->data & 0x3);
-
- if (tx_params->flags & INTERNAL_MGMT_PKT) {
- if ((extnd_size) > skb_headroom(skb)) {
- rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__);
- dev_kfree_skb(skb);
- return -ENOSPC;
- }
- skb_push(skb, extnd_size);
- skb->data[extnd_size + 4] = extnd_size;
- status = adapter->host_intf_write_pkt(common->priv,
- (u8 *)skb->data,
- skb->len);
- if (status) {
- rsi_dbg(ERR_ZONE,
- "%s: Failed to write the packet\n", __func__);
- }
- dev_kfree_skb(skb);
- return status;
- }
-
- bss = &info->control.vif->bss_conf;
- wh = (struct ieee80211_hdr *)&skb->data[0];
-
- if (FRAME_DESC_SZ > skb_headroom(skb))
- goto err;
-
- skb_push(skb, FRAME_DESC_SZ);
- memset(skb->data, 0, FRAME_DESC_SZ);
- msg = (__le16 *)skb->data;
-
- if (skb->len > MAX_MGMT_PKT_SIZE) {
- rsi_dbg(INFO_ZONE, "%s: Dropping mgmt pkt > 512\n", __func__);
- goto err;
- }
-
- msg[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) |
- (RSI_WIFI_MGMT_Q << 12));
- msg[1] = cpu_to_le16(TX_DOT11_MGMT);
- msg[2] = cpu_to_le16(MIN_802_11_HDR_LEN << 8);
- msg[3] = cpu_to_le16(RATE_INFO_ENABLE);
- msg[6] = cpu_to_le16(le16_to_cpu(wh->seq_ctrl) >> 4);
-
- if (wh->addr1[0] & BIT(0))
- msg[3] |= cpu_to_le16(RSI_BROADCAST_PKT);
-
- if (common->band == NL80211_BAND_2GHZ)
- msg[4] = cpu_to_le16(RSI_11B_MODE);
- else
- msg[4] = cpu_to_le16((RSI_RATE_6 & 0x0f) | RSI_11G_MODE);
-
- if (conf_is_ht40(conf)) {
- msg[4] = cpu_to_le16(0xB | RSI_11G_MODE);
- msg[5] = cpu_to_le16(0x6);
- }
-
- /* Indicate to firmware to give cfm */
- if ((skb->data[16] == IEEE80211_STYPE_PROBE_REQ) && (!bss->assoc)) {
- msg[1] |= cpu_to_le16(BIT(10));
- msg[7] = cpu_to_le16(PROBEREQ_CONFIRM);
- common->mgmt_q_block = true;
- }
-
- msg[7] |= cpu_to_le16(vap_id << 8);
-
- status = adapter->host_intf_write_pkt(common->priv,
- (u8 *)msg,
- skb->len);
- if (status)
- rsi_dbg(ERR_ZONE, "%s: Failed to write the packet\n", __func__);
-
-err:
- rsi_indicate_tx_status(common->priv, skb, status);
- return status;
-}
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 8428858..2ef844a 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include "rsi_sdio.h"
#include "rsi_common.h"
+#include "rsi_hal.h"
/**
* rsi_sdio_set_cmd52_arg() - This function prepares cmd 52 read/write arg.
@@ -365,6 +366,7 @@
status = sdio_set_block_size(dev->pfunction, length);
dev->pfunction->max_blksize = 256;
+ adapter->block_size = dev->pfunction->max_blksize;
rsi_dbg(INFO_ZONE,
"%s: Operational blk length is %d\n", __func__, length);
@@ -487,8 +489,8 @@
*/
static int rsi_sdio_read_register_multiple(struct rsi_hw *adapter,
u32 addr,
- u32 count,
- u8 *data)
+ u8 *data,
+ u16 count)
{
struct rsi_91x_sdiodev *dev =
(struct rsi_91x_sdiodev *)adapter->rsi_dev;
@@ -518,7 +520,7 @@
int rsi_sdio_write_register_multiple(struct rsi_hw *adapter,
u32 addr,
u8 *data,
- u32 count)
+ u16 count)
{
struct rsi_91x_sdiodev *dev =
(struct rsi_91x_sdiodev *)adapter->rsi_dev;
@@ -552,6 +554,182 @@
return status;
}
+static int rsi_sdio_load_data_master_write(struct rsi_hw *adapter,
+ u32 base_address,
+ u32 instructions_sz,
+ u16 block_size,
+ u8 *ta_firmware)
+{
+ u32 num_blocks, offset, i;
+ u16 msb_address, lsb_address;
+ u8 temp_buf[block_size];
+ int status;
+
+ num_blocks = instructions_sz / block_size;
+ msb_address = base_address >> 16;
+
+ rsi_dbg(INFO_ZONE, "ins_size: %d, num_blocks: %d\n",
+ instructions_sz, num_blocks);
+
+ /* Loading DM ms word in the sdio slave */
+ status = rsi_sdio_master_access_msword(adapter, msb_address);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to set ms word reg\n", __func__);
+ return status;
+ }
+
+ for (offset = 0, i = 0; i < num_blocks; i++, offset += block_size) {
+ memset(temp_buf, 0, block_size);
+ memcpy(temp_buf, ta_firmware + offset, block_size);
+ lsb_address = (u16)base_address;
+ status = rsi_sdio_write_register_multiple
+ (adapter,
+ lsb_address | RSI_SD_REQUEST_MASTER,
+ temp_buf, block_size);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "%s: failed to write\n", __func__);
+ return status;
+ }
+ rsi_dbg(INFO_ZONE, "%s: loading block: %d\n", __func__, i);
+ base_address += block_size;
+
+ if ((base_address >> 16) != msb_address) {
+ msb_address += 1;
+
+ /* Loading DM ms word in the sdio slave */
+ status = rsi_sdio_master_access_msword(adapter,
+ msb_address);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to set ms word reg\n",
+ __func__);
+ return status;
+ }
+ }
+ }
+
+ if (instructions_sz % block_size) {
+ memset(temp_buf, 0, block_size);
+ memcpy(temp_buf, ta_firmware + offset,
+ instructions_sz % block_size);
+ lsb_address = (u16)base_address;
+ status = rsi_sdio_write_register_multiple
+ (adapter,
+ lsb_address | RSI_SD_REQUEST_MASTER,
+ temp_buf,
+ instructions_sz % block_size);
+ if (status < 0)
+ return status;
+ rsi_dbg(INFO_ZONE,
+ "Written Last Block in Address 0x%x Successfully\n",
+ offset | RSI_SD_REQUEST_MASTER);
+ }
+ return 0;
+}
+
+#define FLASH_SIZE_ADDR 0x04000016
+static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr,
+ u32 *read_buf, u16 size)
+{
+ u32 addr_on_bus, *data;
+ u32 align[2] = {};
+ u16 ms_addr;
+ int status;
+
+ data = PTR_ALIGN(&align[0], 8);
+
+ ms_addr = (addr >> 16);
+ status = rsi_sdio_master_access_msword(adapter, ms_addr);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to set ms word to common reg\n",
+ __func__);
+ return status;
+ }
+ addr &= 0xFFFF;
+
+ addr_on_bus = (addr & 0xFF000000);
+ if ((addr_on_bus == (FLASH_SIZE_ADDR & 0xFF000000)) ||
+ (addr_on_bus == 0x0))
+ addr_on_bus = (addr & ~(0x3));
+ else
+ addr_on_bus = addr;
+
+ /* Bring TA out of reset */
+ status = rsi_sdio_read_register_multiple
+ (adapter,
+ (addr_on_bus | RSI_SD_REQUEST_MASTER),
+ (u8 *)data, 4);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "%s: AHB register read failed\n", __func__);
+ return status;
+ }
+ if (size == 2) {
+ if ((addr & 0x3) == 0)
+ *read_buf = *data;
+ else
+ *read_buf = (*data >> 16);
+ *read_buf = (*read_buf & 0xFFFF);
+ } else if (size == 1) {
+ if ((addr & 0x3) == 0)
+ *read_buf = *data;
+ else if ((addr & 0x3) == 1)
+ *read_buf = (*data >> 8);
+ else if ((addr & 0x3) == 2)
+ *read_buf = (*data >> 16);
+ else
+ *read_buf = (*data >> 24);
+ *read_buf = (*read_buf & 0xFF);
+ } else {
+ *read_buf = *data;
+ }
+
+ return 0;
+}
+
+static int rsi_sdio_master_reg_write(struct rsi_hw *adapter,
+ unsigned long addr,
+ unsigned long data, u16 size)
+{
+ unsigned long data1[2], *data_aligned;
+ int status;
+
+ data_aligned = PTR_ALIGN(&data1[0], 8);
+
+ if (size == 2) {
+ *data_aligned = ((data << 16) | (data & 0xFFFF));
+ } else if (size == 1) {
+ u32 temp_data = data & 0xFF;
+
+ *data_aligned = ((temp_data << 24) | (temp_data << 16) |
+ (temp_data << 8) | temp_data);
+ } else {
+ *data_aligned = data;
+ }
+ size = 4;
+
+ status = rsi_sdio_master_access_msword(adapter, (addr >> 16));
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to set ms word to common reg\n",
+ __func__);
+ return -EIO;
+ }
+ addr = addr & 0xFFFF;
+
+ /* Bring TA out of reset */
+ status = rsi_sdio_write_register_multiple
+ (adapter,
+ (addr | RSI_SD_REQUEST_MASTER),
+ (u8 *)data_aligned, size);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to do AHB reg write\n", __func__);
+ return status;
+ }
+ return 0;
+}
+
/**
* rsi_sdio_host_intf_write_pkt() - This function writes the packet to device.
* @adapter: Pointer to the adapter structure.
@@ -614,8 +792,8 @@
status = rsi_sdio_read_register_multiple(adapter,
length,
- length, /*num of bytes*/
- (u8 *)pkt);
+ (u8 *)pkt,
+ length); /*num of bytes*/
if (status)
rsi_dbg(ERR_ZONE, "%s: Failed to read frame: %d\n", __func__,
@@ -676,8 +854,6 @@
}
sdio_release_host(pfunction);
- adapter->host_intf_write_pkt = rsi_sdio_host_intf_write_pkt;
- adapter->host_intf_read_pkt = rsi_sdio_host_intf_read_pkt;
adapter->determine_event_timeout = rsi_sdio_determine_event_timeout;
adapter->check_hw_queue_status = rsi_sdio_read_buffer_status_register;
@@ -691,6 +867,17 @@
return status;
}
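+/* Bus-agnostic accessors: the common HAL (rsi_91x_hal.c) reaches the
+ * device only through these callbacks, letting SDIO and USB share the
+ * firmware-load and packet-write paths.
+ */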
+static struct rsi_host_intf_ops sdio_host_intf_ops = {
+ .write_pkt = rsi_sdio_host_intf_write_pkt,
+ .read_pkt = rsi_sdio_host_intf_read_pkt,
+ .master_access_msword = rsi_sdio_master_access_msword,
+ .read_reg_multiple = rsi_sdio_read_register_multiple,
+ .write_reg_multiple = rsi_sdio_write_register_multiple,
+ .master_reg_read = rsi_sdio_master_reg_read,
+ .master_reg_write = rsi_sdio_master_reg_write,
+ .load_data_master_write = rsi_sdio_load_data_master_write,
+};
+
/**
* rsi_probe() - This function is called by kernel when the driver provided
* Vendor and device IDs are matched. All the initialization
@@ -713,6 +900,8 @@
__func__);
return 1;
}
+ adapter->rsi_host_intf = RSI_HOST_INTF_SDIO;
+ adapter->host_intf_ops = &sdio_host_intf_ops;
if (rsi_init_sdio_interface(adapter, pfunction)) {
rsi_dbg(ERR_ZONE, "%s: Failed to init sdio interface\n",
@@ -720,13 +909,19 @@
goto fail;
}
- if (rsi_sdio_device_init(adapter->priv)) {
+ if (rsi_hal_device_init(adapter)) {
rsi_dbg(ERR_ZONE, "%s: Failed in device init\n", __func__);
sdio_claim_host(pfunction);
sdio_disable_func(pfunction);
sdio_release_host(pfunction);
goto fail;
}
+ rsi_dbg(INFO_ZONE, "===> RSI Device Init Done <===\n");
+
+ if (rsi_sdio_master_access_msword(adapter, MISC_CFG_BASE_ADDR)) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to set ms word reg\n", __func__);
+ return -EIO;
+ }
sdio_claim_host(pfunction);
if (sdio_claim_irq(pfunction, rsi_handle_interrupt)) {
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index 40d7231..df2a63b 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -27,8 +27,7 @@
*
* Return: status: 0 on success, -1 on failure.
*/
-static int rsi_sdio_master_access_msword(struct rsi_hw *adapter,
- u16 ms_word)
+int rsi_sdio_master_access_msword(struct rsi_hw *adapter, u16 ms_word)
{
u8 byte;
u8 function = 0;
@@ -61,171 +60,6 @@
}
/**
- * rsi_copy_to_card() - This function includes the actual funtionality of
- * copying the TA firmware to the card.Basically this
- * function includes opening the TA file,reading the
- * TA file and writing their values in blocks of data.
- * @common: Pointer to the driver private structure.
- * @fw: Pointer to the firmware value to be written.
- * @len: length of firmware file.
- * @num_blocks: Number of blocks to be written to the card.
- *
- * Return: 0 on success and -1 on failure.
- */
-static int rsi_copy_to_card(struct rsi_common *common,
- const u8 *fw,
- u32 len,
- u32 num_blocks)
-{
- struct rsi_hw *adapter = common->priv;
- struct rsi_91x_sdiodev *dev =
- (struct rsi_91x_sdiodev *)adapter->rsi_dev;
- u32 indx, ii;
- u32 block_size = dev->tx_blk_size;
- u32 lsb_address;
- __le32 data[] = { TA_HOLD_THREAD_VALUE, TA_SOFT_RST_CLR,
- TA_PC_ZERO, TA_RELEASE_THREAD_VALUE };
- u32 address[] = { TA_HOLD_THREAD_REG, TA_SOFT_RESET_REG,
- TA_TH0_PC_REG, TA_RELEASE_THREAD_REG };
- u32 base_address;
- u16 msb_address;
-
- base_address = TA_LOAD_ADDRESS;
- msb_address = base_address >> 16;
-
- for (indx = 0, ii = 0; ii < num_blocks; ii++, indx += block_size) {
- lsb_address = ((u16) base_address | RSI_SD_REQUEST_MASTER);
- if (rsi_sdio_write_register_multiple(adapter,
- lsb_address,
- (u8 *)(fw + indx),
- block_size)) {
- rsi_dbg(ERR_ZONE,
- "%s: Unable to load %s blk\n", __func__,
- FIRMWARE_RSI9113);
- return -1;
- }
- rsi_dbg(INIT_ZONE, "%s: loading block: %d\n", __func__, ii);
- base_address += block_size;
- if ((base_address >> 16) != msb_address) {
- msb_address += 1;
- if (rsi_sdio_master_access_msword(adapter,
- msb_address)) {
- rsi_dbg(ERR_ZONE,
- "%s: Unable to set ms word reg\n",
- __func__);
- return -1;
- }
- }
- }
-
- if (len % block_size) {
- lsb_address = ((u16) base_address | RSI_SD_REQUEST_MASTER);
- if (rsi_sdio_write_register_multiple(adapter,
- lsb_address,
- (u8 *)(fw + indx),
- len % block_size)) {
- rsi_dbg(ERR_ZONE,
- "%s: Unable to load f/w\n", __func__);
- return -1;
- }
- }
- rsi_dbg(INIT_ZONE,
- "%s: Succesfully loaded TA instructions\n", __func__);
-
- if (rsi_sdio_master_access_msword(adapter, TA_BASE_ADDR)) {
- rsi_dbg(ERR_ZONE,
- "%s: Unable to set ms word to common reg\n",
- __func__);
- return -1;
- }
-
- for (ii = 0; ii < ARRAY_SIZE(data); ii++) {
- /* Bringing TA out of reset */
- if (rsi_sdio_write_register_multiple(adapter,
- (address[ii] |
- RSI_SD_REQUEST_MASTER),
- (u8 *)&data[ii],
- 4)) {
- rsi_dbg(ERR_ZONE,
- "%s: Unable to hold TA threads\n", __func__);
- return -1;
- }
- }
-
- rsi_dbg(INIT_ZONE, "%s: loaded firmware\n", __func__);
- return 0;
-}
-
-/**
- * rsi_load_ta_instructions() - This function includes the actual funtionality
- * of loading the TA firmware.This function also
- * includes opening the TA file,reading the TA
- * file and writing their value in blocks of data.
- * @common: Pointer to the driver private structure.
- *
- * Return: status: 0 on success, -1 on failure.
- */
-static int rsi_load_ta_instructions(struct rsi_common *common)
-{
- struct rsi_hw *adapter = common->priv;
- struct rsi_91x_sdiodev *dev =
- (struct rsi_91x_sdiodev *)adapter->rsi_dev;
- u32 len;
- u32 num_blocks;
- const u8 *fw;
- const struct firmware *fw_entry = NULL;
- u32 block_size = dev->tx_blk_size;
- int status = 0;
- u32 base_address;
- u16 msb_address;
-
- if (rsi_sdio_master_access_msword(adapter, TA_BASE_ADDR)) {
- rsi_dbg(ERR_ZONE,
- "%s: Unable to set ms word to common reg\n",
- __func__);
- return -1;
- }
- base_address = TA_LOAD_ADDRESS;
- msb_address = (base_address >> 16);
-
- if (rsi_sdio_master_access_msword(adapter, msb_address)) {
- rsi_dbg(ERR_ZONE,
- "%s: Unable to set ms word reg\n", __func__);
- return -1;
- }
-
- status = request_firmware(&fw_entry, FIRMWARE_RSI9113, adapter->device);
- if (status < 0) {
- rsi_dbg(ERR_ZONE, "%s Firmware file %s not found\n",
- __func__, FIRMWARE_RSI9113);
- return status;
- }
-
- /* Copy firmware into DMA-accessible memory */
- fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
- if (!fw) {
- status = -ENOMEM;
- goto out;
- }
- len = fw_entry->size;
-
- if (len % 4)
- len += (4 - (len % 4));
-
- num_blocks = (len / block_size);
-
- rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
- rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
-
- status = rsi_copy_to_card(common, fw, len, num_blocks);
- kfree(fw);
-
-out:
- release_firmware(fw_entry);
- return status;
-}
-
-/**
* rsi_process_pkt() - This Function reads rx_blocks register and figures out
* the size of the rx pkt.
* @common: Pointer to the driver private structure.
@@ -472,28 +306,6 @@
}
/**
- * rsi_device_init() - This Function Initializes The HAL.
- * @common: Pointer to the driver private structure.
- *
- * Return: 0 on success, -1 on failure.
- */
-int rsi_sdio_device_init(struct rsi_common *common)
-{
- if (rsi_load_ta_instructions(common))
- return -1;
-
- if (rsi_sdio_master_access_msword(common->priv, MISC_CFG_BASE_ADDR)) {
- rsi_dbg(ERR_ZONE, "%s: Unable to set ms word reg\n",
- __func__);
- return -1;
- }
- rsi_dbg(INIT_ZONE,
- "%s: Setting ms word to 0x41050000\n", __func__);
-
- return 0;
-}
-
-/**
* rsi_sdio_read_buffer_status_register() - This function is used to the read
* buffer status register and set
* relevant fields in
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index cc8deec..f5de693 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include "rsi_usb.h"
+#include "rsi_hal.h"
/**
* rsi_usb_card_write() - This function writes to the USB Card.
@@ -141,6 +142,9 @@
return 0;
}
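+/* bmRequestType values for the vendor register-access control requests:
+ * vendor request to the device, direction as named.
+ */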
+#define RSI_USB_REQ_OUT (USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE)
+#define RSI_USB_REQ_IN (USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE)
+
/* rsi_usb_reg_read() - This function reads data from given register address.
* @usbdev: Pointer to the usb_device structure.
* @reg: Address of the register to be read.
@@ -164,11 +168,11 @@
status = usb_control_msg(usbdev,
usb_rcvctrlpipe(usbdev, 0),
USB_VENDOR_REGISTER_READ,
- USB_TYPE_VENDOR,
+ RSI_USB_REQ_IN,
((reg & 0xffff0000) >> 16), (reg & 0xffff),
(void *)buf,
len,
- HZ * 5);
+ USB_CTRL_GET_TIMEOUT);
*value = (buf[0] | (buf[1] << 8));
if (status < 0) {
@@ -211,12 +215,12 @@
status = usb_control_msg(usbdev,
usb_sndctrlpipe(usbdev, 0),
USB_VENDOR_REGISTER_WRITE,
- USB_TYPE_VENDOR,
+ RSI_USB_REQ_OUT,
((reg & 0xffff0000) >> 16),
(reg & 0xffff),
(void *)usb_reg_buf,
len,
- HZ * 5);
+ USB_CTRL_SET_TIMEOUT);
if (status < 0) {
rsi_dbg(ERR_ZONE,
"%s: Reg write failed with error code :%d\n",
@@ -273,6 +277,46 @@
return status;
}
+static int rsi_usb_read_register_multiple(struct rsi_hw *adapter, u32 addr,
+ u8 *data, u16 count)
+{
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+ u8 *buf;
+ u16 transfer;
+ int status;
+
+ if (!addr)
+ return -EINVAL;
+
+ buf = kzalloc(RSI_USB_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ while (count) {
+ transfer = min_t(u16, count, RSI_USB_BUF_SIZE);
+ status = usb_control_msg(dev->usbdev,
+ usb_rcvctrlpipe(dev->usbdev, 0),
+ USB_VENDOR_REGISTER_READ,
+ RSI_USB_REQ_IN,
+ ((addr & 0xffff0000) >> 16),
+ (addr & 0xffff), (void *)buf,
+ transfer, USB_CTRL_GET_TIMEOUT);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "Reg read failed with error code :%d\n",
+ status);
+ kfree(buf);
+ return status;
+ }
+ memcpy(data, buf, transfer);
+ count -= transfer;
+ data += transfer;
+ addr += transfer;
+ }
+ kfree(buf);
+ return 0;
+}
+
/**
* rsi_usb_write_register_multiple() - This function writes multiple bytes of
* information to multiple registers.
@@ -283,41 +327,40 @@
*
* Return: status: 0 on success, a negative error code on failure.
*/
-int rsi_usb_write_register_multiple(struct rsi_hw *adapter,
- u32 addr,
- u8 *data,
- u32 count)
+static int rsi_usb_write_register_multiple(struct rsi_hw *adapter, u32 addr,
+ u8 *data, u16 count)
{
struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
u8 *buf;
- u8 transfer;
+ u16 transfer;
int status = 0;
- buf = kzalloc(4096, GFP_KERNEL);
+ buf = kzalloc(RSI_USB_BUF_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
while (count) {
- transfer = (u8)(min_t(u32, count, 4096));
+ transfer = min_t(u16, count, RSI_USB_BUF_SIZE);
memcpy(buf, data, transfer);
status = usb_control_msg(dev->usbdev,
usb_sndctrlpipe(dev->usbdev, 0),
USB_VENDOR_REGISTER_WRITE,
- USB_TYPE_VENDOR,
+ RSI_USB_REQ_OUT,
((addr & 0xffff0000) >> 16),
(addr & 0xffff),
(void *)buf,
transfer,
- HZ * 5);
+ USB_CTRL_SET_TIMEOUT);
if (status < 0) {
rsi_dbg(ERR_ZONE,
"Reg write failed with error code :%d\n",
status);
- } else {
- count -= transfer;
- data += transfer;
- addr += transfer;
+ kfree(buf);
+ return status;
}
+ count -= transfer;
+ data += transfer;
+ addr += transfer;
}
kfree(buf);
@@ -348,6 +391,77 @@
len);
}
+static int rsi_usb_master_reg_read(struct rsi_hw *adapter, u32 reg,
+ u32 *value, u16 len)
+{
+ struct usb_device *usbdev =
+ ((struct rsi_91x_usbdev *)adapter->rsi_dev)->usbdev;
+
+ return rsi_usb_reg_read(usbdev, reg, (u16 *)value, len);
+}
+
+static int rsi_usb_master_reg_write(struct rsi_hw *adapter,
+ unsigned long reg,
+ unsigned long value, u16 len)
+{
+ struct usb_device *usbdev =
+ ((struct rsi_91x_usbdev *)adapter->rsi_dev)->usbdev;
+
+ return rsi_usb_reg_write(usbdev, reg, value, len);
+}
+
+static int rsi_usb_load_data_master_write(struct rsi_hw *adapter,
+ u32 base_address,
+ u32 instructions_sz, u16 block_size,
+ u8 *ta_firmware)
+{
+ u16 num_blocks;
+ u32 cur_indx, i;
+ u8 temp_buf[256];
+ int status;
+
+ num_blocks = instructions_sz / block_size;
+ rsi_dbg(INFO_ZONE, "num_blocks: %d\n", num_blocks);
+
+ for (cur_indx = 0, i = 0; i < num_blocks; i++, cur_indx += block_size) {
+ memset(temp_buf, 0, block_size);
+ memcpy(temp_buf, ta_firmware + cur_indx, block_size);
+ status = rsi_usb_write_register_multiple(adapter, base_address,
+ (u8 *)(temp_buf),
+ block_size);
+ if (status < 0)
+ return status;
+
+ rsi_dbg(INFO_ZONE, "%s: loading block: %d\n", __func__, i);
+ base_address += block_size;
+ }
+
+ if (instructions_sz % block_size) {
+ memset(temp_buf, 0, block_size);
+ memcpy(temp_buf, ta_firmware + cur_indx,
+ instructions_sz % block_size);
+ status = rsi_usb_write_register_multiple
+ (adapter, base_address,
+ (u8 *)temp_buf,
+ instructions_sz % block_size);
+ if (status < 0)
+ return status;
+ rsi_dbg(INFO_ZONE,
+ "Written Last Block in Address 0x%x Successfully\n",
+ cur_indx);
+ }
+ return 0;
+}
+
+static struct rsi_host_intf_ops usb_host_intf_ops = {
+ .write_pkt = rsi_usb_host_intf_write_pkt,
+ .read_reg_multiple = rsi_usb_read_register_multiple,
+ .write_reg_multiple = rsi_usb_write_register_multiple,
+ .master_reg_read = rsi_usb_master_reg_read,
+ .master_reg_write = rsi_usb_master_reg_write,
+ .load_data_master_write = rsi_usb_load_data_master_write,
+};
+
/**
* rsi_deinit_usb_interface() - This function deinitializes the usb interface.
* @adapter: Pointer to the adapter structure.
@@ -410,12 +524,14 @@
}
rsi_dev->rx_usb_urb[0]->transfer_buffer = adapter->priv->rx_data_pkt;
rsi_dev->tx_blk_size = 252;
+ adapter->block_size = rsi_dev->tx_blk_size;
/* Initializing function callbacks */
adapter->rx_urb_submit = rsi_rx_urb_submit;
- adapter->host_intf_write_pkt = rsi_usb_host_intf_write_pkt;
adapter->check_hw_queue_status = rsi_usb_check_queue_status;
adapter->determine_event_timeout = rsi_usb_event_timeout;
+ adapter->rsi_host_intf = RSI_HOST_INTF_USB;
+ adapter->host_intf_ops = &usb_host_intf_ops;
rsi_init_event(&rsi_dev->rx_thread.event);
status = rsi_create_kthread(common, &rsi_dev->rx_thread,
@@ -467,6 +583,7 @@
__func__);
return -ENOMEM;
}
+ adapter->rsi_host_intf = RSI_HOST_INTF_USB;
status = rsi_init_usb_interface(adapter, pfunction);
if (status) {
@@ -480,25 +597,20 @@
dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
status = rsi_usb_reg_read(dev->usbdev, FW_STATUS_REG, &fw_status, 2);
- if (status)
+ if (status < 0)
goto err1;
else
fw_status &= 1;
if (!fw_status) {
- status = rsi_usb_device_init(adapter->priv);
+ rsi_dbg(INIT_ZONE, "Loading firmware...\n");
+ status = rsi_hal_device_init(adapter);
if (status) {
rsi_dbg(ERR_ZONE, "%s: Failed in device init\n",
__func__);
goto err1;
}
-
- status = rsi_usb_reg_write(dev->usbdev,
- USB_INTERNAL_REG_1,
- RSI_USB_READY_MAGIC_NUM, 1);
- if (status)
- goto err1;
- rsi_dbg(INIT_ZONE, "%s: Performed device init\n", __func__);
+ rsi_dbg(INIT_ZONE, "%s: Device Init Done\n", __func__);
}
status = rsi_rx_urb_submit(adapter);
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
index de49008..d3e0a07 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
@@ -19,67 +19,6 @@
#include "rsi_usb.h"
/**
- * rsi_copy_to_card() - This function includes the actual funtionality of
- * copying the TA firmware to the card.Basically this
- * function includes opening the TA file,reading the TA
- * file and writing their values in blocks of data.
- * @common: Pointer to the driver private structure.
- * @fw: Pointer to the firmware value to be written.
- * @len: length of firmware file.
- * @num_blocks: Number of blocks to be written to the card.
- *
- * Return: 0 on success and -1 on failure.
- */
-static int rsi_copy_to_card(struct rsi_common *common,
- const u8 *fw,
- u32 len,
- u32 num_blocks)
-{
- struct rsi_hw *adapter = common->priv;
- struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
- u32 indx, ii;
- u32 block_size = dev->tx_blk_size;
- u32 lsb_address;
- u32 base_address;
-
- base_address = TA_LOAD_ADDRESS;
-
- for (indx = 0, ii = 0; ii < num_blocks; ii++, indx += block_size) {
- lsb_address = base_address;
- if (rsi_usb_write_register_multiple(adapter,
- lsb_address,
- (u8 *)(fw + indx),
- block_size)) {
- rsi_dbg(ERR_ZONE,
- "%s: Unable to load %s blk\n", __func__,
- FIRMWARE_RSI9113);
- return -EIO;
- }
- rsi_dbg(INIT_ZONE, "%s: loading block: %d\n", __func__, ii);
- base_address += block_size;
- }
-
- if (len % block_size) {
- lsb_address = base_address;
- if (rsi_usb_write_register_multiple(adapter,
- lsb_address,
- (u8 *)(fw + indx),
- len % block_size)) {
- rsi_dbg(ERR_ZONE,
- "%s: Unable to load %s blk\n", __func__,
- FIRMWARE_RSI9113);
- return -EIO;
- }
- }
- rsi_dbg(INIT_ZONE,
- "%s: Succesfully loaded %s instructions\n", __func__,
- FIRMWARE_RSI9113);
-
- rsi_dbg(INIT_ZONE, "%s: loaded firmware\n", __func__);
- return 0;
-}
-
-/**
* rsi_usb_rx_thread() - This is a kernel thread to receive the packets from
* the USB device.
* @common: Pointer to the driver private structure.
@@ -119,67 +58,3 @@
complete_and_exit(&dev->rx_thread.completion, 0);
}
-
-/**
- * rsi_load_ta_instructions() - This function includes the actual funtionality
- * of loading the TA firmware.This function also
- * includes opening the TA file,reading the TA
- * file and writing their value in blocks of data.
- * @common: Pointer to the driver private structure.
- *
- * Return: status: 0 on success, -1 on failure.
- */
-static int rsi_load_ta_instructions(struct rsi_common *common)
-{
- struct rsi_hw *adapter = common->priv;
- struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
- const struct firmware *fw_entry = NULL;
- u32 block_size = dev->tx_blk_size;
- const u8 *fw;
- u32 num_blocks, len;
- int status = 0;
-
- status = request_firmware(&fw_entry, FIRMWARE_RSI9113, adapter->device);
- if (status < 0) {
- rsi_dbg(ERR_ZONE, "%s Firmware file %s not found\n",
- __func__, FIRMWARE_RSI9113);
- return status;
- }
-
- /* Copy firmware into DMA-accessible memory */
- fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
- if (!fw) {
- status = -ENOMEM;
- goto out;
- }
- len = fw_entry->size;
-
- if (len % 4)
- len += (4 - (len % 4));
-
- num_blocks = (len / block_size);
-
- rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
- rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
-
- status = rsi_copy_to_card(common, fw, len, num_blocks);
- kfree(fw);
-
-out:
- release_firmware(fw_entry);
- return status;
-}
-
-/**
- * rsi_device_init() - This Function Initializes The HAL.
- * @common: Pointer to the driver private structure.
- *
- * Return: 0 on success, -1 on failure.
- */
-int rsi_usb_device_init(struct rsi_common *common)
-{
- if (rsi_load_ta_instructions(common))
- return -EIO;
-
- return 0;
- }
diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h
index d3fbe33..4434969 100644
--- a/drivers/net/wireless/rsi/rsi_common.h
+++ b/drivers/net/wireless/rsi/rsi_common.h
@@ -20,8 +20,7 @@
#include <linux/kthread.h>
#define EVENT_WAIT_FOREVER 0
-#define TA_LOAD_ADDRESS 0x00
-#define FIRMWARE_RSI9113 "rsi_91x.fw"
+#define FIRMWARE_RSI9113 "rs9113_wlan_qspi.rps"
#define QUEUE_NOT_FULL 1
#define QUEUE_FULL 0
diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h
new file mode 100644
index 0000000..b95200d
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_hal.h
@@ -0,0 +1,81 @@
+/**
+ * Copyright (c) 2017 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RSI_HAL_H__
+#define __RSI_HAL_H__
+
+#define FLASH_WRITE_CHUNK_SIZE (4 * 1024)
+#define FLASH_SECTOR_SIZE (4 * 1024)
+
+#define FLASH_SIZE_ADDR 0x04000016
+#define PING_BUFFER_ADDRESS 0x19000
+#define PONG_BUFFER_ADDRESS 0x1a000
+#define SWBL_REGIN 0x41050034
+#define SWBL_REGOUT 0x4105003c
+#define PING_WRITE 0x1
+#define PONG_WRITE 0x2
+
+#define BL_CMD_TIMEOUT 2000
+#define BL_BURN_TIMEOUT (50 * 1000)
+
+#define REGIN_VALID 0xA
+#define REGIN_INPUT 0xA0
+#define REGOUT_VALID 0xAB
+#define REGOUT_INVALID (~0xAB)
+#define CMD_PASS 0xAA
+#define CMD_FAIL 0xCC
+
+#define LOAD_HOSTED_FW 'A'
+#define BURN_HOSTED_FW 'B'
+#define PING_VALID 'I'
+#define PONG_VALID 'O'
+#define PING_AVAIL 'I'
+#define PONG_AVAIL 'O'
+#define EOF_REACHED 'E'
+#define CHECK_CRC 'K'
+#define POLLING_MODE 'P'
+#define CONFIG_AUTO_READ_MODE 'R'
+#define JUMP_TO_ZERO_PC 'J'
+#define FW_LOADING_SUCCESSFUL 'S'
+#define LOADING_INITIATED '1'
+
+/* Boot loader commands */
+#define SEND_RPS_FILE '2'
+
+#define FW_IMAGE_MIN_ADDRESS (68 * 1024)
+#define MAX_FLASH_FILE_SIZE (400 * 1024) /* 400K */
+#define FLASH_START_ADDRESS 16
+
+#define COMMON_HAL_CARD_READY_IND 0x0
+
+#define COMMAN_HAL_WAIT_FOR_CARD_READY 1
+
+struct bl_header {
+ __le32 flags;
+ __le32 image_no;
+ __le32 check_sum;
+ __le32 flash_start_address;
+ __le32 flash_len;
+} __packed;
+
+struct ta_metadata {
+ char *name;
+ unsigned int address;
+};
+
+int rsi_hal_device_init(struct rsi_hw *adapter);
+
+#endif
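The SWBL_REGIN/SWBL_REGOUT pair above implies a mailbox handshake with the soft bootloader: post a command into REGIN, poll REGOUT until the bootloader marks the response valid, then compare the status byte against CMD_PASS or CMD_FAIL. A minimal sketch of that handshake, assuming the command sits in the low byte and the REGIN_VALID/REGOUT_VALID tags in bits 15:8 (the bit layout is an assumption, not taken from this patch):

#include <linux/delay.h>
#include "rsi_main.h"
#include "rsi_hal.h"

static int bl_cmd_sketch(struct rsi_hw *adapter, u8 cmd, u8 exp_resp)
{
	struct rsi_host_intf_ops *ops = adapter->host_intf_ops;
	u32 regout = 0;
	int tries;

	/* post the command, tagged as valid, into the bootloader mailbox */
	if (ops->master_reg_write(adapter, SWBL_REGIN,
				  cmd | (REGIN_VALID << 8), 2) < 0)
		return -EIO;

	/* poll (roughly BL_CMD_TIMEOUT ms) for a valid response */
	for (tries = 0; tries < BL_CMD_TIMEOUT; tries++) {
		if (ops->master_reg_read(adapter, SWBL_REGOUT,
					 &regout, 2) < 0)
			return -EIO;
		if (((regout >> 8) & 0xff) == REGOUT_VALID)
			break;
		usleep_range(1000, 2000);
	}
	if (tries == BL_CMD_TIMEOUT)
		return -ETIMEDOUT;

	return ((regout & 0xff) == exp_resp) ? 0 : -EINVAL;
}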
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
index 1d5904b..ea4fc22 100644
--- a/drivers/net/wireless/rsi/rsi_main.h
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -82,6 +82,8 @@
((_q) == VI_Q) ? IEEE80211_AC_VI : \
IEEE80211_AC_VO)
+#define RSI_DEV_9113 1
+
struct version_info {
u16 major;
u16 minor;
@@ -204,13 +206,20 @@
struct cqm_info cqm_info;
bool hw_data_qs_blocked;
+ u8 coex_mode;
int tx_power;
u8 ant_in_use;
};
+enum host_intf {
+ RSI_HOST_INTF_SDIO = 0,
+ RSI_HOST_INTF_USB
+};
+
struct rsi_hw {
struct rsi_common *priv;
+ u8 device_model;
struct ieee80211_hw *hw;
struct ieee80211_vif *vifs[RSI_MAX_VIFS];
struct ieee80211_tx_queue_params edca_params[NUM_EDCA_QUEUES];
@@ -219,16 +228,39 @@
struct device *device;
u8 sc_nvifs;
+ enum host_intf rsi_host_intf;
+ u16 block_size;
#ifdef CONFIG_RSI_DEBUGFS
struct rsi_debugfs *dfsentry;
u8 num_debugfs_entries;
#endif
+ char *fw_file_name;
+ struct timer_list bl_cmd_timer;
+ bool blcmd_timer_expired;
+ u32 flash_capacity;
u8 dfs_region;
void *rsi_dev;
- int (*host_intf_read_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len);
- int (*host_intf_write_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len);
+ struct rsi_host_intf_ops *host_intf_ops;
int (*check_hw_queue_status)(struct rsi_hw *adapter, u8 q_num);
int (*rx_urb_submit)(struct rsi_hw *adapter);
int (*determine_event_timeout)(struct rsi_hw *adapter);
};
+
+struct rsi_host_intf_ops {
+ int (*read_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len);
+ int (*write_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len);
+ int (*master_access_msword)(struct rsi_hw *adapter, u16 ms_word);
+ int (*read_reg_multiple)(struct rsi_hw *adapter, u32 addr,
+ u8 *data, u16 count);
+ int (*write_reg_multiple)(struct rsi_hw *adapter, u32 addr,
+ u8 *data, u16 count);
+ int (*master_reg_read)(struct rsi_hw *adapter, u32 addr,
+ u32 *read_buf, u16 size);
+ int (*master_reg_write)(struct rsi_hw *adapter,
+ unsigned long addr, unsigned long data,
+ u16 size);
+ int (*load_data_master_write)(struct rsi_hw *adapter, u32 addr,
+ u32 instructions_size, u16 block_size,
+ u8 *fw);
+};
#endif
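Replacing the per-adapter host_intf_write_pkt pointer with this ops table keeps the common code bus-agnostic: SDIO and USB each register their own table at probe time (see usb_host_intf_ops above), and each bus fills in only the hooks it supports, e.g. master_access_msword comes from the rsi_sdio.h prototype below. A minimal sketch of the dispatch pattern (the wrapper name is illustrative, not from the patch):

#include "rsi_main.h"

static int rsi_send_pkt_sketch(struct rsi_hw *adapter, u8 *pkt, u32 len)
{
	/* route the TX packet through whichever bus registered its ops */
	if (!adapter->host_intf_ops || !adapter->host_intf_ops->write_pkt)
		return -EOPNOTSUPP;

	return adapter->host_intf_ops->write_pkt(adapter, pkt, len);
}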
diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h
index c7e8f2b..9fb73f6 100644
--- a/drivers/net/wireless/rsi/rsi_sdio.h
+++ b/drivers/net/wireless/rsi/rsi_sdio.h
@@ -110,19 +110,19 @@
u8 sdio_clock_speed;
u32 cardcapability;
u8 prev_desc[16];
- u32 tx_blk_size;
+ u16 tx_blk_size;
u8 write_fail;
};
void rsi_interrupt_handler(struct rsi_hw *adapter);
int rsi_init_sdio_slave_regs(struct rsi_hw *adapter);
-int rsi_sdio_device_init(struct rsi_common *common);
int rsi_sdio_read_register(struct rsi_hw *adapter, u32 addr, u8 *data);
int rsi_sdio_host_intf_read_pkt(struct rsi_hw *adapter, u8 *pkt, u32 length);
int rsi_sdio_write_register(struct rsi_hw *adapter, u8 function,
u32 addr, u8 *data);
int rsi_sdio_write_register_multiple(struct rsi_hw *adapter, u32 addr,
- u8 *data, u32 count);
+ u8 *data, u16 count);
+int rsi_sdio_master_access_msword(struct rsi_hw *adapter, u16 ms_word);
void rsi_sdio_ack_intr(struct rsi_hw *adapter, u8 int_bit);
int rsi_sdio_determine_event_timeout(struct rsi_hw *adapter);
int rsi_sdio_read_buffer_status_register(struct rsi_hw *adapter, u8 q_num);
diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h
index ebea0c4..59513ac 100644
--- a/drivers/net/wireless/rsi/rsi_usb.h
+++ b/drivers/net/wireless/rsi/rsi_usb.h
@@ -35,6 +35,8 @@
#define MGMT_EP 1
#define DATA_EP 2
+#define RSI_USB_BUF_SIZE 4096
+
struct rsi_91x_usbdev {
struct rsi_thread rx_thread;
u8 endpoint;
@@ -61,8 +63,5 @@
return EVENT_WAIT_FOREVER;
}
-int rsi_usb_device_init(struct rsi_common *common);
-int rsi_usb_write_register_multiple(struct rsi_hw *adapter, u32 addr,
- u8 *data, u32 count);
void rsi_usb_rx_thread(struct rsi_common *common);
#endif
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index 0a0ff7e..cc2ce60 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -84,7 +84,7 @@
return -ENOMEM;
if (req->ie_len)
- memcpy(skb_put(frame.skb, req->ie_len), req->ie, req->ie_len);
+ skb_put_data(frame.skb, req->ie, req->ie_len);
/* will be unlocked in cw1200_scan_work() */
down(&priv->scan.lock);
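This hunk and most of the ones that follow convert open-coded memcpy(skb_put(...), ...) sequences to the skb_put_data()/skb_put_zero() helpers introduced for 4.13. Their shape is roughly the following (sketch; the canonical definitions live in include/linux/skbuff.h):

#include <linux/skbuff.h>
#include <linux/string.h>

static inline void *skb_put_data_sketch(struct sk_buff *skb,
					const void *data, unsigned int len)
{
	void *tmp = skb_put(skb, len);	/* grow tail, return old tail */

	memcpy(tmp, data, len);
	return tmp;
}

static inline void *skb_put_zero_sketch(struct sk_buff *skb,
					unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);
	return tmp;
}

Besides being shorter, the helpers keep the length expression in one place, removing the class of bugs where the skb_put() and memcpy() lengths drift apart; returning the old tail also allows patterns like "felica = skb_put_data(skb, felica_params, 18)" seen in the pn533 hunks further down.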
diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c
index cd63ffe..e9050b4 100644
--- a/drivers/net/wireless/st/cw1200/txrx.c
+++ b/drivers/net/wireless/st/cw1200/txrx.c
@@ -574,7 +574,7 @@
return NULL;
}
- wsm = (struct wsm_tx *)skb_push(t->skb, sizeof(struct wsm_tx));
+ wsm = skb_push(t->skb, sizeof(struct wsm_tx));
t->txpriv.offset += sizeof(struct wsm_tx);
memset(wsm, 0, sizeof(*wsm));
wsm->hdr.len = __cpu_to_le16(t->skb->len);
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index bbf7604..08f0477 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1036,7 +1036,7 @@
goto out_idle;
}
if (req->ie_len)
- memcpy(skb_put(skb, req->ie_len), req->ie, req->ie_len);
+ skb_put_data(skb, req->ie, req->ie_len);
ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, skb->data,
skb->len);
diff --git a/drivers/net/wireless/ti/wl1251/tx.c b/drivers/net/wireless/ti/wl1251/tx.c
index 81de83c..de2fa67 100644
--- a/drivers/net/wireless/ti/wl1251/tx.c
+++ b/drivers/net/wireless/ti/wl1251/tx.c
@@ -161,8 +161,7 @@
return id;
fc = *(u16 *)skb->data;
- tx_hdr = (struct tx_double_buffer_desc *) skb_push(skb,
- sizeof(*tx_hdr));
+ tx_hdr = skb_push(skb, sizeof(*tx_hdr));
tx_hdr->length = cpu_to_le16(skb->len - sizeof(*tx_hdr));
rate = ieee80211_get_tx_rate(wl->hw, control);
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 7f4da72..2bfc12f 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -1156,9 +1156,9 @@
goto out;
}
if (ie0_len)
- memcpy(skb_put(skb, ie0_len), ie0, ie0_len);
+ skb_put_data(skb, ie0, ie0_len);
if (ie1_len)
- memcpy(skb_put(skb, ie1_len), ie1, ie1_len);
+ skb_put_data(skb, ie1, ie1_len);
if (sched_scan &&
(wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL)) {
@@ -1233,8 +1233,7 @@
skb_reserve(skb, sizeof(*hdr) + WL1271_EXTRA_SPACE_MAX);
- tmpl = (struct wl12xx_arp_rsp_template *)skb_put(skb, sizeof(*tmpl));
- memset(tmpl, 0, sizeof(*tmpl));
+ tmpl = skb_put_zero(skb, sizeof(*tmpl));
/* llc layer */
memcpy(tmpl->llc_hdr, rfc1042_header, sizeof(rfc1042_header));
@@ -1283,7 +1282,7 @@
memset(skb_push(skb, sizeof(__le16)), 0, sizeof(__le16));
/* mac80211 header */
- hdr = (struct ieee80211_hdr_3addr *)skb_push(skb, sizeof(*hdr));
+ hdr = skb_push(skb, sizeof(*hdr));
memset(hdr, 0, sizeof(*hdr));
fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS;
if (wlvif->sta.qos)
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index de7e2a5..a2cb408 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -1149,15 +1149,9 @@
part.mem.start = *ppos;
part.mem.size = bytes;
- buf = kmalloc(bytes, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ret = copy_from_user(buf, user_buf, bytes);
- if (ret) {
- ret = -EFAULT;
- goto err_out;
- }
+ buf = memdup_user(user_buf, bytes);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
mutex_lock(&wl->mutex);
@@ -1197,7 +1191,6 @@
if (ret == 0)
*ppos += bytes;
-err_out:
kfree(buf);
return ((ret == 0) ? bytes : ret);
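memdup_user() collapses the kmalloc/copy_from_user/unwind pattern into a single call that returns an ERR_PTR on failure, which is what lets the err_out label above go away. Roughly (a sketch; the real implementation in mm/util.c may differ, e.g. in GFP flags):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static void *memdup_user_sketch(const void __user *src, size_t len)
{
	void *p = kmalloc(len, GFP_KERNEL);

	if (!p)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	return p;
}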
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 382ec15..60aaa85 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1308,13 +1308,12 @@
skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
- hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
- memset(hdr, 0, sizeof(*hdr));
+ hdr = skb_put_zero(skb, sizeof(*hdr));
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_NULLFUNC |
IEEE80211_FCTL_TODS);
- memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
+ skb_put_zero(skb, dummy_packet_size);
/* Dummy packets require the TID to be management */
skb->priority = WL1271_TID_MGMT;
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 52a55f9..0f15696 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -117,7 +117,6 @@
struct wl1271_rx_descriptor *desc;
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
- u8 *buf;
u8 beacon = 0;
u8 is_data = 0;
u8 reserved = 0, offset_to_data = 0;
@@ -174,15 +173,13 @@
/* reserve the unaligned payload(if any) */
skb_reserve(skb, reserved);
- buf = skb_put(skb, pkt_data_len);
-
/*
* Copy packets from aggregation buffer to the skbs without rx
* descriptor and with packet payload aligned care. In case of unaligned
* packets copy the packets in offset of 2 bytes guarantee IP header
* payload aligned to 4 bytes.
*/
- memcpy(buf, data + sizeof(*desc), pkt_data_len);
+ skb_put_data(skb, data + sizeof(*desc), pkt_data_len);
if (rx_align == WLCORE_RX_BUF_PADDED)
skb_pull(skb, RX_BUF_ALIGN);
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index f949ad2b..fa3547e 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -70,10 +70,10 @@
#define WSPI_MAX_CHUNK_SIZE 4092
/*
- * wl18xx driver aggregation buffer size is (13 * PAGE_SIZE) compared to
- * (4 * PAGE_SIZE) for wl12xx, so use the larger buffer needed for wl18xx
+ * wl18xx driver aggregation buffer size is (13 * 4K) compared to
+ * (4 * 4K) for wl12xx, so use the larger buffer needed for wl18xx
*/
-#define SPI_AGGR_BUFFER_SIZE (13 * PAGE_SIZE)
+#define SPI_AGGR_BUFFER_SIZE (13 * SZ_4K)
/* Maximum number of SPI write chunks */
#define WSPI_MAX_NUM_OF_CHUNKS \
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index c1b8e4e..a3f5e9c 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -223,8 +223,7 @@
total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);
if (total_blocks <= wl->tx_blocks_available) {
- desc = (struct wl1271_tx_hw_descr *)skb_push(
- skb, total_len - skb->len);
+ desc = skb_push(skb, total_len - skb->len);
wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
spare_blocks);
diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
index de7ff39..7f586d7 100644
--- a/drivers/net/wireless/zydas/zd1201.c
+++ b/drivers/net/wireless/zydas/zd1201.c
@@ -326,13 +326,13 @@
if (!(skb = dev_alloc_skb(datalen+24)))
goto resubmit;
- memcpy(skb_put(skb, 2), &data[datalen-16], 2);
- memcpy(skb_put(skb, 2), &data[datalen-2], 2);
- memcpy(skb_put(skb, 6), &data[datalen-14], 6);
- memcpy(skb_put(skb, 6), &data[datalen-22], 6);
- memcpy(skb_put(skb, 6), &data[datalen-8], 6);
- memcpy(skb_put(skb, 2), &data[datalen-24], 2);
- memcpy(skb_put(skb, len), data, len);
+ skb_put_data(skb, &data[datalen - 16], 2);
+ skb_put_data(skb, &data[datalen - 2], 2);
+ skb_put_data(skb, &data[datalen - 14], 6);
+ skb_put_data(skb, &data[datalen - 22], 6);
+ skb_put_data(skb, &data[datalen - 8], 6);
+ skb_put_data(skb, &data[datalen - 24], 2);
+ skb_put_data(skb, data, len);
skb->protocol = eth_type_trans(skb, zd->dev);
zd->dev->stats.rx_packets++;
zd->dev->stats.rx_bytes += skb->len;
@@ -359,9 +359,9 @@
frag->skb = skb;
frag->seq = seq & IEEE80211_SCTL_SEQ;
skb_reserve(skb, 2);
- memcpy(skb_put(skb, 12), &data[datalen-14], 12);
- memcpy(skb_put(skb, 2), &data[6], 2);
- memcpy(skb_put(skb, len), data+8, len);
+ skb_put_data(skb, &data[datalen - 14], 12);
+ skb_put_data(skb, &data[6], 2);
+ skb_put_data(skb, data + 8, len);
hlist_add_head(&frag->fnode, &zd->fraglist);
goto resubmit;
}
@@ -385,9 +385,9 @@
if (!skb)
goto resubmit;
skb_reserve(skb, 2);
- memcpy(skb_put(skb, 12), &data[datalen-14], 12);
- memcpy(skb_put(skb, 2), &data[6], 2);
- memcpy(skb_put(skb, len), data+8, len);
+ skb_put_data(skb, &data[datalen - 14], 12);
+ skb_put_data(skb, &data[6], 2);
+ skb_put_data(skb, data + 8, len);
}
skb->protocol = eth_type_trans(skb, zd->dev);
zd->dev->stats.rx_packets++;
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index fe6517a6..b785742 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -868,8 +868,7 @@
unsigned int frag_len = skb->len + FCS_LEN;
unsigned int packet_length;
struct ieee80211_rate *txrate;
- struct zd_ctrlset *cs = (struct zd_ctrlset *)
- skb_push(skb, sizeof(struct zd_ctrlset));
+ struct zd_ctrlset *cs = skb_push(skb, sizeof(struct zd_ctrlset));
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
ZD_ASSERT(frag_len <= 0xffff);
@@ -1103,7 +1102,7 @@
}
/* FIXME : could we avoid this big memcpy ? */
- memcpy(skb_put(skb, length), buffer, length);
+ skb_put_data(skb, buffer, length);
memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats));
ieee80211_rx_irqsafe(hw, skb);
diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c
index 7c1eaea..badd816 100644
--- a/drivers/nfc/fdp/fdp.c
+++ b/drivers/nfc/fdp/fdp.c
@@ -228,8 +228,7 @@
skb_reserve(skb, NCI_CTRL_HDR_SIZE);
- memcpy(skb_put(skb, payload_size), fw->data + (fw->size - len),
- payload_size);
+ skb_put_data(skb, fw->data + (fw->size - len), payload_size);
rc = nci_send_data(ndev, conn_id, skb);
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index 712936f..e0baec8 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -79,14 +79,14 @@
/* Add length header */
len = skb->len;
- *skb_push(skb, 1) = len & 0xff;
- *skb_push(skb, 1) = len >> 8;
+ *(u8 *)skb_push(skb, 1) = len & 0xff;
+ *(u8 *)skb_push(skb, 1) = len >> 8;
/* Compute and add lrc */
for (i = 0; i < len + 2; i++)
lrc ^= skb->data[i];
- *skb_put(skb, 1) = lrc;
+ skb_put_u8(skb, lrc);
}
static void fdp_nci_i2c_remove_len_lrc(struct sk_buff *skb)
@@ -186,7 +186,7 @@
goto flush;
}
- memcpy(skb_put(*skb, len), tmp, len);
+ skb_put_data(*skb, tmp, len);
fdp_nci_i2c_dump_skb(&client->dev, "fdp_rd", *skb);
fdp_nci_i2c_remove_len_lrc(*skb);
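Two related idioms appear in this file and recur through the microread, pn544, st21nfca and pn533 hunks below: skb_put_u8() for single-byte appends, and an explicit *(u8 *) cast on skb_push() now that the skb head/tail helpers return void *. The byte helper is roughly (sketch):

#include <linux/skbuff.h>

static inline void skb_put_u8_sketch(struct sk_buff *skb, u8 val)
{
	*(u8 *)skb_put(skb, 1) = val;
}

There is no one-byte counterpart for skb_push() in this series, hence the repeated *(u8 *)skb_push(skb, 1) = ... stores.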
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
index e0e8afd..b668b7b 100644
--- a/drivers/nfc/microread/i2c.c
+++ b/drivers/nfc/microread/i2c.c
@@ -70,12 +70,12 @@
int len;
len = skb->len;
- *skb_push(skb, 1) = len;
+ *(u8 *)skb_push(skb, 1) = len;
for (i = 0; i < skb->len; i++)
crc = crc ^ skb->data[i];
- *skb_put(skb, 1) = crc;
+ skb_put_u8(skb, crc);
}
static void microread_i2c_remove_len_crc(struct sk_buff *skb)
@@ -173,7 +173,7 @@
goto flush;
}
- *skb_put(*skb, 1) = len;
+ skb_put_u8(*skb, len);
r = i2c_master_recv(client, skb_put(*skb, len), len);
if (r != len) {
diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c
index f454dc6..e5d5d2d 100644
--- a/drivers/nfc/microread/microread.c
+++ b/drivers/nfc/microread/microread.c
@@ -419,7 +419,7 @@
pr_info("data exchange to gate 0x%x\n", target->hci_reader_gate);
if (target->hci_reader_gate == MICROREAD_GATE_ID_P2P_INITIATOR) {
- *skb_push(skb, 1) = 0;
+ *(u8 *)skb_push(skb, 1) = 0;
return nfc_hci_send_event(hdev, target->hci_reader_gate,
MICROREAD_EVT_P2P_INITIATOR_EXCHANGE_TO_RF,
@@ -441,8 +441,8 @@
crc = crc_ccitt(0xffff, skb->data, skb->len);
crc = ~crc;
- *skb_put(skb, 1) = crc & 0xff;
- *skb_put(skb, 1) = crc >> 8;
+ skb_put_u8(skb, crc & 0xff);
+ skb_put_u8(skb, crc >> 8);
break;
case MICROREAD_GATE_ID_MREAD_NFC_T3:
control_bits = 0xDB;
@@ -453,7 +453,7 @@
return 1;
}
- *skb_push(skb, 1) = control_bits;
+ *(u8 *)skb_push(skb, 1) = control_bits;
info->async_cb_type = MICROREAD_CB_TYPE_READER_ALL;
info->async_cb = cb;
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c
index c38bdd6..f9f000c 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.c
+++ b/drivers/nfc/nfcmrvl/fw_dnld.c
@@ -92,7 +92,7 @@
return NULL;
}
- hdr = (struct nci_data_hdr *) skb_put(skb, NCI_DATA_HDR_SIZE);
+ hdr = skb_put(skb, NCI_DATA_HDR_SIZE);
hdr->conn_id = NCI_CORE_LC_CONNID_PROP_FW_DL;
hdr->rfu = 0;
hdr->plen = plen;
@@ -292,7 +292,7 @@
out_skb = alloc_lc_skb(priv, 1);
if (!out_skb)
return -ENOMEM;
- *skb_put(out_skb, 1) = 0xBF;
+ skb_put_u8(out_skb, 0xBF);
nci_send_frame(priv->ndev, out_skb);
priv->fw_dnld.substate = SUBSTATE_WAIT_NACK_CREDIT;
return 0;
@@ -301,7 +301,7 @@
out_skb = alloc_lc_skb(priv, 1);
if (!out_skb)
return -ENOMEM;
- *skb_put(out_skb, 1) = HELPER_ACK_PACKET_FORMAT;
+ skb_put_u8(out_skb, HELPER_ACK_PACKET_FORMAT);
nci_send_frame(priv->ndev, out_skb);
priv->fw_dnld.substate = SUBSTATE_WAIT_ACK_CREDIT;
break;
@@ -324,10 +324,9 @@
out_skb = alloc_lc_skb(priv, priv->fw_dnld.chunk_len);
if (!out_skb)
return -ENOMEM;
- memcpy(skb_put(out_skb, priv->fw_dnld.chunk_len),
- ((uint8_t *)priv->fw_dnld.fw->data) +
- priv->fw_dnld.offset,
- priv->fw_dnld.chunk_len);
+ skb_put_data(out_skb,
+ ((uint8_t *)priv->fw_dnld.fw->data) + priv->fw_dnld.offset,
+ priv->fw_dnld.chunk_len);
nci_send_frame(priv->ndev, out_skb);
priv->fw_dnld.substate = SUBSTATE_WAIT_DATA_CREDIT;
}
diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
index 78b7aa8..ffec103 100644
--- a/drivers/nfc/nfcmrvl/i2c.c
+++ b/drivers/nfc/nfcmrvl/i2c.c
@@ -60,7 +60,7 @@
return -ENOMEM;
/* Copy NCI header into the SKB */
- memcpy(skb_put(*skb, NCI_CTRL_HDR_SIZE), &nci_hdr, NCI_CTRL_HDR_SIZE);
+ skb_put_data(*skb, &nci_hdr, NCI_CTRL_HDR_SIZE);
if (nci_hdr.plen) {
/* Read the NCI payload */
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
index 51c8240..c5038e6 100644
--- a/drivers/nfc/nfcmrvl/main.c
+++ b/drivers/nfc/nfcmrvl/main.c
@@ -68,7 +68,7 @@
unsigned char *hdr;
unsigned char len = skb->len;
- hdr = (char *) skb_push(skb, NFCMRVL_HCI_EVENT_HEADER_SIZE);
+ hdr = skb_push(skb, NFCMRVL_HCI_EVENT_HEADER_SIZE);
hdr[0] = NFCMRVL_HCI_COMMAND_CODE;
hdr[1] = NFCMRVL_HCI_OGF;
hdr[2] = NFCMRVL_HCI_OCF;
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
index 585a0f2..699aa9d 100644
--- a/drivers/nfc/nfcmrvl/usb.c
+++ b/drivers/nfc/nfcmrvl/usb.c
@@ -83,8 +83,8 @@
if (!skb) {
nfc_err(&drv_data->udev->dev, "failed to alloc mem\n");
} else {
- memcpy(skb_put(skb, urb->actual_length),
- urb->transfer_buffer, urb->actual_length);
+ skb_put_data(skb, urb->transfer_buffer,
+ urb->actual_length);
if (nfcmrvl_nci_recv_frame(drv_data->priv, skb) < 0)
nfc_err(&drv_data->udev->dev,
"corrupted Rx packet\n");
diff --git a/drivers/nfc/nxp-nci/firmware.c b/drivers/nfc/nxp-nci/firmware.c
index 553011f..e50c6f6 100644
--- a/drivers/nfc/nxp-nci/firmware.c
+++ b/drivers/nfc/nxp-nci/firmware.c
@@ -124,8 +124,7 @@
header |= chunk_len & NXP_NCI_FW_FRAME_LEN_MASK;
put_unaligned_be16(header, skb_put(skb, NXP_NCI_FW_HDR_LEN));
- memcpy(skb_put(skb, chunk_len), fw_info->data + fw_info->written,
- chunk_len);
+ skb_put_data(skb, fw_info->data + fw_info->written, chunk_len);
crc = nxp_nci_fw_crc(skb->data, chunk_len + NXP_NCI_FW_HDR_LEN);
put_unaligned_be16(crc, skb_put(skb, NXP_NCI_FW_CRC_LEN));
@@ -312,8 +311,7 @@
if (nxp_nci_fw_check_crc(skb) != 0x00)
fw_info->cmd_result = -EBADMSG;
else
- fw_info->cmd_result = nxp_nci_fw_read_status(
- *skb_pull(skb, NXP_NCI_FW_HDR_LEN));
+ fw_info->cmd_result = nxp_nci_fw_read_status(*(u8 *)skb_pull(skb, NXP_NCI_FW_HDR_LEN));
kfree_skb(skb);
} else {
fw_info->cmd_result = -EIO;
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index ff22d76..198585b 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -135,7 +135,7 @@
goto fw_read_exit;
}
- memcpy(skb_put(*skb, NXP_NCI_FW_HDR_LEN), &header, NXP_NCI_FW_HDR_LEN);
+ skb_put_data(*skb, &header, NXP_NCI_FW_HDR_LEN);
r = i2c_master_recv(client, skb_put(*skb, frame_len), frame_len);
if (r != frame_len) {
@@ -176,8 +176,7 @@
goto nci_read_exit;
}
- memcpy(skb_put(*skb, NCI_CTRL_HDR_SIZE), (void *) &header,
- NCI_CTRL_HDR_SIZE);
+ skb_put_data(*skb, (void *)&header, NCI_CTRL_HDR_SIZE);
r = i2c_master_recv(client, skb_put(*skb, header.plen), header.plen);
if (r != header.plen) {
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index 65bbaa5..c05cb63 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -1006,7 +1006,7 @@
static struct sk_buff *pn533_alloc_poll_tg_frame(struct pn533 *dev)
{
struct sk_buff *skb;
- u8 *felica, *nfcid3, *gb;
+ u8 *felica, *nfcid3;
u8 *gbytes = dev->gb;
size_t gbytes_len = dev->gb_len;
@@ -1032,29 +1032,26 @@
return NULL;
/* DEP support only */
- *skb_put(skb, 1) = PN533_INIT_TARGET_DEP;
+ skb_put_u8(skb, PN533_INIT_TARGET_DEP);
/* MIFARE params */
- memcpy(skb_put(skb, 6), mifare_params, 6);
+ skb_put_data(skb, mifare_params, 6);
/* Felica params */
- felica = skb_put(skb, 18);
- memcpy(felica, felica_params, 18);
+ felica = skb_put_data(skb, felica_params, 18);
get_random_bytes(felica + 2, 6);
/* NFCID3 */
- nfcid3 = skb_put(skb, 10);
- memset(nfcid3, 0, 10);
+ nfcid3 = skb_put_zero(skb, 10);
memcpy(nfcid3, felica, 8);
/* General bytes */
- *skb_put(skb, 1) = gbytes_len;
+ skb_put_u8(skb, gbytes_len);
- gb = skb_put(skb, gbytes_len);
- memcpy(gb, gbytes, gbytes_len);
+ skb_put_data(skb, gbytes, gbytes_len);
/* Len Tk */
- *skb_put(skb, 1) = 0;
+ skb_put_u8(skb, 0);
return skb;
}
@@ -1283,8 +1280,8 @@
if (!skb)
return;
- *skb_put(skb, 1) = PN533_CFGITEM_RF_FIELD;
- *skb_put(skb, 1) = PN533_CFGITEM_RF_FIELD_AUTO_RFCA;
+ skb_put_u8(skb, PN533_CFGITEM_RF_FIELD);
+ skb_put_u8(skb, PN533_CFGITEM_RF_FIELD_AUTO_RFCA);
rc = pn533_send_cmd_async(dev, PN533_CMD_RF_CONFIGURATION, skb,
pn533_rf_complete, NULL);
@@ -1378,22 +1375,21 @@
if (!skb)
return -ENOMEM;
- *skb_put(skb, 1) = 0x01; /* Active */
- *skb_put(skb, 1) = 0x02; /* 424 kbps */
+ skb_put_u8(skb, 0x01); /* Active */
+ skb_put_u8(skb, 0x02); /* 424 kbps */
next = skb_put(skb, 1); /* Next */
*next = 0;
/* Copy passive data */
- memcpy(skb_put(skb, PASSIVE_DATA_LEN), passive_data, PASSIVE_DATA_LEN);
+ skb_put_data(skb, passive_data, PASSIVE_DATA_LEN);
*next |= 1;
/* Copy NFCID3 (which is NFCID2 from SENSF_RES) */
- memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), nfcid3,
- NFC_NFCID3_MAXSIZE);
+ skb_put_data(skb, nfcid3, NFC_NFCID3_MAXSIZE);
*next |= 2;
- memcpy(skb_put(skb, dev->gb_len), dev->gb, dev->gb_len);
+ skb_put_data(skb, dev->gb, dev->gb_len);
*next |= 4; /* We have some Gi */
rc = pn533_send_cmd_async(dev, PN533_CMD_IN_JUMP_FOR_DEP, skb,
@@ -1473,7 +1469,7 @@
if (!skb)
return NULL;
- memcpy(skb_put(skb, mod->len), &mod->data, mod->len);
+ skb_put_data(skb, &mod->data, mod->len);
return skb;
}
@@ -1624,8 +1620,8 @@
if (!skb)
return -ENOMEM;
- *skb_put(skb, sizeof(u8)) = 1; /* TG */
- *skb_put(skb, sizeof(u8)) = 0; /* Next */
+ skb_put_u8(skb, 1); /* TG */
+ skb_put_u8(skb, 0); /* Next */
resp = pn533_send_cmd_sync(dev, PN533_CMD_IN_ATR, skb);
if (IS_ERR(resp))
@@ -1741,7 +1737,7 @@
if (!skb)
return;
- *skb_put(skb, 1) = 1; /* TG*/
+ skb_put_u8(skb, 1); /* TG*/
rc = pn533_send_cmd_async(dev, PN533_CMD_IN_RELEASE, skb,
pn533_deactivate_target_complete, NULL);
@@ -1852,14 +1848,14 @@
if (!skb)
return -ENOMEM;
- *skb_put(skb, 1) = !comm_mode; /* ActPass */
- *skb_put(skb, 1) = 0x02; /* 424 kbps */
+ skb_put_u8(skb, !comm_mode); /* ActPass */
+ skb_put_u8(skb, 0x02); /* 424 kbps */
next = skb_put(skb, 1); /* Next */
*next = 0;
/* Copy passive data */
- memcpy(skb_put(skb, PASSIVE_DATA_LEN), passive_data, PASSIVE_DATA_LEN);
+ skb_put_data(skb, passive_data, PASSIVE_DATA_LEN);
*next |= 1;
/* Copy NFCID3 (which is NFCID2 from SENSF_RES) */
@@ -1867,12 +1863,11 @@
memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), target->nfcid2,
target->nfcid2_len);
else
- memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), nfcid3,
- NFC_NFCID3_MAXSIZE);
+ skb_put_data(skb, nfcid3, NFC_NFCID3_MAXSIZE);
*next |= 2;
if (gb != NULL && gb_len > 0) {
- memcpy(skb_put(skb, gb_len), gb, gb_len);
+ skb_put_data(skb, gb, gb_len);
*next |= 4; /* We have some Gi */
} else {
*next = 0;
@@ -2095,13 +2090,13 @@
/* MI + TG */
if (frag_size == PN533_CMD_DATAFRAME_MAXLEN)
- *skb_push(frag, sizeof(u8)) =
- (PN533_CMD_MI_MASK | 1);
+ *(u8 *)skb_push(frag, sizeof(u8)) =
+ (PN533_CMD_MI_MASK | 1);
else
- *skb_push(frag, sizeof(u8)) = 1; /* TG */
+ *(u8 *)skb_push(frag, sizeof(u8)) = 1; /* TG */
}
- memcpy(skb_put(frag, frag_size), skb->data, frag_size);
+ skb_put_data(frag, skb->data, frag_size);
/* Reduce the size of incoming buffer */
skb_pull(skb, frag_size);
@@ -2165,7 +2160,7 @@
goto error;
}
} else {
- *skb_push(skb, sizeof(u8)) = 1; /* TG */
+ *(u8 *)skb_push(skb, sizeof(u8)) = 1; /* TG */
}
rc = pn533_send_data_async(dev, PN533_CMD_IN_DATA_EXCHANGE,
@@ -2279,7 +2274,7 @@
break;
}
default:
- *skb_put(skb, sizeof(u8)) = 1; /*TG*/
+ skb_put_u8(skb, 1); /*TG*/
rc = pn533_send_cmd_direct_async(dev,
PN533_CMD_IN_DATA_EXCHANGE,
@@ -2375,8 +2370,8 @@
if (!skb)
return -ENOMEM;
- *skb_put(skb, sizeof(cfgitem)) = cfgitem;
- memcpy(skb_put(skb, cfgdata_len), cfgdata, cfgdata_len);
+ skb_put_u8(skb, cfgitem);
+ skb_put_data(skb, cfgdata, cfgdata_len);
resp = pn533_send_cmd_sync(dev, PN533_CMD_RF_CONFIGURATION, skb);
if (IS_ERR(resp))
@@ -2420,7 +2415,7 @@
if (!skb)
return -ENOMEM;
- *skb_put(skb, sizeof(u8)) = 0x1;
+ skb_put_u8(skb, 0x1);
resp = pn533_send_cmd_sync(dev, 0x18, skb);
if (IS_ERR(resp))
@@ -2459,7 +2454,7 @@
if (!skb)
return -ENOMEM;
- *skb_put(skb, 1) = 0x01;
+ skb_put_u8(skb, 0x01);
resp = pn533_send_cmd_sync(dev, PN533_CMD_SAM_CONFIGURATION, skb);
if (IS_ERR(resp))
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index 8ed203e..e153e8b 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -75,8 +75,8 @@
if (!skb) {
nfc_err(&phy->udev->dev, "failed to alloc memory\n");
} else {
- memcpy(skb_put(skb, urb->actual_length),
- urb->transfer_buffer, urb->actual_length);
+ skb_put_data(skb, urb->transfer_buffer,
+ urb->actual_length);
}
}
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 71ac083..fedde9d 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -283,12 +283,12 @@
int len;
len = skb->len + 2;
- *skb_push(skb, 1) = len;
+ *(u8 *)skb_push(skb, 1) = len;
crc = crc_ccitt(0xffff, skb->data, skb->len);
crc = ~crc;
- *skb_put(skb, 1) = crc & 0xff;
- *skb_put(skb, 1) = crc >> 8;
+ skb_put_u8(skb, crc & 0xff);
+ skb_put_u8(skb, crc >> 8);
}
static void pn544_hci_i2c_remove_len_crc(struct sk_buff *skb)
@@ -391,7 +391,7 @@
goto flush;
}
- *skb_put(*skb, 1) = len;
+ skb_put_u8(*skb, len);
r = i2c_master_recv(client, skb_put(*skb, len), len);
if (r != len) {
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c
index 12e819d..70e898e 100644
--- a/drivers/nfc/pn544/pn544.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -649,8 +649,8 @@
} else
return 1;
case PN544_RF_READER_F_GATE:
- *skb_push(skb, 1) = 0;
- *skb_push(skb, 1) = 0;
+ *(u8 *)skb_push(skb, 1) = 0;
+ *(u8 *)skb_push(skb, 1) = 0;
info->async_cb_type = PN544_CB_TYPE_READER_F;
info->async_cb = cb;
@@ -665,7 +665,7 @@
PN544_JEWEL_RAW_CMD, skb->data,
skb->len, cb, cb_context);
case PN544_RF_READER_NFCIP1_INITIATOR_GATE:
- *skb_push(skb, 1) = 0;
+ *(u8 *)skb_push(skb, 1) = 0;
return nfc_hci_send_event(hdev, target->hci_reader_gate,
PN544_HCI_EVT_SND_DATA, skb->data,
@@ -680,7 +680,7 @@
int r;
/* Set default false for multiple information chaining */
- *skb_push(skb, 1) = 0;
+ *(u8 *)skb_push(skb, 1) = 0;
r = nfc_hci_send_event(hdev, PN544_RF_READER_NFCIP1_TARGET_GATE,
PN544_HCI_EVT_SND_DATA, skb->data, skb->len);
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index 19be93e..bb43ceb 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -991,7 +991,7 @@
if (!skb)
return -ENOMEM;
- *skb_put(skb, sizeof(u8)) = command_type;
+ skb_put_u8(skb, command_type);
resp = port100_send_cmd_sync(dev, PORT100_CMD_SET_COMMAND_TYPE, skb);
if (IS_ERR(resp))
@@ -1059,7 +1059,7 @@
if (!skb)
return -ENOMEM;
- *skb_put(skb, 1) = on ? 1 : 0;
+ skb_put_u8(skb, on ? 1 : 0);
/* Cancel the last command if the device is being switched off */
if (!on)
@@ -1089,9 +1089,8 @@
if (!skb)
return -ENOMEM;
- memcpy(skb_put(skb, sizeof(struct port100_in_rf_setting)),
- &in_rf_settings[rf],
- sizeof(struct port100_in_rf_setting));
+ skb_put_data(skb, &in_rf_settings[rf],
+ sizeof(struct port100_in_rf_setting));
resp = port100_send_cmd_sync(dev, PORT100_CMD_IN_SET_RF, skb);
@@ -1133,7 +1132,7 @@
if (!skb)
return -ENOMEM;
- memcpy(skb_put(skb, size), protocols, size);
+ skb_put_data(skb, protocols, size);
resp = port100_send_cmd_sync(dev, PORT100_CMD_IN_SET_PROTOCOL, skb);
@@ -1247,9 +1246,8 @@
if (!skb)
return -ENOMEM;
- memcpy(skb_put(skb, sizeof(struct port100_tg_rf_setting)),
- &tg_rf_settings[rf],
- sizeof(struct port100_tg_rf_setting));
+ skb_put_data(skb, &tg_rf_settings[rf],
+ sizeof(struct port100_tg_rf_setting));
resp = port100_send_cmd_sync(dev, PORT100_CMD_TG_SET_RF, skb);
@@ -1291,7 +1289,7 @@
if (!skb)
return -ENOMEM;
- memcpy(skb_put(skb, size), protocols, size);
+ skb_put_data(skb, protocols, size);
resp = port100_send_cmd_sync(dev, PORT100_CMD_TG_SET_PROTOCOL, skb);
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index 5f97da1..38548bd 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -76,9 +76,9 @@
if (!skb)
return -ENOMEM;
- memcpy(skb_put(skb, S3FWRN5_FW_HDR_SIZE), &hdr, S3FWRN5_FW_HDR_SIZE);
+ skb_put_data(skb, &hdr, S3FWRN5_FW_HDR_SIZE);
if (len)
- memcpy(skb_put(skb, len), data, len);
+ skb_put_data(skb, data, len);
*msg = skb;
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
index 3ed0adf..3f09d7f 100644
--- a/drivers/nfc/s3fwrn5/i2c.c
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -157,7 +157,7 @@
if (!skb)
return -ENOMEM;
- memcpy(skb_put(skb, hdr_size), hdr, hdr_size);
+ skb_put_data(skb, hdr, hdr_size);
if (data_len == 0)
goto out;
diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c
index 50880d7..9477994 100644
--- a/drivers/nfc/st-nci/ndlc.c
+++ b/drivers/nfc/st-nci/ndlc.c
@@ -87,7 +87,7 @@
u8 pcb = PCB_TYPE_DATAFRAME | PCB_DATAFRAME_RETRANSMIT_NO |
PCB_FRAME_CRC_INFO_NOTPRESENT;
- *skb_push(skb, 1) = pcb;
+ *(u8 *)skb_push(skb, 1) = pcb;
skb_queue_tail(&ndlc->send_q, skb);
schedule_work(&ndlc->sm_work);
diff --git a/drivers/nfc/st21nfca/core.c b/drivers/nfc/st21nfca/core.c
index 50be3b7..e803fdf 100644
--- a/drivers/nfc/st21nfca/core.c
+++ b/drivers/nfc/st21nfca/core.c
@@ -782,12 +782,12 @@
if (target->supported_protocols == NFC_PROTO_NFC_DEP_MASK)
return st21nfca_im_send_dep_req(hdev, skb);
- *skb_push(skb, 1) = 0x1a;
+ *(u8 *)skb_push(skb, 1) = 0x1a;
return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
ST21NFCA_WR_XCHG_DATA, skb->data,
skb->len, cb, cb_context);
case ST21NFCA_RF_READER_14443_3_A_GATE:
- *skb_push(skb, 1) = 0x1a; /* CTR, see spec:10.2.2.1 */
+ *(u8 *)skb_push(skb, 1) = 0x1a; /* CTR, see spec:10.2.2.1 */
return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
ST21NFCA_WR_XCHG_DATA, skb->data,
@@ -797,7 +797,7 @@
info->async_cb = cb;
info->async_cb_context = cb_context;
- *skb_push(skb, 1) = 0x17;
+ *(u8 *)skb_push(skb, 1) = 0x17;
return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
ST21NFCA_WR_XCHG_DATA, skb->data,
diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c
index 798a32b..fd08be2 100644
--- a/drivers/nfc/st21nfca/dep.c
+++ b/drivers/nfc/st21nfca/dep.c
@@ -315,10 +315,10 @@
int r;
struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
- *skb_push(skb, 1) = info->dep_info.curr_nfc_dep_pni;
- *skb_push(skb, 1) = ST21NFCA_NFCIP1_DEP_RES;
- *skb_push(skb, 1) = ST21NFCA_NFCIP1_RES;
- *skb_push(skb, 1) = skb->len;
+ *(u8 *)skb_push(skb, 1) = info->dep_info.curr_nfc_dep_pni;
+ *(u8 *)skb_push(skb, 1) = ST21NFCA_NFCIP1_DEP_RES;
+ *(u8 *)skb_push(skb, 1) = ST21NFCA_NFCIP1_RES;
+ *(u8 *)skb_push(skb, 1) = skb->len;
r = nfc_hci_send_event(hdev, ST21NFCA_RF_CARD_F_GATE,
ST21NFCA_EVT_SEND_DATA, skb->data, skb->len);
@@ -466,7 +466,7 @@
psl_req->brs = (0x30 & bsi << 4) | (bri & 0x03);
psl_req->fsl = lri;
- *skb_push(skb, 1) = info->dep_info.to | 0x10;
+ *(u8 *)skb_push(skb, 1) = info->dep_info.to | 0x10;
st21nfca_im_send_pdu(info, skb);
}
@@ -564,11 +564,11 @@
atr_req->ppi = ST21NFCA_LR_BITS_PAYLOAD_SIZE_254B;
if (gb_len) {
atr_req->ppi |= ST21NFCA_GB_BIT;
- memcpy(skb_put(skb, gb_len), gb, gb_len);
+ skb_put_data(skb, gb, gb_len);
}
atr_req->length = sizeof(struct st21nfca_atr_req) + hdev->gb_len;
- *skb_push(skb, 1) = info->dep_info.to | 0x10; /* timeout */
+ *(u8 *)skb_push(skb, 1) = info->dep_info.to | 0x10; /* timeout */
info->async_cb_type = ST21NFCA_CB_TYPE_READER_F;
info->async_cb_context = info;
@@ -629,10 +629,10 @@
case ST21NFCA_NFC_DEP_PFB_SUPERVISOR_PDU:
pr_err("Received a SUPERVISOR PDU\n");
skb_pull(skb, size);
- *skb_push(skb, 1) = ST21NFCA_NFCIP1_DEP_REQ;
- *skb_push(skb, 1) = ST21NFCA_NFCIP1_REQ;
- *skb_push(skb, 1) = skb->len;
- *skb_push(skb, 1) = info->dep_info.to | 0x10;
+ *(u8 *)skb_push(skb, 1) = ST21NFCA_NFCIP1_DEP_REQ;
+ *(u8 *)skb_push(skb, 1) = ST21NFCA_NFCIP1_REQ;
+ *(u8 *)skb_push(skb, 1) = skb->len;
+ *(u8 *)skb_push(skb, 1) = info->dep_info.to | 0x10;
st21nfca_im_send_pdu(info, skb);
break;
@@ -655,12 +655,12 @@
info->async_cb_context = info;
info->async_cb = st21nfca_im_recv_dep_res_cb;
- *skb_push(skb, 1) = info->dep_info.curr_nfc_dep_pni;
- *skb_push(skb, 1) = ST21NFCA_NFCIP1_DEP_REQ;
- *skb_push(skb, 1) = ST21NFCA_NFCIP1_REQ;
- *skb_push(skb, 1) = skb->len;
+ *(u8 *)skb_push(skb, 1) = info->dep_info.curr_nfc_dep_pni;
+ *(u8 *)skb_push(skb, 1) = ST21NFCA_NFCIP1_DEP_REQ;
+ *(u8 *)skb_push(skb, 1) = ST21NFCA_NFCIP1_REQ;
+ *(u8 *)skb_push(skb, 1) = skb->len;
- *skb_push(skb, 1) = info->dep_info.to | 0x10;
+ *(u8 *)skb_push(skb, 1) = info->dep_info.to | 0x10;
return nfc_hci_send_cmd_async(hdev, ST21NFCA_RF_READER_F_GATE,
ST21NFCA_WR_XCHG_DATA,
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index 02a920c..4bff76b 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -171,16 +171,16 @@
u16 crc;
u8 tmp;
- *skb_push(skb, 1) = 0;
+ *(u8 *)skb_push(skb, 1) = 0;
crc = crc_ccitt(0xffff, skb->data, skb->len);
crc = ~crc;
tmp = crc & 0x00ff;
- *skb_put(skb, 1) = tmp;
+ skb_put_u8(skb, tmp);
tmp = (crc >> 8) & 0x00ff;
- *skb_put(skb, 1) = tmp;
+ skb_put_u8(skb, tmp);
}
static void st21nfca_hci_remove_len_crc(struct sk_buff *skb)
@@ -214,9 +214,9 @@
st21nfca_hci_add_len_crc(skb);
/* add ST21NFCA_SOF_EOF on tail */
- *skb_put(skb, 1) = ST21NFCA_SOF_EOF;
+ skb_put_u8(skb, ST21NFCA_SOF_EOF);
/* add ST21NFCA_SOF_EOF on head */
- *skb_push(skb, 1) = ST21NFCA_SOF_EOF;
+ *(u8 *)skb_push(skb, 1) = ST21NFCA_SOF_EOF;
/*
* Compute byte stuffing
@@ -407,7 +407,7 @@
phy->current_read_len = 0;
}
- memcpy(skb_put(skb, len), buf, len);
+ skb_put_data(skb, buf, len);
if (skb->data[skb->len - 1] == ST21NFCA_SOF_EOF) {
phy->current_read_len = 0;
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
index c2840e4..2b26f76 100644
--- a/drivers/nfc/st95hf/core.c
+++ b/drivers/nfc/st95hf/core.c
@@ -949,7 +949,7 @@
switch (stcontext->current_rf_tech) {
case NFC_DIGITAL_RF_TECH_106A:
len_data_to_tag = skb->len + 1;
- *skb_put(skb, 1) = stcontext->sendrcv_trflag;
+ skb_put_u8(skb, stcontext->sendrcv_trflag);
break;
case NFC_DIGITAL_RF_TECH_106B:
case NFC_DIGITAL_RF_TECH_ISO15693:
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 7e4c80f..e0dbd6e 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -119,28 +119,6 @@
child->name, addr);
}
-int of_mdio_parse_addr(struct device *dev, const struct device_node *np)
-{
- u32 addr;
- int ret;
-
- ret = of_property_read_u32(np, "reg", &addr);
- if (ret < 0) {
- dev_err(dev, "%s has invalid PHY address\n", np->full_name);
- return ret;
- }
-
- /* A PHY must have a reg property in the range [0-31] */
- if (addr >= PHY_MAX_ADDR) {
- dev_err(dev, "%s PHY address %i is too large\n",
- np->full_name, addr);
- return -EINVAL;
- }
-
- return addr;
-}
-EXPORT_SYMBOL(of_mdio_parse_addr);
-
/* The following is a list of PHY compatible strings which appear in
* some DTBs. The compatible string is never matched against a PHY
* driver, so is pointless. We only expect devices which are not PHYs
@@ -226,7 +204,6 @@
/* Get bus level PHY reset GPIO details */
mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY;
of_property_read_u32(np, "reset-delay-us", &mdio->reset_delay_us);
- mdio->num_reset_gpios = of_gpio_named_count(np, "reset-gpios");
/* Register the MDIO bus */
rc = mdiobus_register(mdio);
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 384f661..a21ad10 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -25,6 +25,22 @@
To compile this driver as a module, choose M here: the module
will be called ptp.
+config PTP_1588_CLOCK_DTE
+ tristate "Broadcom DTE as PTP clock"
+ depends on PTP_1588_CLOCK
+ depends on NET && HAS_IOMEM
+ depends on ARCH_BCM_MOBILE || (ARCH_BCM_IPROC && !(ARCH_BCM_NSP || ARCH_BCM_5301X)) || COMPILE_TEST
+ default y
+ help
+ This driver adds support for using the Digital Timing Engine
+ (DTE) in Broadcom SoCs as a PTP clock.
+
+ The clock can be used in both wired and wireless networks
+ for PTP purposes.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_dte.
+
config PTP_1588_CLOCK_GIANFAR
tristate "Freescale eTSEC as PTP clock"
depends on GIANFAR
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index 5307361..d1f2fb1 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -4,6 +4,7 @@
ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o
obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o
+obj-$(CONFIG_PTP_1588_CLOCK_DTE) += ptp_dte.o
obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o
obj-$(CONFIG_PTP_1588_CLOCK_PCH) += ptp_pch.o
obj-$(CONFIG_PTP_1588_CLOCK_KVM) += ptp_kvm.o
diff --git a/drivers/ptp/ptp_dte.c b/drivers/ptp/ptp_dte.c
new file mode 100644
index 0000000..00145a3
--- /dev/null
+++ b/drivers/ptp/ptp_dte.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright 2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/types.h>
+
+#define DTE_NCO_LOW_TIME_REG 0x00
+#define DTE_NCO_TIME_REG 0x04
+#define DTE_NCO_OVERFLOW_REG 0x08
+#define DTE_NCO_INC_REG 0x0c
+
+#define DTE_NCO_SUM2_MASK 0xffffffff
+#define DTE_NCO_SUM2_SHIFT 4ULL
+
+#define DTE_NCO_SUM3_MASK 0xff
+#define DTE_NCO_SUM3_SHIFT 36ULL
+#define DTE_NCO_SUM3_WR_SHIFT 8
+
+#define DTE_NCO_TS_WRAP_MASK 0xfff
+#define DTE_NCO_TS_WRAP_LSHIFT 32
+
+#define DTE_NCO_INC_DEFAULT 0x80000000
+#define DTE_NUM_REGS_TO_RESTORE 4
+
+/* Full wraparound is 44 bits in ns (~4.887 hrs) */
+#define DTE_WRAP_AROUND_NSEC_SHIFT 44
+
+/* 44 bits NCO */
+#define DTE_NCO_MAX_NS 0xFFFFFFFFFFF
+
+/* 125MHz with 3.29 reg cfg */
+#define DTE_PPB_ADJ(ppb) (u32)(div64_u64((((u64)abs(ppb) * BIT(28)) +\
+ 62500000ULL), 125000000ULL))
+
+/* ptp dte priv structure */
+struct ptp_dte {
+ void __iomem *regs;
+ struct ptp_clock *ptp_clk;
+ struct ptp_clock_info caps;
+ struct device *dev;
+ u32 ts_ovf_last;
+ u32 ts_wrap_cnt;
+ spinlock_t lock;
+ u32 reg_val[DTE_NUM_REGS_TO_RESTORE];
+};
+
+static void dte_write_nco(void __iomem *regs, s64 ns)
+{
+ u32 sum2, sum3;
+
+ sum2 = (u32)((ns >> DTE_NCO_SUM2_SHIFT) & DTE_NCO_SUM2_MASK);
+ /* compensate for ignoring sum1 */
+ if (sum2 != DTE_NCO_SUM2_MASK)
+ sum2++;
+
+ /* to write sum3, bits [15:8] need to be written */
+ sum3 = (u32)(((ns >> DTE_NCO_SUM3_SHIFT) & DTE_NCO_SUM3_MASK) <<
+ DTE_NCO_SUM3_WR_SHIFT);
+
+ writel(0, (regs + DTE_NCO_LOW_TIME_REG));
+ writel(sum2, (regs + DTE_NCO_TIME_REG));
+ writel(sum3, (regs + DTE_NCO_OVERFLOW_REG));
+}
+
+static s64 dte_read_nco(void __iomem *regs)
+{
+ u32 sum2, sum3;
+ s64 ns;
+
+ /*
+ * ignoring sum1 (4 bits) gives a 16ns resolution, which
+ * works due to the async register read.
+ */
+ sum3 = readl(regs + DTE_NCO_OVERFLOW_REG) & DTE_NCO_SUM3_MASK;
+ sum2 = readl(regs + DTE_NCO_TIME_REG);
+ ns = ((s64)sum3 << DTE_NCO_SUM3_SHIFT) |
+ ((s64)sum2 << DTE_NCO_SUM2_SHIFT);
+
+ return ns;
+}
+
+static void dte_write_nco_delta(struct ptp_dte *ptp_dte, s64 delta)
+{
+ s64 ns;
+
+ ns = dte_read_nco(ptp_dte->regs);
+
+ /* handle wraparound conditions */
+ if ((delta < 0) && (abs(delta) > ns)) {
+ if (ptp_dte->ts_wrap_cnt) {
+ ns += DTE_NCO_MAX_NS + delta;
+ ptp_dte->ts_wrap_cnt--;
+ } else {
+ ns = 0;
+ }
+ } else {
+ ns += delta;
+ if (ns > DTE_NCO_MAX_NS) {
+ ptp_dte->ts_wrap_cnt++;
+ ns -= DTE_NCO_MAX_NS;
+ }
+ }
+
+ dte_write_nco(ptp_dte->regs, ns);
+
+ ptp_dte->ts_ovf_last = (ns >> DTE_NCO_TS_WRAP_LSHIFT) &
+ DTE_NCO_TS_WRAP_MASK;
+}
+
+static s64 dte_read_nco_with_ovf(struct ptp_dte *ptp_dte)
+{
+ u32 ts_ovf;
+ s64 ns = 0;
+
+ ns = dte_read_nco(ptp_dte->regs);
+
+ /* Timestamp overflow: 8 LSBs of sum3, 4 MSBs of sum2 */
+ ts_ovf = (ns >> DTE_NCO_TS_WRAP_LSHIFT) & DTE_NCO_TS_WRAP_MASK;
+
+ /* Check for wrap around */
+ if (ts_ovf < ptp_dte->ts_ovf_last)
+ ptp_dte->ts_wrap_cnt++;
+
+ ptp_dte->ts_ovf_last = ts_ovf;
+
+ /* adjust for wraparounds */
+ ns += (s64)(BIT_ULL(DTE_WRAP_AROUND_NSEC_SHIFT) * ptp_dte->ts_wrap_cnt);
+
+ return ns;
+}
+
+static int ptp_dte_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ u32 nco_incr;
+ unsigned long flags;
+ struct ptp_dte *ptp_dte = container_of(ptp, struct ptp_dte, caps);
+
+ if (abs(ppb) > ptp_dte->caps.max_adj) {
+ dev_err(ptp_dte->dev, "ppb adj too big\n");
+ return -EINVAL;
+ }
+
+ if (ppb < 0)
+ nco_incr = DTE_NCO_INC_DEFAULT - DTE_PPB_ADJ(ppb);
+ else
+ nco_incr = DTE_NCO_INC_DEFAULT + DTE_PPB_ADJ(ppb);
+
+ spin_lock_irqsave(&ptp_dte->lock, flags);
+ writel(nco_incr, ptp_dte->regs + DTE_NCO_INC_REG);
+ spin_unlock_irqrestore(&ptp_dte->lock, flags);
+
+ return 0;
+}
+
+static int ptp_dte_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ unsigned long flags;
+ struct ptp_dte *ptp_dte = container_of(ptp, struct ptp_dte, caps);
+
+ spin_lock_irqsave(&ptp_dte->lock, flags);
+ dte_write_nco_delta(ptp_dte, delta);
+ spin_unlock_irqrestore(&ptp_dte->lock, flags);
+
+ return 0;
+}
+
+static int ptp_dte_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ unsigned long flags;
+ struct ptp_dte *ptp_dte = container_of(ptp, struct ptp_dte, caps);
+
+ spin_lock_irqsave(&ptp_dte->lock, flags);
+ *ts = ns_to_timespec64(dte_read_nco_with_ovf(ptp_dte));
+ spin_unlock_irqrestore(&ptp_dte->lock, flags);
+
+ return 0;
+}
+
+static int ptp_dte_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ unsigned long flags;
+ struct ptp_dte *ptp_dte = container_of(ptp, struct ptp_dte, caps);
+
+ spin_lock_irqsave(&ptp_dte->lock, flags);
+
+ /* Disable nco increment */
+ writel(0, ptp_dte->regs + DTE_NCO_INC_REG);
+
+ dte_write_nco(ptp_dte->regs, timespec64_to_ns(ts));
+
+ /* reset overflow and wrap counter */
+ ptp_dte->ts_ovf_last = 0;
+ ptp_dte->ts_wrap_cnt = 0;
+
+ /* Enable nco increment */
+ writel(DTE_NCO_INC_DEFAULT, ptp_dte->regs + DTE_NCO_INC_REG);
+
+ spin_unlock_irqrestore(&ptp_dte->lock, flags);
+
+ return 0;
+}
+
+static int ptp_dte_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info ptp_dte_caps = {
+ .owner = THIS_MODULE,
+ .name = "DTE PTP timer",
+ .max_adj = 50000000,
+ .n_ext_ts = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = ptp_dte_adjfreq,
+ .adjtime = ptp_dte_adjtime,
+ .gettime64 = ptp_dte_gettime,
+ .settime64 = ptp_dte_settime,
+ .enable = ptp_dte_enable,
+};
+
+static int ptp_dte_probe(struct platform_device *pdev)
+{
+ struct ptp_dte *ptp_dte;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+
+ ptp_dte = devm_kzalloc(dev, sizeof(struct ptp_dte), GFP_KERNEL);
+ if (!ptp_dte)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ptp_dte->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ptp_dte->regs)) {
+ dev_err(dev,
+ "%s: io remap failed\n", __func__);
+ return PTR_ERR(ptp_dte->regs);
+ }
+
+ spin_lock_init(&ptp_dte->lock);
+
+ ptp_dte->dev = dev;
+ ptp_dte->caps = ptp_dte_caps;
+ ptp_dte->ptp_clk = ptp_clock_register(&ptp_dte->caps, &pdev->dev);
+ if (IS_ERR(ptp_dte->ptp_clk)) {
+ dev_err(dev,
+ "%s: Failed to register ptp clock\n", __func__);
+ return PTR_ERR(ptp_dte->ptp_clk);
+ }
+
+ platform_set_drvdata(pdev, ptp_dte);
+
+ dev_info(dev, "ptp clk probe done\n");
+
+ return 0;
+}
+
+static int ptp_dte_remove(struct platform_device *pdev)
+{
+ struct ptp_dte *ptp_dte = platform_get_drvdata(pdev);
+ u8 i;
+
+ ptp_clock_unregister(ptp_dte->ptp_clk);
+
+ for (i = 0; i < DTE_NUM_REGS_TO_RESTORE; i++)
+ writel(0, ptp_dte->regs + (i * sizeof(u32)));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ptp_dte_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ptp_dte *ptp_dte = platform_get_drvdata(pdev);
+ u8 i;
+
+ for (i = 0; i < DTE_NUM_REGS_TO_RESTORE; i++) {
+ ptp_dte->reg_val[i] =
+ readl(ptp_dte->regs + (i * sizeof(u32)));
+ }
+
+ /* disable the nco */
+ writel(0, ptp_dte->regs + DTE_NCO_INC_REG);
+
+ return 0;
+}
+
+static int ptp_dte_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ptp_dte *ptp_dte = platform_get_drvdata(pdev);
+ u8 i;
+
+ for (i = 0; i < DTE_NUM_REGS_TO_RESTORE; i++) {
+ if ((i * sizeof(u32)) != DTE_NCO_OVERFLOW_REG)
+ writel(ptp_dte->reg_val[i],
+ (ptp_dte->regs + (i * sizeof(u32))));
+ else
+ writel(((ptp_dte->reg_val[i] &
+ DTE_NCO_SUM3_MASK) << DTE_NCO_SUM3_WR_SHIFT),
+ (ptp_dte->regs + (i * sizeof(u32))));
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops ptp_dte_pm_ops = {
+ .suspend = ptp_dte_suspend,
+ .resume = ptp_dte_resume
+};
+
+#define PTP_DTE_PM_OPS (&ptp_dte_pm_ops)
+#else
+#define PTP_DTE_PM_OPS NULL
+#endif
+
+static const struct of_device_id ptp_dte_of_match[] = {
+ { .compatible = "brcm,ptp-dte", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ptp_dte_of_match);
+
+static struct platform_driver ptp_dte_driver = {
+ .driver = {
+ .name = "ptp-dte",
+ .pm = PTP_DTE_PM_OPS,
+ .of_match_table = ptp_dte_of_match,
+ },
+ .probe = ptp_dte_probe,
+ .remove = ptp_dte_remove,
+};
+module_platform_driver(ptp_dte_driver);
+
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("Broadcom DTE PTP Clock driver");
+MODULE_LICENSE("GPL v2");
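For readers checking the adjfreq math: DTE_PPB_ADJ() turns a parts-per-billion request into a delta on the 3.29 fixed-point NCO increment, computing round(|ppb| * 2^28 / 125000000), with the 62500000 term providing round-to-nearest. The sign is applied back in ptp_dte_adjfreq() by adding or subtracting the delta from DTE_NCO_INC_DEFAULT. A standalone sketch of the same arithmetic (userspace, for illustration only):

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

/* Mirrors DTE_PPB_ADJ(): scale by 2^28 against the 125 MHz reference,
 * rounding to nearest via the half-divisor term.
 */
static uint32_t dte_ppb_adj(int32_t ppb)
{
	uint64_t num = (uint64_t)llabs(ppb) * (1ULL << 28) + 62500000ULL;

	return (uint32_t)(num / 125000000ULL);
}

int main(void)
{
	printf("%" PRIu32 "\n", dte_ppb_adj(100));	/* ~215 steps */
	printf("%" PRIu32 "\n", dte_ppb_adj(50000000));	/* max_adj case */
	return 0;
}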
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index 0ca2ccc..2576284 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -116,7 +116,7 @@
if (!skb)
return -ENOMEM;
- memcpy(skb_put(skb, len), buf, len);
+ skb_put_data(skb, buf, len);
spin_lock(&eptdev->queue_lock);
skb_queue_tail(&eptdev->queue, skb);
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 730d961..e9847ce 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1279,7 +1279,7 @@
__func__, data_space);
while ((skb = skb_dequeue(&ch->collect_queue))) {
- memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
+ skb_put_data(ch->trans_skb, skb->data, skb->len);
p_header = (struct pdu *)
(skb_tail_pointer(ch->trans_skb) - skb->len);
p_header->pdu_flag = 0x00;
@@ -1431,13 +1431,12 @@
break;
case MPCG_STATE_FLOWC:
case MPCG_STATE_READY:
- memcpy(skb_put(new_skb, block_len),
- skb->data, block_len);
+ skb_put_data(new_skb, skb->data, block_len);
skb_queue_tail(&ch->io_queue, new_skb);
tasklet_schedule(&ch->ch_tasklet);
break;
default:
- memcpy(skb_put(new_skb, len), skb->data, len);
+ skb_put_data(new_skb, skb->data, len);
skb_queue_tail(&ch->io_queue, new_skb);
tasklet_hi_schedule(&ch->ch_tasklet);
break;
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 198842c..9912135 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -522,7 +522,7 @@
ctcm_clear_busy(ch->netdev);
return -ENOMEM;
} else {
- memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
+ skb_put_data(nskb, skb->data, skb->len);
atomic_inc(&nskb->users);
atomic_dec(&skb->users);
dev_kfree_skb_irq(skb);
@@ -638,7 +638,7 @@
header->th.th_seq_num = 0x00;
header->sw.th_last_seq = ch->th_seq_num;
- memcpy(skb_put(sweep_skb, TH_SWEEP_LENGTH), header, TH_SWEEP_LENGTH);
+ skb_put_data(sweep_skb, header, TH_SWEEP_LENGTH);
kfree(header);
@@ -728,7 +728,7 @@
if (!nskb) {
goto nomem_exit;
} else {
- memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
+ skb_put_data(nskb, skb->data, skb->len);
atomic_inc(&nskb->users);
atomic_dec(&skb->users);
dev_kfree_skb_irq(skb);
@@ -809,7 +809,7 @@
skb_reset_tail_pointer(ch->trans_skb);
ch->trans_skb->len = 0;
ch->ccw[1].count = skb->len;
- memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
+ skb_put_data(ch->trans_skb, skb->data, skb->len);
atomic_dec(&skb->users);
dev_kfree_skb_irq(skb);
ccw_idx = 0;
@@ -960,7 +960,7 @@
}
newskb->protocol = skb->protocol;
skb_reserve(newskb, TH_HEADER_LENGTH + PDU_HEADER_LENGTH);
- memcpy(skb_put(newskb, skb->len), skb->data, skb->len);
+ skb_put_data(newskb, skb->data, skb->len);
dev_kfree_skb_any(skb);
skb = newskb;
}
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index c103fc7..f8be396 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -667,7 +667,7 @@
header->th.th_seq_num = 0x00;
header->sw.th_last_seq = ch->th_seq_num;
- memcpy(skb_put(sweep_skb, TH_SWEEP_LENGTH), header, TH_SWEEP_LENGTH);
+ skb_put_data(sweep_skb, header, TH_SWEEP_LENGTH);
kfree(header);
@@ -974,9 +974,8 @@
skb_reset_tail_pointer(ch->xid_skb);
ch->xid_skb->len = 0;
- memcpy(skb_put(ch->xid_skb, grp->xid_skb->len),
- grp->xid_skb->data,
- grp->xid_skb->len);
+ skb_put_data(ch->xid_skb, grp->xid_skb->data,
+ grp->xid_skb->len);
ch->xid->xid2_dlc_type =
((CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
@@ -1149,7 +1148,7 @@
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
goto done;
}
- memcpy(skb_put(skb, new_len), pskb->data, new_len);
+ skb_put_data(skb, pskb->data, new_len);
skb_reset_mac_header(skb);
skb->dev = pskb->dev;
@@ -1297,16 +1296,15 @@
/* base xid for all channels in group */
grp->xid_skb_data = grp->xid_skb->data;
grp->xid_th = (struct th_header *)grp->xid_skb->data;
- memcpy(skb_put(grp->xid_skb, TH_HEADER_LENGTH),
- &thnorm, TH_HEADER_LENGTH);
+ skb_put_data(grp->xid_skb, &thnorm, TH_HEADER_LENGTH);
grp->xid = (struct xid2 *)skb_tail_pointer(grp->xid_skb);
- memcpy(skb_put(grp->xid_skb, XID2_LENGTH), &init_xid, XID2_LENGTH);
+ skb_put_data(grp->xid_skb, &init_xid, XID2_LENGTH);
grp->xid->xid2_adj_id = jiffies | 0xfff00000;
grp->xid->xid2_sender_id = jiffies;
grp->xid_id = skb_tail_pointer(grp->xid_skb);
- memcpy(skb_put(grp->xid_skb, 4), "VTAM", 4);
+ skb_put_data(grp->xid_skb, "VTAM", 4);
grp->rcvd_xid_skb =
__dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
@@ -1318,8 +1316,7 @@
}
grp->rcvd_xid_data = grp->rcvd_xid_skb->data;
grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data;
- memcpy(skb_put(grp->rcvd_xid_skb, TH_HEADER_LENGTH),
- &thnorm, TH_HEADER_LENGTH);
+ skb_put_data(grp->rcvd_xid_skb, &thnorm, TH_HEADER_LENGTH);
grp->saved_xid2 = NULL;
priv->xid = grp->xid;
priv->mpcg = grp;
@@ -1410,8 +1407,7 @@
skb_reset_tail_pointer(grp->rcvd_xid_skb);
grp->rcvd_xid_skb->len = 0;
grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data;
- memcpy(skb_put(grp->rcvd_xid_skb, TH_HEADER_LENGTH), &thnorm,
- TH_HEADER_LENGTH);
+ skb_put_data(grp->rcvd_xid_skb, &thnorm, TH_HEADER_LENGTH);
if (grp->send_qllc_disc == 1) {
grp->send_qllc_disc = 0;
@@ -1590,8 +1586,7 @@
grp->saved_xid2 =
(struct xid2 *)skb_tail_pointer(grp->rcvd_xid_skb);
- memcpy(skb_put(grp->rcvd_xid_skb,
- XID2_LENGTH), xid, XID2_LENGTH);
+ skb_put_data(grp->rcvd_xid_skb, xid, XID2_LENGTH);
grp->rcvd_xid_skb->data = grp->rcvd_xid_data;
skb_reset_tail_pointer(grp->rcvd_xid_skb);
@@ -1908,17 +1903,15 @@
if (fsm_getstate(ch->fsm) == CH_XID7_PENDING1) {
fsm_newstate(ch->fsm, CH_XID7_PENDING2);
ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
- memcpy(skb_put(ch->xid_skb,
- TH_HEADER_LENGTH),
- &thdummy, TH_HEADER_LENGTH);
+ skb_put_data(ch->xid_skb, &thdummy,
+ TH_HEADER_LENGTH);
send = 1;
}
} else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING2) {
fsm_newstate(ch->fsm, CH_XID7_PENDING2);
ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
- memcpy(skb_put(ch->xid_skb,
- TH_HEADER_LENGTH),
- &thnorm, TH_HEADER_LENGTH);
+ skb_put_data(ch->xid_skb, &thnorm,
+ TH_HEADER_LENGTH);
send = 1;
}
} else {
@@ -1926,17 +1919,16 @@
if (grp->roll == YSIDE) {
if (fsm_getstate(ch->fsm) < CH_XID7_PENDING4) {
fsm_newstate(ch->fsm, CH_XID7_PENDING4);
- memcpy(skb_put(ch->xid_skb,
- TH_HEADER_LENGTH),
- &thnorm, TH_HEADER_LENGTH);
+ skb_put_data(ch->xid_skb, &thnorm,
+ TH_HEADER_LENGTH);
ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
send = 1;
}
} else if (fsm_getstate(ch->fsm) == CH_XID7_PENDING3) {
fsm_newstate(ch->fsm, CH_XID7_PENDING4);
ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
- memcpy(skb_put(ch->xid_skb, TH_HEADER_LENGTH),
- &thdummy, TH_HEADER_LENGTH);
+ skb_put_data(ch->xid_skb, &thdummy,
+ TH_HEADER_LENGTH);
send = 1;
}
}
@@ -2122,7 +2114,7 @@
return -ENOMEM;
}
- memcpy(skb_put(skb, new_len), qllcptr, new_len);
+ skb_put_data(skb, qllcptr, new_len);
kfree(qllcptr);
if (skb_headroom(skb) < 4) {
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 211b31d..337bacb 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1796,7 +1796,7 @@
card->stats.rx_dropped++;
return;
}
- memcpy(skb_put(skb, skb_len), skb_data, skb_len);
+ skb_put_data(skb, skb_data, skb_len);
skb->protocol = card->lan_type_trans(skb, card->dev);
card->stats.rx_bytes += skb_len;
card->stats.rx_packets++;
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index fa732bd..7db427c 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -759,8 +759,7 @@
spin_lock_irqsave(&conn->collect_lock, saveflags);
while ((skb = skb_dequeue(&conn->collect_queue))) {
header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
- memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
- NETIUCV_HDRLEN);
+ skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
skb_copy_from_linear_data(skb,
skb_put(conn->tx_buff, skb->len),
skb->len);
@@ -780,7 +779,7 @@
}
header.next = 0;
- memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
+ skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
conn->prof.send_stamp = jiffies;
txmsg.class = 0;
txmsg.tag = 0;
@@ -1201,8 +1200,7 @@
return rc;
} else {
skb_reserve(nskb, NETIUCV_HDRLEN);
- memcpy(skb_put(nskb, skb->len),
- skb->data, skb->len);
+ skb_put_data(nskb, skb->data, skb->len);
}
copied = 1;
}
@@ -1212,7 +1210,7 @@
header.next = nskb->len + NETIUCV_HDRLEN;
memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
header.next = 0;
- memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
+ skb_put_data(nskb, &header, NETIUCV_HDRLEN);
fsm_newstate(conn->fsm, CONN_STATE_TX);
conn->prof.send_stamp = jiffies;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 30bc610..7a0ffc7 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -659,6 +659,7 @@
int max_mtu;
int broadcast_capable;
int unique_id;
+ bool layer_enforced;
struct qeth_card_blkt blkt;
enum qeth_ipa_promisc_modes promisc_mode;
__u32 diagass_support;
@@ -696,6 +697,7 @@
};
enum qeth_discipline_id {
+ QETH_DISCIPLINE_UNDETERMINED = -1,
QETH_DISCIPLINE_LAYER3 = 0,
QETH_DISCIPLINE_LAYER2 = 1,
};
@@ -984,6 +986,7 @@
int qeth_set_features(struct net_device *, netdev_features_t);
int qeth_recover_features(struct net_device *);
netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
+int qeth_vm_request_mac(struct qeth_card *card);
/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index fc6d85f..3b657d5 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -27,6 +27,9 @@
#include <asm/io.h>
#include <asm/sysinfo.h>
#include <asm/compat.h>
+#include <asm/diag.h>
+#include <asm/cio.h>
+#include <asm/ccwdev.h>
#include "qeth_core.h"
@@ -1723,6 +1726,25 @@
(prcd[0x11] == _ascebc['M']));
}
+/* Determine whether the device requires a specific layer discipline */
+static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
+{
+ if (card->info.type == QETH_CARD_TYPE_OSM ||
+ card->info.type == QETH_CARD_TYPE_OSN) {
+ QETH_DBF_TEXT(SETUP, 3, "force l2");
+ return QETH_DISCIPLINE_LAYER2;
+ }
+
+ /* virtual HiperSocket is L3 only: */
+ if (card->info.guestlan && card->info.type == QETH_CARD_TYPE_IQD) {
+ QETH_DBF_TEXT(SETUP, 3, "force l3");
+ return QETH_DISCIPLINE_LAYER3;
+ }
+
+ QETH_DBF_TEXT(SETUP, 3, "force no");
+ return QETH_DISCIPLINE_UNDETERMINED;
+}
+
static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
{
QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
@@ -3347,6 +3369,28 @@
(u16)qdio_err, (u8)sbalf15);
}
+/**
+ * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
+ * @queue: queue to check for packing buffer
+ *
+ * Returns number of buffers that were prepared for flush.
+ */
+static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
+{
+ struct qeth_qdio_out_buffer *buffer;
+
+ buffer = queue->bufs[queue->next_buf_to_fill];
+ if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
+ (buffer->next_element_to_fill > 0)) {
+ /* it's a packing buffer */
+ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+ queue->next_buf_to_fill =
+ (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
+ return 1;
+ }
+ return 0;
+}
+
/*
* Switched to packing state if the number of used buffers on a queue
* reaches a certain limit.
@@ -3373,9 +3417,6 @@
*/
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
{
- struct qeth_qdio_out_buffer *buffer;
- int flush_count = 0;
-
if (queue->do_pack) {
if (atomic_read(&queue->used_buffers)
<= QETH_LOW_WATERMARK_PACK) {
@@ -3384,42 +3425,9 @@
if (queue->card->options.performance_stats)
queue->card->perf_stats.sc_p_dp++;
queue->do_pack = 0;
- /* flush packing buffers */
- buffer = queue->bufs[queue->next_buf_to_fill];
- if ((atomic_read(&buffer->state) ==
- QETH_QDIO_BUF_EMPTY) &&
- (buffer->next_element_to_fill > 0)) {
- atomic_set(&buffer->state,
- QETH_QDIO_BUF_PRIMED);
- flush_count++;
- queue->next_buf_to_fill =
- (queue->next_buf_to_fill + 1) %
- QDIO_MAX_BUFFERS_PER_Q;
- }
+ return qeth_prep_flush_pack_buffer(queue);
}
}
- return flush_count;
-}
-
-
-/*
- * Called to flush a packing buffer if no more pci flags are on the queue.
- * Checks if there is a packing buffer and prepares it to be flushed.
- * In that case returns 1, otherwise zero.
- */
-static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
-{
- struct qeth_qdio_out_buffer *buffer;
-
- buffer = queue->bufs[queue->next_buf_to_fill];
- if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
- (buffer->next_element_to_fill > 0)) {
- /* it's a packing buffer */
- atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
- queue->next_buf_to_fill =
- (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
- return 1;
- }
return 0;
}
@@ -3532,8 +3540,7 @@
flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
if (!flush_cnt &&
!atomic_read(&queue->set_pci_flags_count))
- flush_cnt +=
- qeth_flush_buffers_on_no_pci(queue);
+ flush_cnt += qeth_prep_flush_pack_buffer(queue);
if (queue->card->options.performance_stats &&
q_was_packing)
queue->card->perf_stats.bufs_sent_pack +=
@@ -4099,7 +4106,8 @@
flush_count);
atomic_set(&queue->state,
QETH_OUT_Q_UNLOCKED);
- return -EBUSY;
+ rc = -EBUSY;
+ goto out;
}
}
}
@@ -4118,19 +4126,21 @@
* In that case we will enter this loop
*/
while (atomic_dec_return(&queue->state)) {
- flush_count = 0;
start_index = queue->next_buf_to_fill;
/* check if we can go back to non-packing state */
- flush_count += qeth_switch_to_nonpacking_if_needed(queue);
+ tmp = qeth_switch_to_nonpacking_if_needed(queue);
/*
* check if we need to flush a packing buffer to get a pci
* flag out on the queue
*/
- if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
- flush_count += qeth_flush_buffers_on_no_pci(queue);
- if (flush_count)
- qeth_flush_buffers(queue, start_index, flush_count);
+ if (!tmp && !atomic_read(&queue->set_pci_flags_count))
+ tmp = qeth_prep_flush_pack_buffer(queue);
+ if (tmp) {
+ qeth_flush_buffers(queue, start_index, tmp);
+ flush_count += tmp;
+ }
}
+out:
/* at this point the queue is UNLOCKED again */
if (queue->card->options.performance_stats && do_pack)
queue->card->perf_stats.bufs_sent_pack += flush_count;
@@ -4199,8 +4209,7 @@
sprintf(dbf_text, "%8x", new_mtu);
QETH_CARD_TEXT(card, 4, dbf_text);
- if ((!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) &&
- (!qeth_mtu_is_valid(card, new_mtu)))
+ if (!qeth_mtu_is_valid(card, new_mtu))
return -EINVAL;
dev->mtu = new_mtu;
return 0;
@@ -4767,6 +4776,64 @@
(void *)carrier_info);
}
+/**
+ * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
+ * @card: pointer to a qeth_card
+ *
+ * Returns
+ * 0, if a MAC address has been set for the card's netdevice
+ * a return code, for various error conditions
+ */
+int qeth_vm_request_mac(struct qeth_card *card)
+{
+ struct diag26c_mac_resp *response;
+ struct diag26c_mac_req *request;
+ struct ccw_dev_id id;
+ int rc;
+
+ QETH_DBF_TEXT(SETUP, 2, "vmreqmac");
+
+ if (!card->dev)
+ return -ENODEV;
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
+ response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
+ if (!request || !response) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ ccw_device_get_id(CARD_DDEV(card), &id);
+ request->resp_buf_len = sizeof(*response);
+ request->resp_version = DIAG26C_VERSION2;
+ request->op_code = DIAG26C_GET_MAC;
+ request->devno = id.devno;
+
+ rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
+ if (rc)
+ goto out;
+
+ if (request->resp_buf_len < sizeof(*response) ||
+ response->version != request->resp_version) {
+ rc = -EIO;
+ QETH_DBF_TEXT(SETUP, 2, "badresp");
+ QETH_DBF_HEX(SETUP, 2, &request->resp_buf_len,
+ sizeof(request->resp_buf_len));
+ } else if (!is_valid_ether_addr(response->mac)) {
+ rc = -EINVAL;
+ QETH_DBF_TEXT(SETUP, 2, "badmac");
+ QETH_DBF_HEX(SETUP, 2, response->mac, ETH_ALEN);
+ } else {
+ ether_addr_copy(card->dev->dev_addr, response->mac);
+ }
+
+out:
+ kfree(response);
+ kfree(request);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
+
static inline int qeth_get_qdio_q_format(struct qeth_card *card)
{
if (card->info.type == QETH_CARD_TYPE_IQD)
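A simplified caller sketch for qeth_vm_request_mac() above. This is an
illustration only; the real qeth_l2 path (further below in this diff) also
tries SETADAPTERPARMS before falling back to a random address:

	if (MACHINE_IS_VM && !qeth_vm_request_mac(card))
		return 0;	/* card->dev->dev_addr now holds the z/VM-managed MAC */
	/* otherwise continue with the driver's other MAC discovery paths */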
@@ -5144,12 +5211,11 @@
skb_reserve(*pskb, ETH_HLEN);
if (data_len <= QETH_RX_PULL_LEN) {
- memcpy(skb_put(*pskb, data_len), element->addr + offset,
- data_len);
+ skb_put_data(*pskb, element->addr + offset, data_len);
} else {
get_page(page);
- memcpy(skb_put(*pskb, QETH_RX_PULL_LEN),
- element->addr + offset, QETH_RX_PULL_LEN);
+ skb_put_data(*pskb, element->addr + offset,
+ QETH_RX_PULL_LEN);
skb_fill_page_desc(*pskb, *pfrag, page,
offset + QETH_RX_PULL_LEN,
data_len - QETH_RX_PULL_LEN);
@@ -5245,8 +5311,7 @@
&skb, offset, &frag, data_len))
goto no_mem;
} else {
- memcpy(skb_put(skb, data_len), data_ptr,
- data_len);
+ skb_put_data(skb, data_ptr, data_len);
}
}
skb_len -= data_len;
@@ -5501,6 +5566,7 @@
enum qeth_discipline_id discipline)
{
int rc = 0;
+
mutex_lock(&qeth_mod_mutex);
switch (discipline) {
case QETH_DISCIPLINE_LAYER3:
@@ -5511,7 +5577,10 @@
card->discipline = try_then_request_module(
symbol_get(qeth_l2_discipline), "qeth_l2");
break;
+ default:
+ break;
}
+
if (!card->discipline) {
dev_err(&card->gdev->dev, "There is no kernel module to "
"support discipline %d\n", discipline);
@@ -5614,6 +5683,7 @@
struct qeth_card *card;
struct device *dev;
int rc;
+ enum qeth_discipline_id enforced_disc;
unsigned long flags;
char dbf_name[DBF_NAME_LEN];
@@ -5661,10 +5731,15 @@
goto err_card;
}
- switch (card->info.type) {
- case QETH_CARD_TYPE_OSN:
- case QETH_CARD_TYPE_OSM:
- rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
+ qeth_determine_capabilities(card);
+ enforced_disc = qeth_enforce_discipline(card);
+ switch (enforced_disc) {
+ case QETH_DISCIPLINE_UNDETERMINED:
+ gdev->dev.type = &qeth_generic_devtype;
+ break;
+ default:
+ card->info.layer_enforced = true;
+ rc = qeth_core_load_discipline(card, enforced_disc);
if (rc)
goto err_card;
@@ -5675,16 +5750,11 @@
if (rc)
goto err_disc;
break;
- default:
- gdev->dev.type = &qeth_generic_devtype;
- break;
}
write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
list_add_tail(&card->list, &qeth_core_card_list.list);
write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
-
- qeth_determine_capabilities(card);
return 0;
err_disc:
@@ -5721,7 +5791,7 @@
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc = 0;
- int def_discipline;
+ enum qeth_discipline_id def_discipline;
if (!card->discipline) {
if (card->info.type == QETH_CARD_TYPE_IQD)
@@ -6406,11 +6476,8 @@
features &= ~NETIF_F_IP_CSUM;
if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
features &= ~NETIF_F_RXCSUM;
- if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
+ if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
features &= ~NETIF_F_TSO;
- dev_info(&card->gdev->dev, "Outbound TSO not supported on %s\n",
- QETH_CARD_IFNAME(card));
- }
/* if the card isn't up, remove features that require hw changes */
if (card->state == CARD_STATE_DOWN ||
card->state == CARD_STATE_RECOVER)
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index beb4bdc..6dd7d05 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -167,13 +167,21 @@
{IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"},
{IPA_RC_UNKNOWN_ERROR, "IPA command failed - reason unknown"},
{IPA_RC_UNSUPPORTED_COMMAND, "Command not supported"},
+ {IPA_RC_TRACE_ALREADY_ACTIVE, "trace already active"},
+ {IPA_RC_INVALID_FORMAT, "invalid format or length"},
{IPA_RC_DUP_IPV6_REMOTE, "ipv6 address already registered remote"},
+ {IPA_RC_SBP_IQD_NOT_CONFIGURED, "Not configured for bridgeport"},
{IPA_RC_DUP_IPV6_HOME, "ipv6 address already registered"},
{IPA_RC_UNREGISTERED_ADDR, "Address not registered"},
{IPA_RC_NO_ID_AVAILABLE, "No identifiers available"},
{IPA_RC_ID_NOT_FOUND, "Identifier not found"},
+ {IPA_RC_SBP_IQD_ANO_DEV_PRIMARY, "Primary bridgeport exists already"},
+ {IPA_RC_SBP_IQD_CURRENT_SECOND, "Bridgeport is currently secondary"},
+ {IPA_RC_SBP_IQD_LIMIT_SECOND, "Limit of secondary bridgeports reached"},
{IPA_RC_INVALID_IP_VERSION, "IP version incorrect"},
+ {IPA_RC_SBP_IQD_CURRENT_PRIMARY, "Bridgeport is currently primary"},
{IPA_RC_LAN_FRAME_MISMATCH, "LAN and frame mismatch"},
+ {IPA_RC_SBP_IQD_NO_QDIO_QUEUES, "QDIO queues not established"},
{IPA_RC_L2_UNSUPPORTED_CMD, "Unsupported layer 2 command"},
{IPA_RC_L2_DUP_MAC, "Duplicate MAC address"},
{IPA_RC_L2_ADDR_TABLE_FULL, "Layer2 address table full"},
@@ -185,6 +193,14 @@
{IPA_RC_L2_INVALID_VLAN_ID, "L2 invalid vlan id"},
{IPA_RC_L2_DUP_VLAN_ID, "L2 duplicate vlan id"},
{IPA_RC_L2_VLAN_ID_NOT_FOUND, "L2 vlan id not found"},
+ {IPA_RC_SBP_OSA_NOT_CONFIGURED, "Not configured for bridgeport"},
+ {IPA_RC_SBP_OSA_OS_MISMATCH, "OS mismatch"},
+ {IPA_RC_SBP_OSA_ANO_DEV_PRIMARY, "Primary bridgeport exists already"},
+ {IPA_RC_SBP_OSA_CURRENT_SECOND, "Bridgeport is currently secondary"},
+ {IPA_RC_SBP_OSA_LIMIT_SECOND, "Limit of secondary bridgeports reached"},
+ {IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN, "Not authorized by zManager"},
+ {IPA_RC_SBP_OSA_CURRENT_PRIMARY, "Bridgeport is currently primary"},
+ {IPA_RC_SBP_OSA_NO_QDIO_QUEUES, "QDIO queues not established"},
{IPA_RC_DATA_MISMATCH, "Data field mismatch (v4/v6 mixed)"},
{IPA_RC_INVALID_MTU_SIZE, "Invalid MTU size"},
{IPA_RC_INVALID_LANTYPE, "Invalid LAN type"},
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 4accb0a..912e010 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -142,12 +142,18 @@
IPA_RC_TRACE_ALREADY_ACTIVE = 0x0005,
IPA_RC_INVALID_FORMAT = 0x0006,
IPA_RC_DUP_IPV6_REMOTE = 0x0008,
+ IPA_RC_SBP_IQD_NOT_CONFIGURED = 0x000C,
IPA_RC_DUP_IPV6_HOME = 0x0010,
IPA_RC_UNREGISTERED_ADDR = 0x0011,
IPA_RC_NO_ID_AVAILABLE = 0x0012,
IPA_RC_ID_NOT_FOUND = 0x0013,
+ IPA_RC_SBP_IQD_ANO_DEV_PRIMARY = 0x0014,
+ IPA_RC_SBP_IQD_CURRENT_SECOND = 0x0018,
+ IPA_RC_SBP_IQD_LIMIT_SECOND = 0x001C,
IPA_RC_INVALID_IP_VERSION = 0x0020,
+ IPA_RC_SBP_IQD_CURRENT_PRIMARY = 0x0024,
IPA_RC_LAN_FRAME_MISMATCH = 0x0040,
+ IPA_RC_SBP_IQD_NO_QDIO_QUEUES = 0x00EB,
IPA_RC_L2_UNSUPPORTED_CMD = 0x2003,
IPA_RC_L2_DUP_MAC = 0x2005,
IPA_RC_L2_ADDR_TABLE_FULL = 0x2006,
@@ -159,6 +165,14 @@
IPA_RC_L2_INVALID_VLAN_ID = 0x2015,
IPA_RC_L2_DUP_VLAN_ID = 0x2016,
IPA_RC_L2_VLAN_ID_NOT_FOUND = 0x2017,
+ IPA_RC_SBP_OSA_NOT_CONFIGURED = 0x2B0C,
+ IPA_RC_SBP_OSA_OS_MISMATCH = 0x2B10,
+ IPA_RC_SBP_OSA_ANO_DEV_PRIMARY = 0x2B14,
+ IPA_RC_SBP_OSA_CURRENT_SECOND = 0x2B18,
+ IPA_RC_SBP_OSA_LIMIT_SECOND = 0x2B1C,
+ IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN = 0x2B20,
+ IPA_RC_SBP_OSA_CURRENT_PRIMARY = 0x2B24,
+ IPA_RC_SBP_OSA_NO_QDIO_QUEUES = 0x2BEB,
IPA_RC_DATA_MISMATCH = 0xe001,
IPA_RC_INVALID_MTU_SIZE = 0xe002,
IPA_RC_INVALID_LANTYPE = 0xe003,
@@ -187,12 +201,16 @@
#define IPA_RC_INVALID_SUBCMD IPA_RC_IP_TABLE_FULL
#define IPA_RC_HARDWARE_AUTH_ERROR IPA_RC_UNKNOWN_ERROR
+/* for SETBRIDGEPORT (double occupancies) */
+#define IPA_RC_SBP_IQD_OS_MISMATCH IPA_RC_DUP_IPV6_HOME
+#define IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN IPA_RC_INVALID_IP_VERSION
+
/* IPA function flags; each flag marks availability of respective function */
enum qeth_ipa_funcs {
IPA_ARP_PROCESSING = 0x00000001L,
IPA_INBOUND_CHECKSUM = 0x00000002L,
IPA_OUTBOUND_CHECKSUM = 0x00000004L,
- IPA_IP_FRAGMENTATION = 0x00000008L,
+ /* RESERVED = 0x00000008L,*/
IPA_FILTERING = 0x00000010L,
IPA_IPV6 = 0x00000020L,
IPA_MULTICASTING = 0x00000040L,
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index db6a285..6d255c2 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -413,7 +413,7 @@
if (card->options.layer2 == newdis)
goto out;
- if (card->info.type == QETH_CARD_TYPE_OSM) {
+ if (card->info.layer_enforced) {
/* fixed layer, can't switch */
rc = -EOPNOTSUPP;
goto out;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index bd2df62..ad110ab 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -21,6 +21,7 @@
#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/string.h>
+#include <asm/setup.h>
#include "qeth_core.h"
#include "qeth_l2.h"
@@ -505,9 +506,19 @@
int rc = 0;
char vendor_pre[] = {0x02, 0x00, 0x00};
- QETH_DBF_TEXT(SETUP, 2, "doL2init");
+ QETH_DBF_TEXT(SETUP, 2, "l2reqmac");
QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card));
+ if (MACHINE_IS_VM) {
+ rc = qeth_vm_request_mac(card);
+ if (!rc)
+ goto out;
+ QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %s: x%x\n",
+ CARD_BUS_ID(card), rc);
+ QETH_DBF_TEXT_(SETUP, 2, "err%04x", rc);
+ /* fall back to alternative mechanism: */
+ }
+
if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
rc = qeth_query_setadapterparms(card);
if (rc) {
@@ -528,11 +539,12 @@
QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
return rc;
}
- QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
} else {
eth_random_addr(card->dev->dev_addr);
memcpy(card->dev->dev_addr, vendor_pre, 3);
}
+out:
+ QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, card->dev->addr_len);
return 0;
}
@@ -759,8 +771,7 @@
sizeof(struct qeth_hdr));
if (!new_skb)
goto tx_drop;
- hdr = (struct qeth_hdr *)skb_push(new_skb,
- sizeof(struct qeth_hdr));
+ hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
skb_set_mac_header(new_skb, sizeof(struct qeth_hdr));
qeth_l2_fill_header(card, hdr, new_skb, cast_type);
if (new_skb->ip_summed == CHECKSUM_PARTIAL)
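The casts dropped in hunks like the one above rely on skb_put(), skb_push()
and skb_pull() returning void * in this kernel generation, so the result
converts implicitly to any object pointer type:

	void *skb_put(struct sk_buff *skb, unsigned int len);
	void *skb_push(struct sk_buff *skb, unsigned int len);
	void *skb_pull(struct sk_buff *skb, unsigned int len);

	/* hence, no cast: */
	hdr = skb_push(new_skb, sizeof(struct qeth_hdr));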
@@ -1017,6 +1028,13 @@
return 0;
}
+static void qeth_l2_trace_features(struct qeth_card *card)
+{
+ QETH_CARD_TEXT(card, 2, "l2featur");
+ QETH_CARD_HEX(card, 2, &card->options.sbp.supported_funcs,
+ sizeof(card->options.sbp.supported_funcs));
+}
+
static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
@@ -1040,6 +1058,7 @@
dev_info(&card->gdev->dev,
"The device represents a Bridge Capable Port\n");
qeth_trace_features(card);
+ qeth_l2_trace_features(card);
if (!card->dev && qeth_l2_setup_netdev(card)) {
rc = -ENODEV;
@@ -1643,27 +1662,27 @@
if ((is_iqd && (cbctl->ipa_rc == IPA_RC_SUCCESS)) ||
(!is_iqd && (cbctl->ipa_rc == cbctl->cmd_rc)))
switch (cbctl->cmd_rc) {
- case 0x0000:
+ case IPA_RC_SUCCESS:
rc = 0;
break;
- case 0x2B04:
- case 0x0004:
+ case IPA_RC_L2_UNSUPPORTED_CMD:
+ case IPA_RC_UNSUPPORTED_COMMAND:
rc = -EOPNOTSUPP;
break;
- case 0x2B0C:
- case 0x000C: /* Not configured as bridge Port */
+ case IPA_RC_SBP_OSA_NOT_CONFIGURED:
+ case IPA_RC_SBP_IQD_NOT_CONFIGURED:
rc = -ENODEV; /* maybe not the best code here? */
dev_err(&card->gdev->dev,
"The device is not configured as a Bridge Port\n");
break;
- case 0x2B10:
- case 0x0010: /* OS mismatch */
+ case IPA_RC_SBP_OSA_OS_MISMATCH:
+ case IPA_RC_SBP_IQD_OS_MISMATCH:
rc = -EPERM;
dev_err(&card->gdev->dev,
"A Bridge Port is already configured by a different operating system\n");
break;
- case 0x2B14:
- case 0x0014: /* Another device is Primary */
+ case IPA_RC_SBP_OSA_ANO_DEV_PRIMARY:
+ case IPA_RC_SBP_IQD_ANO_DEV_PRIMARY:
switch (setcmd) {
case IPA_SBP_SET_PRIMARY_BRIDGE_PORT:
rc = -EEXIST;
@@ -1679,26 +1698,26 @@
rc = -EIO;
}
break;
- case 0x2B18:
- case 0x0018: /* This device is currently Secondary */
+ case IPA_RC_SBP_OSA_CURRENT_SECOND:
+ case IPA_RC_SBP_IQD_CURRENT_SECOND:
rc = -EBUSY;
dev_err(&card->gdev->dev,
"The device is already a secondary Bridge Port\n");
break;
- case 0x2B1C:
- case 0x001C: /* Limit for Secondary devices reached */
+ case IPA_RC_SBP_OSA_LIMIT_SECOND:
+ case IPA_RC_SBP_IQD_LIMIT_SECOND:
rc = -EEXIST;
dev_err(&card->gdev->dev,
"The LAN cannot have more secondary Bridge Ports\n");
break;
- case 0x2B24:
- case 0x0024: /* This device is currently Primary */
+ case IPA_RC_SBP_OSA_CURRENT_PRIMARY:
+ case IPA_RC_SBP_IQD_CURRENT_PRIMARY:
rc = -EBUSY;
dev_err(&card->gdev->dev,
"The device is already a primary Bridge Port\n");
break;
- case 0x2B20:
- case 0x0020: /* Not authorized by zManager */
+ case IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN:
+ case IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN:
rc = -EACCES;
dev_err(&card->gdev->dev,
"The device is not authorized to be a Bridge Port\n");
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 26f7953..9b5e439 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -65,6 +65,7 @@
int mask_bits;
};
+extern const struct attribute_group *qeth_l3_attr_groups[];
void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *);
int qeth_l3_string_to_ipaddr(const char *, enum qeth_prot_versions, __u8 *);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index d8df1e6..3062cde 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -956,31 +956,6 @@
return rc;
}
-static int qeth_l3_start_ipa_ip_fragmentation(struct qeth_card *card)
-{
- int rc;
-
- QETH_CARD_TEXT(card, 3, "ipaipfrg");
-
- if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
- dev_info(&card->gdev->dev,
- "Hardware IP fragmentation not supported on %s\n",
- QETH_CARD_IFNAME(card));
- return -EOPNOTSUPP;
- }
-
- rc = qeth_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
- IPA_CMD_ASS_START, 0);
- if (rc) {
- dev_warn(&card->gdev->dev,
- "Starting IP fragmentation support for %s failed\n",
- QETH_CARD_IFNAME(card));
- } else
- dev_info(&card->gdev->dev,
- "Hardware IP fragmentation enabled \n");
- return rc;
-}
-
static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
{
int rc;
@@ -1060,9 +1035,6 @@
QETH_CARD_TEXT(card, 3, "softipv6");
- if (card->info.type == QETH_CARD_TYPE_IQD)
- goto out;
-
rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
if (rc) {
dev_err(&card->gdev->dev,
@@ -1070,6 +1042,10 @@
QETH_CARD_IFNAME(card));
return rc;
}
+
+ if (card->info.type == QETH_CARD_TYPE_IQD)
+ goto out;
+
rc = qeth_send_simple_setassparms(card, IPA_IPV6,
IPA_CMD_ASS_START, 3);
if (rc) {
@@ -1171,7 +1147,6 @@
if (qeth_set_access_ctrl_online(card, 0))
return -EIO;
qeth_l3_start_ipa_arp_processing(card); /* go on*/
- qeth_l3_start_ipa_ip_fragmentation(card); /* go on*/
qeth_l3_start_ipa_source_mac(card); /* go on*/
qeth_l3_start_ipa_vlan(card); /* go on*/
qeth_l3_start_ipa_multicast(card); /* go on*/
@@ -2702,8 +2677,7 @@
use_tso = skb_is_gso(skb) &&
(qeth_get_ip_protocol(skb) == IPPROTO_TCP) && (ipv == 4);
- if ((card->info.type == QETH_CARD_TYPE_IQD) &&
- !skb_is_nonlinear(skb)) {
+ if (card->info.type == QETH_CARD_TYPE_IQD) {
new_skb = skb;
data_offset = ETH_HLEN;
hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
@@ -2716,12 +2690,7 @@
+ VLAN_HLEN);
if (!new_skb)
goto tx_drop;
- }
- if (card->info.type == QETH_CARD_TYPE_IQD) {
- if (data_offset < 0)
- skb_pull(new_skb, ETH_HLEN);
- } else {
if (ipv == 4) {
skb_pull(new_skb, ETH_HLEN);
}
@@ -2760,16 +2729,14 @@
}
if (use_tso) {
- hdr = (struct qeth_hdr *)skb_push(new_skb,
- sizeof(struct qeth_hdr_tso));
+ hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
memset(hdr, 0, sizeof(struct qeth_hdr_tso));
qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
qeth_tso_fill_header(card, hdr, new_skb);
hdr_elements++;
} else {
if (data_offset < 0) {
- hdr = (struct qeth_hdr *)skb_push(new_skb,
- sizeof(struct qeth_hdr));
+ hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
qeth_l3_fill_header(card, hdr, new_skb, ipv,
cast_type);
} else {
@@ -3036,14 +3003,21 @@
return register_netdev(card->dev);
}
+static const struct device_type qeth_l3_devtype = {
+ .name = "qeth_layer3",
+ .groups = qeth_l3_attr_groups,
+};
+
static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
- rc = qeth_l3_create_device_attributes(&gdev->dev);
- if (rc)
- return rc;
+ if (gdev->dev.type == &qeth_generic_devtype) {
+ rc = qeth_l3_create_device_attributes(&gdev->dev);
+ if (rc)
+ return rc;
+ }
hash_init(card->ip_htable);
hash_init(card->ip_mc_htable);
card->options.layer2 = 0;
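Setting gdev->dev.type to qeth_l3_devtype means the driver core instantiates
and removes qeth_l3_attr_groups automatically during device registration, so
the explicit qeth_l3_create_device_attributes() call is only needed for
devices that still carry the generic devtype; that is what the gating above
implements. Simplified view of the mechanism (assumption, not from this diff):

	/* drivers/base/core.c, roughly: device_add() -> device_add_attrs()
	 * -> device_add_groups(dev, dev->type->groups) */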
@@ -3055,7 +3029,8 @@
{
struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
- qeth_l3_remove_device_attributes(&cgdev->dev);
+ if (cgdev->dev.type == &qeth_generic_devtype)
+ qeth_l3_remove_device_attributes(&cgdev->dev);
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
@@ -3311,7 +3286,7 @@
}
struct qeth_discipline qeth_l3_discipline = {
- .devtype = &qeth_generic_devtype,
+ .devtype = &qeth_l3_devtype,
.start_poll = qeth_qdio_start_poll,
.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
.output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index ff29a4b..f2f94f5 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -1049,3 +1049,14 @@
sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
sysfs_remove_group(&dev->kobj, &qeth_device_rxip_group);
}
+
+const struct attribute_group *qeth_l3_attr_groups[] = {
+ &qeth_device_attr_group,
+ &qeth_device_blkt_group,
+ /* l3 specific, see l3_{create,remove}_device_attributes(): */
+ &qeth_l3_device_attr_group,
+ &qeth_device_ipato_group,
+ &qeth_device_vipa_group,
+ &qeth_device_rxip_group,
+ NULL,
+};
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 902722d..b025ee5 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -351,7 +351,7 @@
frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
} else {
- cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
+ cp = skb_put(skb, tlen);
}
memset(cp, 0, sizeof(*cp));
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 1880eb6..7b09e7dd 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -354,7 +354,7 @@
struct l2t_entry *l2t = csk->l2t;
skb_reset_transport_header(skb);
- req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
+ req = __skb_push(skb, sizeof(*req));
req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
(req_completion ? F_WR_COMPL : 0));
req->wr_lo = htonl(V_WR_TID(csk->tid));
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 0aae094..5485d68 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -644,7 +644,7 @@
unsigned int wr_ulp_mode = 0, val;
bool imm = is_ofld_imm(skb);
- req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
+ req = __skb_push(skb, sizeof(*req));
if (imm) {
req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
@@ -806,7 +806,7 @@
cxgbi_sock_get(csk);
csk->tid = tid;
- cxgb4_insert_tid(lldi->tids, csk, tid);
+ cxgb4_insert_tid(lldi->tids, csk, tid, csk->csk_family);
cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
free_atid(csk);
@@ -956,7 +956,8 @@
if (status && status != CPL_ERR_TCAM_FULL &&
status != CPL_ERR_CONN_EXIST &&
status != CPL_ERR_ARP_MISS)
- cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl));
+ cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl),
+ csk->csk_family);
cxgbi_sock_get(csk);
spin_lock_bh(&csk->lock);
@@ -1590,7 +1591,8 @@
free_atid(csk);
else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
lldi = cxgbi_cdev_priv(csk->cdev);
- cxgb4_remove_tid(lldi->tids, 0, csk->tid);
+ cxgb4_remove_tid(lldi->tids, 0, csk->tid,
+ csk->csk_family);
cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
cxgbi_sock_put(csk);
}
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 90939f6..539e23e 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1543,7 +1543,7 @@
cp = kmap_atomic(skb_frag_page(frag))
+ frag->page_offset;
} else {
- cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
+ cp = skb_put(skb, tlen);
}
memset(cp, 0, sizeof(*cp));
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 656463f..fff6f18 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -626,7 +626,7 @@
fh = (struct fc_frame_header *)skb->data;
op = *(u8 *)(fh + 1);
dlen = sizeof(struct fip_encaps) + skb->len; /* len before push */
- cap = (struct fip_encaps_head *)skb_push(skb, sizeof(*cap));
+ cap = skb_push(skb, sizeof(*cap));
memset(cap, 0, sizeof(*cap));
if (lport->point_to_multipoint) {
@@ -660,8 +660,7 @@
if (op != ELS_LS_RJT) {
dlen += sizeof(*mac);
- mac = (struct fip_mac_desc *)skb_put(skb, sizeof(*mac));
- memset(mac, 0, sizeof(*mac));
+ mac = skb_put_zero(skb, sizeof(*mac));
mac->fd_desc.fip_dtype = FIP_DT_MAC;
mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW;
if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC) {
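skb_put_zero() used above merges the skb_put() + memset() pair into a single
call; a sketch of the helper (modulo the exact return type across kernel
versions):

	static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
	{
		void *tmp = skb_put(skb, len);

		memset(tmp, 0, len);	/* zero-fill the newly appended bytes */
		return tmp;
	}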
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 245dcd9..e72beca 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -640,7 +640,7 @@
eh = (struct ethhdr *)skb->data;
if (eh->h_proto == htons(ETH_P_8021Q)) {
memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
- eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
+ eh = skb_pull(skb, VLAN_HLEN);
skb_reset_mac_header(skb);
}
if (eh->h_proto == htons(ETH_P_FIP)) {
@@ -1000,8 +1000,7 @@
if (!fnic->vlan_hw_insert) {
eth_hdr = (struct ethhdr *)skb_mac_header(skb);
- vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
- sizeof(*vlan_hdr) - sizeof(*eth_hdr));
+ vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
@@ -1067,7 +1066,7 @@
if (!fnic->vlan_hw_insert) {
eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
- vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
+ vlan_hdr = skb_push(skb, eth_hdr_len);
eth_hdr = (struct ethhdr *)vlan_hdr;
vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
@@ -1075,7 +1074,7 @@
fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
} else {
eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
- eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
+ eth_hdr = skb_push(skb, eth_hdr_len);
eth_hdr->h_proto = htons(ETH_P_FCOE);
fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
}
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index d623d08..dbadbc8 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -178,7 +178,7 @@
fill = -fr_len(fp) & 3;
if (fill) {
/* TODO, this may be a problem with fragmented skb */
- memset(skb_put(fp_skb(fp), fill), 0, fill);
+ skb_put_zero(fp_skb(fp), fill);
f_ctl |= fill;
}
fr_eof(fp) = FC_EOF_T;
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index e10b91c..0d4bf70 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -125,8 +125,7 @@
sub = fiph->fip_subcode;
if (!qedf->vlan_hw_insert) {
- vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, sizeof(*vlan_hdr)
- - sizeof(*eth_hdr));
+ vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index a5c9734..542a6e7 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -874,7 +874,7 @@
frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
} else {
- cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
+ cp = skb_put(skb, tlen);
}
memset(cp, 0, sizeof(*cp));
@@ -2117,7 +2117,7 @@
/* Undo VLAN encapsulation */
if (eh->h_proto == htons(ETH_P_8021Q)) {
memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
- eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
+ eh = skb_pull(skb, VLAN_HLEN);
skb_reset_mac_header(skb);
}
@@ -2954,7 +2954,7 @@
"WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn);
sprintf(host_buf, "host_%d", host->host_no);
- qed_ops->common->set_id(qedf->cdev, host_buf, QEDF_VERSION);
+ qed_ops->common->set_name(qedf->cdev, host_buf);
/* Set xid max values */
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 8bc7ee1..2ee92aa 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -2101,14 +2101,16 @@
/* Update header info */
SET_FIELD(cmd_pdu_header.flags_attr, ISCSI_CMD_HDR_ATTR,
ISCSI_ATTR_SIMPLE);
- if (sc->sc_data_direction == DMA_TO_DEVICE) {
- SET_FIELD(cmd_pdu_header.flags_attr,
- ISCSI_CMD_HDR_WRITE, 1);
- task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
- } else {
- SET_FIELD(cmd_pdu_header.flags_attr,
- ISCSI_CMD_HDR_READ, 1);
- task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
+ if (hdr->cdb[0] != TEST_UNIT_READY) {
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+ SET_FIELD(cmd_pdu_header.flags_attr,
+ ISCSI_CMD_HDR_WRITE, 1);
+ task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
+ } else {
+ SET_FIELD(cmd_pdu_header.flags_attr,
+ ISCSI_CMD_HDR_READ, 1);
+ task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
+ }
}
cmd_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
@@ -2119,7 +2121,7 @@
cmd_pdu_header.expected_transfer_length = cpu_to_be32(hdr->data_length);
cmd_pdu_header.hdr_second_dword = ntoh24(hdr->dlength);
cmd_pdu_header.cmd_sn = be32_to_cpu(hdr->cmdsn);
- cmd_pdu_header.opcode = hdr->opcode;
+ cmd_pdu_header.hdr_first_byte = hdr->opcode;
qedi_cpy_scsi_cdb(sc, (u32 *)cmd_pdu_header.cdb);
/* Fill tx AHS and rx buffer */
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
index fd354d4..7df32a6 100644
--- a/drivers/scsi/qedi/qedi_fw_api.c
+++ b/drivers/scsi/qedi/qedi_fw_api.c
@@ -578,7 +578,8 @@
(struct iscsi_common_hdr *)cmd_header,
tx_sgl_params, cmd_params,
dif_task_params);
- else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ))
+ else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ) ||
+ (task_params->rx_io_size == 0 && task_params->tx_io_size == 0))
return init_rw_iscsi_task(task_params,
ISCSI_TASK_TYPE_INITIATOR_READ,
conn_params,
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 87f0af3..80edd28 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -1466,9 +1466,6 @@
{ ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR,
"out of sge error"
},
- { ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR,
- "tcp seg ip options error"
- },
{ ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR,
"tcp ip fragment error"
},
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 09a2946..f468803 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1844,7 +1844,7 @@
qedi->mac);
sprintf(host_buf, "host_%d", qedi->shost->host_no);
- qedi_ops->common->set_id(qedi->cdev, host_buf, QEDI_MODULE_VERSION);
+ qedi_ops->common->set_name(qedi->cdev, host_buf);
qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi);
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index d1a7507..65420a9 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -480,7 +480,6 @@
devwrap = kzalloc(sizeof(*devwrap), GFP_KERNEL);
if (!devwrap) {
- ssb_err("Could not allocate device\n");
err = -ENOMEM;
goto error;
}
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index cf80998..9ab6ce2 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -161,12 +161,9 @@
return -ENOMEM;
skb_reserve(skb_out, NET_IP_ALIGN);
- memcpy(skb_put(skb_out, mac_header_len), mac_header_data,
- mac_header_len);
- memcpy(skb_put(skb_out, sizeof(struct arphdr)), arp_out,
- sizeof(struct arphdr));
- memcpy(skb_put(skb_out, sizeof(struct arpdata)), arp_data_out,
- sizeof(struct arpdata));
+ skb_put_data(skb_out, mac_header_data, mac_header_len);
+ skb_put_data(skb_out, arp_out, sizeof(struct arphdr));
+ skb_put_data(skb_out, arp_data_out, sizeof(struct arpdata));
skb_out->protocol = ((struct ethhdr *)mac_header_data)->h_proto;
skb_out->dev = skb_in->dev;
@@ -322,14 +319,10 @@
return -ENOMEM;
skb_reserve(skb_out, NET_IP_ALIGN);
- memcpy(skb_put(skb_out, mac_header_len), mac_header_data,
- mac_header_len);
- memcpy(skb_put(skb_out, sizeof(struct ipv6hdr)), &ipv6_out,
- sizeof(struct ipv6hdr));
- memcpy(skb_put(skb_out, sizeof(struct icmp6hdr)), &icmp6_out,
- sizeof(struct icmp6hdr));
- memcpy(skb_put(skb_out, sizeof(struct neighbour_advertisement)), &na,
- sizeof(struct neighbour_advertisement));
+ skb_put_data(skb_out, mac_header_data, mac_header_len);
+ skb_put_data(skb_out, &ipv6_out, sizeof(struct ipv6hdr));
+ skb_put_data(skb_out, &icmp6_out, sizeof(struct icmp6hdr));
+ skb_put_data(skb_out, &na, sizeof(struct neighbour_advertisement));
skb_out->protocol = ((struct ethhdr *)mac_header_data)->h_proto;
skb_out->dev = skb_in->dev;
@@ -669,8 +662,8 @@
return;
skb_reserve(skb, NET_IP_ALIGN);
- memcpy(skb_put(skb, mac_header_len), mac_header_data, mac_header_len);
- memcpy(skb_put(skb, len), buf, len);
+ skb_put_data(skb, mac_header_data, mac_header_len);
+ skb_put_data(skb, buf, len);
skb->protocol = ((struct ethhdr *)mac_header_data)->h_proto;
skb->dev = dev;
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index 49e9542..da801d3 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -466,12 +466,12 @@
DPRINTK(4, "SNAP, rx_ind_size = %d\n", rx_ind_size);
size = ETH_ALEN * 2;
- memcpy(skb_put(skb, size), priv->rxp, size);
+ skb_put_data(skb, priv->rxp, size);
/* (SNAP+UI..) skip */
size = rx_ind_size - (ETH_ALEN * 2);
- memcpy(skb_put(skb, size), &eth_hdr->h_proto, size);
+ skb_put_data(skb, &eth_hdr->h_proto, size);
aa1x_hdr = (struct ieee802_1x_hdr *)(priv->rxp + ETHER_HDR_SIZE);
break;
@@ -484,14 +484,13 @@
}
DPRINTK(3, "NETBEUI/NetBIOS rx_ind_size=%d\n", rx_ind_size);
- memcpy(skb_put(skb, 12), priv->rxp, 12); /* 8802/FDDI MAC copy */
+ skb_put_data(skb, priv->rxp, 12); /* 8802/FDDI MAC copy */
temp[0] = (((rx_ind_size - 12) >> 8) & 0xff); /* NETBEUI size add */
temp[1] = ((rx_ind_size - 12) & 0xff);
- memcpy(skb_put(skb, 2), temp, 2);
+ skb_put_data(skb, temp, 2);
- memcpy(skb_put(skb, rx_ind_size - 14), priv->rxp + 12,
- rx_ind_size - 14); /* copy after Type */
+ skb_put_data(skb, priv->rxp + 12, rx_ind_size - 14); /* copy after Type */
aa1x_hdr = (struct ieee802_1x_hdr *)(priv->rxp + 14);
break;
diff --git a/drivers/staging/most/aim-network/networking.c b/drivers/staging/most/aim-network/networking.c
index ce1764c..995674f 100644
--- a/drivers/staging/most/aim-network/networking.c
+++ b/drivers/staging/most/aim-network/networking.c
@@ -486,11 +486,11 @@
ether_addr_copy(skb_put(skb, ETH_ALEN), dev->dev_addr);
/* src */
- memcpy(skb_put(skb, 4), &zero, 4);
- memcpy(skb_put(skb, 2), buf + 5, 2);
+ skb_put_data(skb, &zero, 4);
+ skb_put_data(skb, buf + 5, 2);
/* eth type */
- memcpy(skb_put(skb, 2), buf + 10, 2);
+ skb_put_data(skb, buf + 10, 2);
buf += MDP_HDR_LEN;
len -= MDP_HDR_LEN;
@@ -499,7 +499,7 @@
len -= MEP_HDR_LEN;
}
- memcpy(skb_put(skb, len), buf, len);
+ skb_put_data(skb, buf, len);
skb->protocol = eth_type_trans(skb, dev);
skb_len = skb->len;
if (netif_rx(skb) == NET_RX_SUCCESS) {
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 781ef62..e05ae46 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -179,7 +179,10 @@
if (!phydev)
return -ENODEV;
- return phy_ethtool_ksettings_get(phydev, ecmd);
+
+ phy_ethtool_ksettings_get(phydev, ecmd);
+
+ return 0;
}
static int xlr_set_link_ksettings(struct net_device *ndev,
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 65a2856..72baede 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -287,8 +287,7 @@
else
ptr += 6;
}
- memcpy(skb_put(skb, work->word1.len), ptr,
- work->word1.len);
+ skb_put_data(skb, ptr, work->word1.len);
/* No packet buffers to free */
} else {
int segments = work->word2.s.bufs;
@@ -323,10 +322,9 @@
if (segment_size > len)
segment_size = len;
/* Copy the data into the packet */
- memcpy(skb_put(skb, segment_size),
- cvmx_phys_to_ptr(
- segment_ptr.s.addr),
- segment_size);
+ skb_put_data(skb,
+ cvmx_phys_to_ptr(segment_ptr.s.addr),
+ segment_size);
len -= segment_size;
segment_ptr = next_ptr;
}
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index ff4119e..31f3502 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -251,8 +251,7 @@
if ((skb_tail_pointer(skb) + add_bytes) <=
skb_end_pointer(skb))
- memset(__skb_put(skb, add_bytes), 0,
- add_bytes);
+ __skb_put_zero(skb, add_bytes);
}
}
}
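__skb_put_zero() is the unchecked sibling of skb_put_zero(): it extends the
tail via __skb_put(), which skips the skb_over_panic() tailroom check. That is
safe here because the code above verifies skb_end_pointer() first. A sketch,
modulo the exact return type across kernel versions:

	static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
	{
		void *tmp = __skb_put(skb, len);	/* no tailroom overrun check */

		memset(tmp, 0, len);
		return tmp;
	}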
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index c6c4404..afb9dad 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -1510,7 +1510,6 @@
u8 nr_subframes, i;
unsigned char *pdata;
struct rx_pkt_attrib *pattrib;
- unsigned char *data_ptr;
struct sk_buff *sub_skb, *subframes[MAX_SUBFRAME_COUNT];
struct recv_priv *precvpriv = &padapter->recvpriv;
struct __queue *pfree_recv_queue = &(precvpriv->free_recv_queue);
@@ -1544,8 +1543,7 @@
sub_skb = dev_alloc_skb(nSubframe_Length + 12);
if (sub_skb) {
skb_reserve(sub_skb, 12);
- data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length);
- memcpy(data_ptr, pdata, nSubframe_Length);
+ skb_put_data(sub_skb, pdata, nSubframe_Length);
} else {
sub_skb = skb_clone(prframe->pkt, GFP_ATOMIC);
if (sub_skb) {
diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c
index 859d0d6..225c23f 100644
--- a/drivers/staging/rtl8188eu/os_dep/mon.c
+++ b/drivers/staging/rtl8188eu/os_dep/mon.c
@@ -53,7 +53,7 @@
skb = netdev_alloc_skb(dev, data_len);
if (!skb)
return;
- memcpy(skb_put(skb, data_len), data, data_len);
+ skb_put_data(skb, data, data_len);
/*
* Frame data is not encrypted. Strip off protection so
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 1d39631..1720e1b 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -95,8 +95,7 @@
skb_reserve(skb, ieee->tx_headroom);
- BAReq = (struct rtllib_hdr_3addr *)skb_put(skb,
- sizeof(struct rtllib_hdr_3addr));
+ BAReq = skb_put(skb, sizeof(struct rtllib_hdr_3addr));
ether_addr_copy(BAReq->addr1, Dst);
ether_addr_copy(BAReq->addr2, ieee->dev->dev_addr);
@@ -104,7 +103,7 @@
ether_addr_copy(BAReq->addr3, ieee->current_network.bssid);
BAReq->frame_ctl = cpu_to_le16(RTLLIB_STYPE_MANAGE_ACT);
- tag = (u8 *)skb_put(skb, 9);
+ tag = skb_put(skb, 9);
*tag++ = ACT_CAT_BA;
*tag++ = type;
*tag++ = pBA->DialogToken;
@@ -159,15 +158,14 @@
skb_reserve(skb, ieee->tx_headroom);
- Delba = (struct rtllib_hdr_3addr *) skb_put(skb,
- sizeof(struct rtllib_hdr_3addr));
+ Delba = skb_put(skb, sizeof(struct rtllib_hdr_3addr));
ether_addr_copy(Delba->addr1, dst);
ether_addr_copy(Delba->addr2, ieee->dev->dev_addr);
ether_addr_copy(Delba->addr3, ieee->current_network.bssid);
Delba->frame_ctl = cpu_to_le16(RTLLIB_STYPE_MANAGE_ACT);
- tag = (u8 *)skb_put(skb, 6);
+ tag = skb_put(skb, 6);
*tag++ = ACT_CAT_BA;
*tag++ = ACT_DELBA;
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 43a7774..03a81ba 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -782,7 +782,6 @@
u8 nPadding_Length = 0;
u16 SeqNum = 0;
struct sk_buff *sub_skb;
- u8 *data_ptr;
/* just for debug purpose */
SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctl));
if ((RTLLIB_QOS_HAS_SEQ(fc)) &&
@@ -817,8 +816,7 @@
if (!sub_skb)
return 0;
skb_reserve(sub_skb, 12);
- data_ptr = (u8 *)skb_put(sub_skb, skb->len);
- memcpy(data_ptr, skb->data, skb->len);
+ skb_put_data(sub_skb, skb->data, skb->len);
sub_skb->dev = ieee->dev;
rxb->subframes[0] = sub_skb;
@@ -870,8 +868,7 @@
if (!sub_skb)
return 0;
skb_reserve(sub_skb, 12);
- data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length);
- memcpy(data_ptr, skb->data, nSubframe_Length);
+ skb_put_data(sub_skb, skb->data, nSubframe_Length);
sub_skb->dev = ieee->dev;
rxb->subframes[rxb->nr_subframes++] = sub_skb;
@@ -1141,13 +1138,12 @@
/* copy first fragment (including full headers) into
* beginning of the fragment cache skb
*/
- memcpy(skb_put(frag_skb, flen), skb->data, flen);
+ skb_put_data(frag_skb, skb->data, flen);
} else {
/* append frame payload to the end of the fragment
* cache skb
*/
- memcpy(skb_put(frag_skb, flen), skb->data + hdrlen,
- flen);
+ skb_put_data(frag_skb, skb->data + hdrlen, flen);
}
dev_kfree_skb_any(skb);
skb = NULL;
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index eeda17d..09d2c86 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -351,8 +351,7 @@
skb_reserve(skb, ieee->tx_headroom);
- req = (struct rtllib_probe_request *) skb_put(skb,
- sizeof(struct rtllib_probe_request));
+ req = skb_put(skb, sizeof(struct rtllib_probe_request));
req->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_PROBE_REQ);
req->header.duration_id = 0;
@@ -360,7 +359,7 @@
ether_addr_copy(req->header.addr2, ieee->dev->dev_addr);
eth_broadcast_addr(req->header.addr3);
- tag = (u8 *) skb_put(skb, len + 2 + rate_len);
+ tag = skb_put(skb, len + 2 + rate_len);
*tag++ = MFIE_TYPE_SSID;
*tag++ = len;
@@ -789,8 +788,7 @@
skb_reserve(skb, ieee->tx_headroom);
- auth = (struct rtllib_authentication *)
- skb_put(skb, sizeof(struct rtllib_authentication));
+ auth = skb_put(skb, sizeof(struct rtllib_authentication));
auth->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_AUTH);
if (challengelen)
@@ -889,8 +887,7 @@
skb_reserve(skb, ieee->tx_headroom);
- beacon_buf = (struct rtllib_probe_response *) skb_put(skb,
- (beacon_size - ieee->tx_headroom));
+ beacon_buf = skb_put(skb, (beacon_size - ieee->tx_headroom));
ether_addr_copy(beacon_buf->header.addr1, dest);
ether_addr_copy(beacon_buf->header.addr2, ieee->dev->dev_addr);
ether_addr_copy(beacon_buf->header.addr3, ieee->current_network.bssid);
@@ -984,8 +981,7 @@
skb_reserve(skb, ieee->tx_headroom);
- assoc = (struct rtllib_assoc_response_frame *)
- skb_put(skb, sizeof(struct rtllib_assoc_response_frame));
+ assoc = skb_put(skb, sizeof(struct rtllib_assoc_response_frame));
assoc->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_ASSOC_RESP);
ether_addr_copy(assoc->header.addr1, dest);
@@ -1016,7 +1012,7 @@
else
ieee->assoc_id++;
- tag = (u8 *) skb_put(skb, rate_len);
+ tag = skb_put(skb, rate_len);
rtllib_MFIE_Brate(ieee, &tag);
rtllib_MFIE_Grate(ieee, &tag);
@@ -1038,8 +1034,7 @@
skb_reserve(skb, ieee->tx_headroom);
- auth = (struct rtllib_authentication *)
- skb_put(skb, sizeof(struct rtllib_authentication));
+ auth = skb_put(skb, sizeof(struct rtllib_authentication));
auth->status = cpu_to_le16(status);
auth->transaction = cpu_to_le16(2);
@@ -1065,8 +1060,7 @@
skb_reserve(skb, ieee->tx_headroom);
- hdr = (struct rtllib_hdr_3addr *)skb_put(skb,
- sizeof(struct rtllib_hdr_3addr));
+ hdr = skb_put(skb, sizeof(struct rtllib_hdr_3addr));
ether_addr_copy(hdr->addr1, ieee->current_network.bssid);
ether_addr_copy(hdr->addr2, ieee->dev->dev_addr);
@@ -1092,8 +1086,7 @@
skb_reserve(skb, ieee->tx_headroom);
- hdr = (struct rtllib_pspoll_hdr *)skb_put(skb,
- sizeof(struct rtllib_pspoll_hdr));
+ hdr = skb_put(skb, sizeof(struct rtllib_pspoll_hdr));
ether_addr_copy(hdr->bssid, ieee->current_network.bssid);
ether_addr_copy(hdr->ta, ieee->dev->dev_addr);
@@ -1243,8 +1236,7 @@
skb_reserve(skb, ieee->tx_headroom);
- hdr = (struct rtllib_assoc_request_frame *)
- skb_put(skb, sizeof(struct rtllib_assoc_request_frame) + 2);
+ hdr = skb_put(skb, sizeof(struct rtllib_assoc_request_frame) + 2);
hdr->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_ASSOC_REQ);
@@ -1272,8 +1264,7 @@
hdr->info_element[0].id = MFIE_TYPE_SSID;
hdr->info_element[0].len = beacon->ssid_len;
- tag = skb_put(skb, beacon->ssid_len);
- memcpy(tag, beacon->ssid, beacon->ssid_len);
+ skb_put_data(skb, beacon->ssid, beacon->ssid_len);
tag = skb_put(skb, rate_len);
@@ -1349,8 +1340,7 @@
}
if (wpa_ie_len) {
- tag = skb_put(skb, ieee->wpa_ie_len);
- memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len);
+ skb_put_data(skb, ieee->wpa_ie, ieee->wpa_ie_len);
if (PMKCacheIdx >= 0) {
tag = skb_put(skb, 18);
@@ -1366,13 +1356,13 @@
}
if (wps_ie_len && ieee->wps_ie) {
- tag = skb_put(skb, wps_ie_len);
- memcpy(tag, ieee->wps_ie, wps_ie_len);
+ skb_put_data(skb, ieee->wps_ie, wps_ie_len);
}
- tag = skb_put(skb, turbo_info_len);
- if (turbo_info_len)
+ if (turbo_info_len) {
+ tag = skb_put(skb, turbo_info_len);
rtllib_TURBO_Info(ieee, &tag);
+ }
if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT) {
if (ieee->pHTInfo->ePeerHTSpecVer == HT_SPEC_VER_EWC) {
@@ -3417,8 +3407,7 @@
skb_reserve(skb, ieee->tx_headroom);
- disauth = (struct rtllib_disauth *) skb_put(skb,
- sizeof(struct rtllib_disauth));
+ disauth = skb_put(skb, sizeof(struct rtllib_disauth));
disauth->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_DEAUTH);
disauth->header.duration_id = 0;
@@ -3445,8 +3434,7 @@
skb_reserve(skb, ieee->tx_headroom);
- disass = (struct rtllib_disassoc *) skb_put(skb,
- sizeof(struct rtllib_disassoc));
+ disass = skb_put(skb, sizeof(struct rtllib_disassoc));
disass->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_DISASSOC);
disass->header.duration_id = 0;
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 78a3ad5..fc88d47 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -624,8 +624,7 @@
txb->encrypted = 0;
txb->payload_size = cpu_to_le16(skb->len);
- memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
- skb->len);
+ skb_put_data(txb->fragments[0], skb->data, skb->len);
goto success;
}
@@ -818,9 +817,7 @@
} else {
tcb_desc->bHwSec = 0;
}
- frag_hdr = (struct rtllib_hdr_3addrqos *)
- skb_put(skb_frag, hdr_len);
- memcpy(frag_hdr, &header, hdr_len);
+ frag_hdr = skb_put_data(skb_frag, &header, hdr_len);
/* If this is not the last fragment, then add the
* MOREFRAGS bit to the frame control
@@ -852,7 +849,7 @@
bytes -= SNAP_SIZE + sizeof(u16);
}
- memcpy(skb_put(skb_frag, bytes), skb->data, bytes);
+ skb_put_data(skb_frag, skb->data, bytes);
/* Advance the SKB... */
skb_pull(skb, bytes);
@@ -895,8 +892,7 @@
txb->encrypted = 0;
txb->payload_size = cpu_to_le16(skb->len);
- memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
- skb->len);
+ skb_put_data(txb->fragments[0], skb->data, skb->len);
}
success:
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 7a31510..a4aedb4 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -780,7 +780,6 @@
u16 SeqNum=0;
struct sk_buff *sub_skb;
- u8 *data_ptr;
/* just for debug purpose */
SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctl));
@@ -848,8 +847,7 @@
if (!sub_skb)
return 0;
skb_reserve(sub_skb, 12);
- data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length);
- memcpy(data_ptr, skb->data, nSubframe_Length);
+ skb_put_data(sub_skb, skb->data, nSubframe_Length);
#endif
rxb->subframes[rxb->nr_subframes++] = sub_skb;
if (rxb->nr_subframes >= MAX_SUBFRAME_COUNT) {
@@ -1180,12 +1178,11 @@
if (frag == 0) {
/* copy first fragment (including full headers) into
* beginning of the fragment cache skb */
- memcpy(skb_put(frag_skb, flen), skb->data, flen);
+ skb_put_data(frag_skb, skb->data, flen);
} else {
/* append frame payload to the end of the fragment
* cache skb */
- memcpy(skb_put(frag_skb, flen), skb->data + hdrlen,
- flen);
+ skb_put_data(frag_skb, skb->data + hdrlen, flen);
}
dev_kfree_skb_any(skb);
skb = NULL;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 14aea26..fe6f38b 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -341,7 +341,7 @@
skb_reserve(skb, ieee->tx_headroom);
- req = (struct ieee80211_probe_request *) skb_put(skb,sizeof(struct ieee80211_probe_request));
+ req = skb_put(skb, sizeof(struct ieee80211_probe_request));
req->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
req->header.duration_id = 0; /* FIXME: is this OK? */
@@ -349,7 +349,7 @@
memcpy(req->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
eth_broadcast_addr(req->header.addr3);
- tag = (u8 *) skb_put(skb,len+2+rate_len);
+ tag = skb_put(skb, len + 2 + rate_len);
*tag++ = MFIE_TYPE_SSID;
*tag++ = len;
@@ -659,8 +659,7 @@
if (!skb) return NULL;
skb_reserve(skb, ieee->tx_headroom);
- auth = (struct ieee80211_authentication *)
- skb_put(skb, sizeof(struct ieee80211_authentication));
+ auth = skb_put(skb, sizeof(struct ieee80211_authentication));
if (challengelen)
auth->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_AUTH
@@ -768,7 +767,7 @@
if (!skb)
return NULL;
skb_reserve(skb, ieee->tx_headroom);
- beacon_buf = (struct ieee80211_probe_response *) skb_put(skb, (beacon_size - ieee->tx_headroom));
+ beacon_buf = skb_put(skb, (beacon_size - ieee->tx_headroom));
memcpy (beacon_buf->header.addr1, dest,ETH_ALEN);
memcpy (beacon_buf->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
memcpy (beacon_buf->header.addr3, ieee->current_network.bssid, ETH_ALEN);
@@ -864,8 +863,7 @@
skb_reserve(skb, ieee->tx_headroom);
- assoc = (struct ieee80211_assoc_response_frame *)
- skb_put(skb, sizeof(struct ieee80211_assoc_response_frame));
+ assoc = skb_put(skb, sizeof(struct ieee80211_assoc_response_frame));
assoc->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP);
memcpy(assoc->header.addr1, dest,ETH_ALEN);
@@ -892,7 +890,7 @@
if (ieee->assoc_id == 0x2007) ieee->assoc_id=0;
else ieee->assoc_id++;
- tag = (u8 *) skb_put(skb, rate_len);
+ tag = skb_put(skb, rate_len);
ieee80211_MFIE_Brate(ieee, &tag);
ieee80211_MFIE_Grate(ieee, &tag);
@@ -940,7 +938,7 @@
if (!skb)
return NULL;
- hdr = (struct rtl_80211_hdr_3addr *)skb_put(skb,sizeof(struct rtl_80211_hdr_3addr));
+ hdr = skb_put(skb, sizeof(struct rtl_80211_hdr_3addr));
memcpy(hdr->addr1, ieee->current_network.bssid, ETH_ALEN);
memcpy(hdr->addr2, ieee->dev->dev_addr, ETH_ALEN);
@@ -1086,8 +1084,7 @@
skb_reserve(skb, ieee->tx_headroom);
- hdr = (struct ieee80211_assoc_request_frame *)
- skb_put(skb, sizeof(struct ieee80211_assoc_request_frame)+2);
+ hdr = skb_put(skb, sizeof(struct ieee80211_assoc_request_frame) + 2);
hdr->header.frame_ctl = IEEE80211_STYPE_ASSOC_REQ;
@@ -1115,8 +1112,7 @@
hdr->info_element[0].id = MFIE_TYPE_SSID;
hdr->info_element[0].len = beacon->ssid_len;
- tag = skb_put(skb, beacon->ssid_len);
- memcpy(tag, beacon->ssid, beacon->ssid_len);
+ skb_put_data(skb, beacon->ssid, beacon->ssid_len);
tag = skb_put(skb, rate_len);
@@ -1188,18 +1184,17 @@
//choose what wpa_supplicant gives to associate.
- tag = skb_put(skb, wpa_ie_len);
if (wpa_ie_len) {
- memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len);
+ skb_put_data(skb, ieee->wpa_ie, wpa_ie_len);
}
- tag = skb_put(skb, wmm_info_len);
if (wmm_info_len) {
- ieee80211_WMM_Info(ieee, &tag);
+ tag = skb_put(skb, wmm_info_len);
+ ieee80211_WMM_Info(ieee, &tag);
}
#ifdef THOMAS_TURBO
- tag = skb_put(skb, turbo_info_len);
if (turbo_info_len) {
+ tag = skb_put(skb, turbo_info_len);
ieee80211_TURBO_Info(ieee, &tag);
}
#endif
@@ -3111,7 +3106,7 @@
if (!skb)
return NULL;
- disass = (struct ieee80211_disassoc *) skb_put(skb, sizeof(struct ieee80211_disassoc));
+ disass = skb_put(skb, sizeof(struct ieee80211_disassoc));
disass->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_DISASSOC);
disass->header.duration_id = 0;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index bdb96a4..f58971a 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -794,8 +794,7 @@
{
tcb_desc->bHwSec = 0;
}
- frag_hdr = (struct rtl_80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
- memcpy(frag_hdr, &header, hdr_len);
+ frag_hdr = skb_put_data(skb_frag, &header, hdr_len);
/* If this is not the last fragment, then add the MOREFRAGS
* bit to the frame control
@@ -826,7 +825,7 @@
bytes -= SNAP_SIZE + sizeof(u16);
}
- memcpy(skb_put(skb_frag, bytes), skb->data, bytes);
+ skb_put_data(skb_frag, skb->data, bytes);
/* Advance the SKB... */
skb_pull(skb, bytes);
@@ -869,7 +868,7 @@
txb->encrypted = 0;
txb->payload_size = __cpu_to_le16(skb->len);
- memcpy(skb_put(txb->fragments[0],skb->len), skb->data, skb->len);
+ skb_put_data(txb->fragments[0], skb->data, skb->len);
}
success:
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index e82b507..8aa38dc 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -125,7 +125,7 @@
memset(skb->data, 0, sizeof( struct rtl_80211_hdr_3addr)); //I wonder whether it's necessary. Apparently the kernel will not do it when allocating an skb.
skb_reserve(skb, ieee->tx_headroom);
- BAReq = ( struct rtl_80211_hdr_3addr *) skb_put(skb,sizeof( struct rtl_80211_hdr_3addr));
+ BAReq = skb_put(skb, sizeof(struct rtl_80211_hdr_3addr));
memcpy(BAReq->addr1, Dst, ETH_ALEN);
memcpy(BAReq->addr2, ieee->dev->dev_addr, ETH_ALEN);
@@ -135,7 +135,7 @@
BAReq->frame_ctl = cpu_to_le16(IEEE80211_STYPE_MANAGE_ACT); //action frame
//tag += sizeof( struct rtl_80211_hdr_3addr); //move to action field
- tag = (u8 *)skb_put(skb, 9);
+ tag = skb_put(skb, 9);
*tag ++= ACT_CAT_BA;
*tag ++= type;
// Dialog Token
@@ -209,14 +209,14 @@
// memset(skb->data, 0, len+sizeof( struct rtl_80211_hdr_3addr));
skb_reserve(skb, ieee->tx_headroom);
- Delba = ( struct rtl_80211_hdr_3addr *) skb_put(skb,sizeof( struct rtl_80211_hdr_3addr));
+ Delba = skb_put(skb, sizeof(struct rtl_80211_hdr_3addr));
memcpy(Delba->addr1, dst, ETH_ALEN);
memcpy(Delba->addr2, ieee->dev->dev_addr, ETH_ALEN);
memcpy(Delba->addr3, ieee->current_network.bssid, ETH_ALEN);
Delba->frame_ctl = cpu_to_le16(IEEE80211_STYPE_MANAGE_ACT); //action frame
- tag = (u8 *)skb_put(skb, 6);
+ tag = skb_put(skb, 6);
*tag ++= ACT_CAT_BA;
*tag ++= ACT_DELBA;
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index bb6d8bd..87ab3ba 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -31,7 +31,6 @@
struct r8192_priv *priv = ieee80211_priv(dev);
struct sk_buff *skb;
struct cb_desc *tcb_desc;
- unsigned char *ptr_buf;
/* Get TCB and local buffer from common pool.
* (It is shared by CmdQ, MgntQ, and USB coalesce DataQ)
@@ -45,8 +44,7 @@
tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_NORMAL;
tcb_desc->bLastIniPkt = 0;
skb_reserve(skb, USB_HWDESC_HEADER_LEN);
- ptr_buf = skb_put(skb, DataLen);
- memcpy(ptr_buf, pData, DataLen);
+ skb_put_data(skb, pData, DataLen);
tcb_desc->txbuf_size = (u16)DataLen;
if (!priv->ieee80211->check_nic_enough_desc(dev, tcb_desc->queue_index) ||
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index 266ffef..ea3eb94 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -340,7 +340,7 @@
int a_len, padding_len;
u16 eth_type, nSubframe_Length;
u8 nr_subframes, i;
- unsigned char *data_ptr, *pdata;
+ unsigned char *pdata;
struct rx_pkt_attrib *pattrib;
_pkt *sub_skb, *subframes[MAX_SUBFRAME_COUNT];
struct recv_priv *precvpriv = &padapter->recvpriv;
@@ -372,8 +372,7 @@
if (!sub_skb)
break;
skb_reserve(sub_skb, 12);
- data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length);
- memcpy(data_ptr, pdata, nSubframe_Length);
+ skb_put_data(sub_skb, pdata, nSubframe_Length);
subframes[nr_subframes++] = sub_skb;
if (nr_subframes >= MAX_SUBFRAME_COUNT) {
netdev_warn(padapter->pnetdev, "r8712u: ParseSubframe(): Too many Subframes! Packets dropped!\n");
diff --git a/drivers/staging/rtl8723bs/os_dep/recv_linux.c b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
index e731ab4e..f42e000 100644
--- a/drivers/staging/rtl8723bs/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
@@ -72,7 +72,6 @@
_pkt *rtw_os_alloc_msdu_pkt(union recv_frame *prframe, u16 nSubframe_Length, u8 *pdata)
{
u16 eth_type;
- u8 *data_ptr;
_pkt *sub_skb;
struct rx_pkt_attrib *pattrib;
@@ -82,8 +81,7 @@
if (sub_skb)
{
skb_reserve(sub_skb, 12);
- data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length);
- memcpy(data_ptr, (pdata + ETH_HLEN), nSubframe_Length);
+ skb_put_data(sub_skb, (pdata + ETH_HLEN), nSubframe_Length);
}
else
{
diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c
index c9782d4..01efa80 100644
--- a/drivers/staging/wilc1000/linux_mon.c
+++ b/drivers/staging/wilc1000/linux_mon.c
@@ -72,9 +72,9 @@
if (!skb)
return;
- memcpy(skb_put(skb, size), buff, size);
+ skb_put_data(skb, buff, size);
- cb_hdr = (struct wilc_wfi_radiotap_cb_hdr *)skb_push(skb, sizeof(*cb_hdr));
+ cb_hdr = skb_push(skb, sizeof(*cb_hdr));
memset(cb_hdr, 0, sizeof(struct wilc_wfi_radiotap_cb_hdr));
cb_hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
@@ -100,8 +100,8 @@
if (!skb)
return;
- memcpy(skb_put(skb, size), buff, size);
- hdr = (struct wilc_wfi_radiotap_hdr *)skb_push(skb, sizeof(*hdr));
+ skb_put_data(skb, buff, size);
+ hdr = skb_push(skb, sizeof(*hdr));
memset(hdr, 0, sizeof(struct wilc_wfi_radiotap_hdr));
hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
hdr->hdr.it_len = cpu_to_le16(sizeof(struct wilc_wfi_radiotap_hdr));
@@ -200,9 +200,9 @@
if (!skb2)
return -ENOMEM;
- memcpy(skb_put(skb2, skb->len), skb->data, skb->len);
+ skb_put_data(skb2, skb->data, skb->len);
- cb_hdr = (struct wilc_wfi_radiotap_cb_hdr *)skb_push(skb2, sizeof(*cb_hdr));
+ cb_hdr = skb_push(skb2, sizeof(*cb_hdr));
memset(cb_hdr, 0, sizeof(struct wilc_wfi_radiotap_cb_hdr));
cb_hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index d6d8034..f36598a8 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -1160,7 +1160,7 @@
skb->dev = wilc_netdev;
- memcpy(skb_put(skb, frame_len), buff_to_send, frame_len);
+ skb_put_data(skb, buff_to_send, frame_len);
skb->protocol = eth_type_trans(skb, wilc_netdev);
vif->netstats.rx_packets++;
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index a812e55..83ea8ab 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -3530,13 +3530,11 @@
/* Copy the 802.11 header to the skb
* (ctl frames may be less than a full header)
*/
- datap = skb_put(skb, hdrlen);
- memcpy(datap, &rxdesc->frame_control, hdrlen);
+ skb_put_data(skb, &rxdesc->frame_control, hdrlen);
/* If any, copy the data from the card to the skb */
if (datalen > 0) {
- datap = skb_put(skb, datalen);
- memcpy(datap, rxfrm->data, datalen);
+ datap = skb_put_data(skb, rxfrm->data, datalen);
/* check for unencrypted stuff if WEP bit set. */
if (*(datap - hdrlen + 1) & 0x40) /* wep set */
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index a062e80..fc8ad33 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -148,9 +148,7 @@
skb_pull(skb, ETH_HLEN);
/* tack on SNAP */
- e_snap =
- (struct wlan_snap *)skb_push(skb,
- sizeof(struct wlan_snap));
+ e_snap = skb_push(skb, sizeof(struct wlan_snap));
e_snap->type = htons(proto);
if (ethconv == WLAN_ETHCONV_8021h &&
p80211_stt_findproto(proto)) {
@@ -162,9 +160,7 @@
}
/* tack on llc */
- e_llc =
- (struct wlan_llc *)skb_push(skb,
- sizeof(struct wlan_llc));
+ e_llc = skb_push(skb, sizeof(struct wlan_llc));
e_llc->dsap = 0xAA; /* SNAP, see IEEE 802 */
e_llc->ssap = 0xAA;
e_llc->ctl = 0x03;
@@ -407,7 +403,7 @@
skb_pull(skb, payload_offset);
/* create 802.3 header at beginning of skb. */
- e_hdr = (struct wlan_ethhdr *)skb_push(skb, ETH_HLEN);
+ e_hdr = skb_push(skb, ETH_HLEN);
ether_addr_copy(e_hdr->daddr, daddr);
ether_addr_copy(e_hdr->saddr, saddr);
e_hdr->type = htons(payload_length);
@@ -448,7 +444,7 @@
skb_pull(skb, sizeof(struct wlan_snap));
/* create 802.3 header at beginning of skb. */
- e_hdr = (struct wlan_ethhdr *)skb_push(skb, ETH_HLEN);
+ e_hdr = skb_push(skb, ETH_HLEN);
e_hdr->type = e_snap->type;
ether_addr_copy(e_hdr->daddr, daddr);
ether_addr_copy(e_hdr->saddr, saddr);
@@ -475,7 +471,7 @@
skb_pull(skb, payload_offset);
/* create 802.3 header at beginning of skb. */
- e_hdr = (struct wlan_ethhdr *)skb_push(skb, ETH_HLEN);
+ e_hdr = skb_push(skb, ETH_HLEN);
ether_addr_copy(e_hdr->daddr, daddr);
ether_addr_copy(e_hdr->saddr, saddr);
e_hdr->type = htons(payload_length);
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 37a0518..e583dd8 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -752,7 +752,8 @@
&sin6->sin6_addr.s6_addr, 1);
}
- cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid);
+ cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid,
+ csk->com.local_addr.ss_family);
dst_release(csk->dst);
cxgb4_l2t_release(csk->l2t);
@@ -1084,8 +1085,7 @@
return;
}
- rpl5 = (struct cpl_t5_pass_accept_rpl *)__skb_put(skb, len);
- memset(rpl5, 0, len);
+ rpl5 = __skb_put_zero(skb, len);
INIT_TP_WR(rpl5, csk->tid);
OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
@@ -1313,8 +1313,7 @@
spin_lock(&cdev->cskq.lock);
list_add_tail(&csk->list, &cdev->cskq.list);
spin_unlock(&cdev->cskq.lock);
-
- cxgb4_insert_tid(t, csk, tid);
+ cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family);
cxgbit_pass_accept_rpl(csk, req);
goto rel_skb;
@@ -1367,8 +1366,7 @@
flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);
skb = __skb_dequeue(&csk->skbq);
- flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
- memset(flowc, 0, flowclen);
+ flowc = __skb_put_zero(skb, flowclen);
flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
FW_FLOWC_WR_NPARAMS_V(nparams));
@@ -1439,8 +1437,7 @@
return -ENOMEM;
/* set up ulp submode */
- req = (struct cpl_set_tcb_field *)__skb_put(skb, len);
- memset(req, 0, len);
+ req = __skb_put_zero(skb, len);
INIT_TP_WR(req, csk->tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
@@ -1476,8 +1473,7 @@
if (!skb)
return -ENOMEM;
- req = (struct cpl_set_tcb_field *)__skb_put(skb, len);
- memset(req, 0, len);
+ req = __skb_put_zero(skb, len);
INIT_TP_WR(req, csk->tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
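The cxgbit hunks use the zeroing variant: __skb_put_zero() folds the __skb_put() + memset() pair into one call. A minimal sketch, assuming the non-locking __skb_put() from <linux/skbuff.h>:

static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = __skb_put(skb, len);	/* extend tail, no overflow check */

	memset(tmp, 0, len);			/* zero-fill the new area */
	return tmp;
}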
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
index 5d78bdb..5fdb57c 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -79,7 +79,7 @@
if (!skb)
return NULL;
- req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
+ req = __skb_put(skb, wr_len);
INIT_ULPTX_WR(req, wr_len, 0, tid);
req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
FW_WR_ATOMIC_V(0));
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index bdcc8b4..dda13f1 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -136,7 +136,7 @@
unsigned int fslice = !!(iso_info->flags & CXGBIT_ISO_FSLICE);
unsigned int lslice = !!(iso_info->flags & CXGBIT_ISO_LSLICE);
- cpl = (struct cpl_tx_data_iso *)__skb_push(skb, sizeof(*cpl));
+ cpl = __skb_push(skb, sizeof(*cpl));
cpl->op_to_scsi = htonl(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) |
CPL_TX_DATA_ISO_FIRST_V(fslice) |
@@ -183,8 +183,7 @@
if (cxgbit_is_ofld_imm(skb))
immlen += dlen;
- req = (struct fw_ofld_tx_data_wr *)__skb_push(skb,
- hdr_size);
+ req = __skb_push(skb, hdr_size);
req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) |
FW_WR_COMPL_V(compl) |
FW_WR_IMMDLEN_V(immlen));
diff --git a/drivers/tty/ipwireless/network.c b/drivers/tty/ipwireless/network.c
index c0dfb64..c2f9a32 100644
--- a/drivers/tty/ipwireless/network.c
+++ b/drivers/tty/ipwireless/network.c
@@ -355,7 +355,7 @@
if (skb == NULL)
return NULL;
skb_reserve(skb, 2);
- memcpy(skb_put(skb, length), data, length);
+ skb_put_data(skb, data, length);
return skb;
}
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 2667a20..da830f8 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2688,7 +2688,7 @@
return;
}
skb_reserve(skb, NET_IP_ALIGN);
- memcpy(skb_put(skb, size), in_buf, size);
+ skb_put_data(skb, in_buf, size);
skb->dev = net;
skb->protocol = htons(ETH_P_IP);
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index a2c308f..3fafc5a 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -7960,7 +7960,7 @@
return;
}
- memcpy(skb_put(skb, size), buf, size);
+ skb_put_data(skb, buf, size);
skb->protocol = hdlc_type_trans(skb, dev);
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 31885f2..7e947ec 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -1755,7 +1755,7 @@
return;
}
- memcpy(skb_put(skb, size), buf, size);
+ skb_put_data(skb, buf, size);
skb->protocol = hdlc_type_trans(skb, dev);
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index 51e8846..9b4fb02 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -1874,7 +1874,7 @@
return;
}
- memcpy(skb_put(skb, size), buf, size);
+ skb_put_data(skb, buf, size);
skb->protocol = hdlc_type_trans(skb, dev);
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 864819f..24e34cf 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1004,18 +1004,15 @@
}
/* Insert NDP alignment. */
- ntb_iter = (void *) skb_put(skb2, ndp_pad);
- memset(ntb_iter, 0, ndp_pad);
+ skb_put_zero(skb2, ndp_pad);
/* Copy NTB across. */
- ntb_iter = (void *) skb_put(skb2, ncm->skb_tx_ndp->len);
- memcpy(ntb_iter, ncm->skb_tx_ndp->data, ncm->skb_tx_ndp->len);
+ skb_put_data(skb2, ncm->skb_tx_ndp->data, ncm->skb_tx_ndp->len);
dev_consume_skb_any(ncm->skb_tx_ndp);
ncm->skb_tx_ndp = NULL;
/* Insert zero'd datagram. */
- ntb_iter = (void *) skb_put(skb2, dgram_idx_len);
- memset(ntb_iter, 0, dgram_idx_len);
+ skb_put_zero(skb2, dgram_idx_len);
return skb2;
}
@@ -1049,7 +1046,7 @@
crc = ~crc32_le(~0,
skb->data,
skb->len);
- crc_pos = (void *) skb_put(skb, sizeof(uint32_t));
+ crc_pos = skb_put(skb, sizeof(uint32_t));
put_unaligned_le32(crc, crc_pos);
}
@@ -1080,8 +1077,7 @@
goto err;
ncm->skb_tx_data->dev = ncm->netdev;
- ntb_data = (void *) skb_put(ncm->skb_tx_data, ncb_len);
- memset(ntb_data, 0, ncb_len);
+ ntb_data = skb_put_zero(ncm->skb_tx_data, ncb_len);
/* dwSignature */
put_unaligned_le32(opts->nth_sign, ntb_data);
ntb_data += 2;
@@ -1100,8 +1096,7 @@
goto err;
ncm->skb_tx_ndp->dev = ncm->netdev;
- ntb_ndp = (void *) skb_put(ncm->skb_tx_ndp,
- opts->ndp_size);
+ ntb_ndp = skb_put(ncm->skb_tx_ndp, opts->ndp_size);
memset(ntb_ndp, 0, ncb_len);
/* dwSignature */
put_unaligned_le32(ncm->ndp_sign, ntb_ndp);
@@ -1118,8 +1113,7 @@
HRTIMER_MODE_REL);
/* Add the datagram position entries */
- ntb_ndp = (void *) skb_put(ncm->skb_tx_ndp, dgram_idx_len);
- memset(ntb_ndp, 0, dgram_idx_len);
+ ntb_ndp = skb_put_zero(ncm->skb_tx_ndp, dgram_idx_len);
ncb_len = ncm->skb_tx_data->len;
dgram_pad = ALIGN(ncb_len, div) + rem - ncb_len;
@@ -1132,10 +1126,8 @@
ncm->ndp_dgram_count++;
/* Add the new data to the skb */
- ntb_data = (void *) skb_put(ncm->skb_tx_data, dgram_pad);
- memset(ntb_data, 0, dgram_pad);
- ntb_data = (void *) skb_put(ncm->skb_tx_data, skb->len);
- memcpy(ntb_data, skb->data, skb->len);
+ skb_put_zero(ncm->skb_tx_data, dgram_pad);
+ skb_put_data(ncm->skb_tx_data, skb->data, skb->len);
dev_consume_skb_any(skb);
skb = NULL;
@@ -1318,8 +1310,8 @@
dg_len - crc_len);
if (skb2 == NULL)
goto err;
- memcpy(skb_put(skb2, dg_len - crc_len),
- skb->data + index, dg_len - crc_len);
+ skb_put_data(skb2, skb->data + index,
+ dg_len - crc_len);
skb_queue_tail(list, skb2);
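f_ncm.c uses both forms: skb_put_zero() where alignment or index padding must be zero-filled, and skb_put_data() where a payload is copied in. The zeroing helper is, in sketch form (mirroring skb_put_data() above):

static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_put(skb, len);	/* extend the tail */

	memset(tmp, 0, len);		/* zero-fill instead of copying */
	return tmp;
}

Note the one skb_put() left unconverted in this file: the NDP allocation reserves opts->ndp_size bytes but clears ncb_len of them, so it does not match the helper's pattern and presumably was left for a separate cleanup.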
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
index 6a1ce6a..9c4c58e 100644
--- a/drivers/usb/gadget/function/f_phonet.c
+++ b/drivers/usb/gadget/function/f_phonet.c
@@ -336,7 +336,7 @@
skb->protocol = htons(ETH_P_PHONET);
skb_reset_mac_header(skb);
/* Can't use pskb_pull() on page in IRQ */
- memcpy(skb_put(skb, 1), page_address(page), 1);
+ skb_put_data(skb, page_address(page), 1);
}
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index a3b5e46..d634104 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -999,7 +999,7 @@
if (!skb)
return;
- header = (void *)skb_push(skb, sizeof(*header));
+ header = skb_push(skb, sizeof(*header));
memset(header, 0, sizeof *header);
header->MessageType = cpu_to_le32(RNDIS_MSG_PACKET);
header->MessageLength = cpu_to_le32(skb->len);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f61f852..e3d7ea1 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -28,6 +28,8 @@
#include <linux/if_macvlan.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
+#include <linux/skb_array.h>
+#include <linux/skbuff.h>
#include <net/sock.h>
@@ -85,6 +87,13 @@
struct vhost_virtqueue *vq;
};
+#define VHOST_RX_BATCH 64
+struct vhost_net_buf {
+ struct sk_buff **queue;
+ int tail;
+ int head;
+};
+
struct vhost_net_virtqueue {
struct vhost_virtqueue vq;
size_t vhost_hlen;
@@ -99,6 +108,8 @@
/* Reference counting for outstanding ubufs.
* Protected by vq mutex. Writers must also take device mutex. */
struct vhost_net_ubuf_ref *ubufs;
+ struct skb_array *rx_array;
+ struct vhost_net_buf rxq;
};
struct vhost_net {
@@ -117,6 +128,71 @@
static unsigned vhost_net_zcopy_mask __read_mostly;
+static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
+{
+ if (rxq->tail != rxq->head)
+ return rxq->queue[rxq->head];
+ else
+ return NULL;
+}
+
+static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
+{
+ return rxq->tail - rxq->head;
+}
+
+static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
+{
+ return rxq->tail == rxq->head;
+}
+
+static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
+{
+ void *ret = vhost_net_buf_get_ptr(rxq);
+ ++rxq->head;
+ return ret;
+}
+
+static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
+{
+ struct vhost_net_buf *rxq = &nvq->rxq;
+
+ rxq->head = 0;
+ rxq->tail = skb_array_consume_batched(nvq->rx_array, rxq->queue,
+ VHOST_RX_BATCH);
+ return rxq->tail;
+}
+
+static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
+{
+ struct vhost_net_buf *rxq = &nvq->rxq;
+
+ if (nvq->rx_array && !vhost_net_buf_is_empty(rxq)) {
+ skb_array_unconsume(nvq->rx_array, rxq->queue + rxq->head,
+ vhost_net_buf_get_size(rxq));
+ rxq->head = rxq->tail = 0;
+ }
+}
+
+static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
+{
+ struct vhost_net_buf *rxq = &nvq->rxq;
+
+ if (!vhost_net_buf_is_empty(rxq))
+ goto out;
+
+ if (!vhost_net_buf_produce(nvq))
+ return 0;
+
+out:
+ return __skb_array_len_with_tag(vhost_net_buf_get_ptr(rxq));
+}
+
+static void vhost_net_buf_init(struct vhost_net_buf *rxq)
+{
+ rxq->head = rxq->tail = 0;
+}
+
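Taken together, these helpers implement a simple cursor over a batch-filled array: head and tail index into rxq->queue, vhost_net_buf_produce() refills up to VHOST_RX_BATCH skbs from the underlying skb_array, and peek/consume read through the cursor. An illustrative consumer loop (the loop itself is not from the patch; only the helper names are):

	/* Illustrative only: drain whatever the tap ring currently holds. */
	while (vhost_net_buf_peek(nvq)) {	/* refills rxq when empty */
		struct sk_buff *skb = vhost_net_buf_consume(&nvq->rxq);

		(void)skb;	/* placeholder: the patch instead hands this
				 * pointer to the socket layer via msg.msg_control
				 */
	}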
static void vhost_net_enable_zcopy(int vq)
{
vhost_net_zcopy_mask |= 0x1 << vq;
@@ -201,6 +277,7 @@
n->vqs[i].ubufs = NULL;
n->vqs[i].vhost_hlen = 0;
n->vqs[i].sock_hlen = 0;
+ vhost_net_buf_init(&n->vqs[i].rxq);
}
}
@@ -503,15 +580,14 @@
mutex_unlock(&vq->mutex);
}
-static int peek_head_len(struct sock *sk)
+static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
{
- struct socket *sock = sk->sk_socket;
struct sk_buff *head;
int len = 0;
unsigned long flags;
- if (sock->ops->peek_len)
- return sock->ops->peek_len(sock);
+ if (rvq->rx_array)
+ return vhost_net_buf_peek(rvq);
spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
head = skb_peek(&sk->sk_receive_queue);
@@ -537,10 +613,11 @@
static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
{
+ struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX];
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *vq = &nvq->vq;
unsigned long uninitialized_var(endtime);
- int len = peek_head_len(sk);
+ int len = peek_head_len(rvq, sk);
if (!len && vq->busyloop_timeout) {
/* Both tx vq and rx socket were polled here */
@@ -561,7 +638,7 @@
vhost_poll_queue(&vq->poll);
mutex_unlock(&vq->mutex);
- len = peek_head_len(sk);
+ len = peek_head_len(rvq, sk);
}
return len;
@@ -699,6 +776,8 @@
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
goto out;
+ if (nvq->rx_array)
+ msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
/* On overrun, truncate and discard */
if (unlikely(headcount > UIO_MAXIOV)) {
iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
@@ -815,6 +894,7 @@
struct vhost_net *n;
struct vhost_dev *dev;
struct vhost_virtqueue **vqs;
+ struct sk_buff **queue;
int i;
n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_REPEAT);
@@ -826,6 +906,15 @@
return -ENOMEM;
}
+ queue = kmalloc_array(VHOST_RX_BATCH, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!queue) {
+ kfree(vqs);
+ kvfree(n);
+ return -ENOMEM;
+ }
+ n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;
+
dev = &n->dev;
vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
@@ -838,6 +927,7 @@
n->vqs[i].done_idx = 0;
n->vqs[i].vhost_hlen = 0;
n->vqs[i].sock_hlen = 0;
+ vhost_net_buf_init(&n->vqs[i].rxq);
}
vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
@@ -853,11 +943,14 @@
struct vhost_virtqueue *vq)
{
struct socket *sock;
+ struct vhost_net_virtqueue *nvq =
+ container_of(vq, struct vhost_net_virtqueue, vq);
mutex_lock(&vq->mutex);
sock = vq->private_data;
vhost_net_disable_vq(n, vq);
vq->private_data = NULL;
+ vhost_net_buf_unproduce(nvq);
mutex_unlock(&vq->mutex);
return sock;
}
@@ -912,6 +1005,7 @@
/* We do an extra flush before freeing memory,
* since jobs can re-queue themselves. */
vhost_net_flush(n);
+ kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
kfree(n->dev.vqs);
kvfree(n);
return 0;
@@ -950,6 +1044,25 @@
return ERR_PTR(r);
}
+static struct skb_array *get_tap_skb_array(int fd)
+{
+ struct skb_array *array;
+ struct file *file = fget(fd);
+
+ if (!file)
+ return NULL;
+ array = tun_get_skb_array(file);
+ if (!IS_ERR(array))
+ goto out;
+ array = tap_get_skb_array(file);
+ if (!IS_ERR(array))
+ goto out;
+ array = NULL;
+out:
+ fput(file);
+ return array;
+}
+
static struct socket *get_tap_socket(int fd)
{
struct file *file = fget(fd);
@@ -1026,6 +1139,9 @@
vhost_net_disable_vq(n, vq);
vq->private_data = sock;
+ vhost_net_buf_unproduce(nvq);
+ if (index == VHOST_NET_VQ_RX)
+ nvq->rx_array = get_tap_skb_array(fd);
r = vhost_vq_init_access(vq);
if (r)
goto err_used;
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index d5990eb..02781e7 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -341,6 +341,7 @@
struct msghdr msg;
struct kvec iov[1];
size_t offset;
+ s64 tx_total_len;
u32 abort_code;
int ret;
@@ -364,9 +365,20 @@
srx.transport.sin.sin_port = call->port;
memcpy(&srx.transport.sin.sin_addr, addr, 4);
+ /* Work out the length we're going to transmit. This is awkward for
+ * calls such as FS.StoreData where there's an extra injection of data
+ * after the initial fixed part.
+ */
+ tx_total_len = call->request_size;
+ if (call->send_pages) {
+ tx_total_len += call->last_to - call->first_offset;
+ tx_total_len += (call->last - call->first) * PAGE_SIZE;
+ }
+
/* create a call */
rxcall = rxrpc_kernel_begin_call(afs_socket, &srx, call->key,
- (unsigned long) call, gfp,
+ (unsigned long)call,
+ tx_total_len, gfp,
(async ?
afs_wake_up_async_call :
afs_wake_up_call_waiter));
@@ -738,6 +750,8 @@
_enter("");
+ rxrpc_kernel_set_tx_length(afs_socket, call->rxcall, 0);
+
msg.msg_name = NULL;
msg.msg_namelen = 0;
iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, NULL, 0, 0);
@@ -772,6 +786,8 @@
_enter("");
+ rxrpc_kernel_set_tx_length(afs_socket, call->rxcall, len);
+
iov[0].iov_base = (void *) buf;
iov[0].iov_len = len;
msg.msg_name = NULL;
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
new file mode 100644
index 0000000..c893b95
--- /dev/null
+++ b/include/linux/avf/virtchnl.h
@@ -0,0 +1,701 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _VIRTCHNL_H_
+#define _VIRTCHNL_H_
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the drivers for all devices starting from our 40G product line
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * The Firmware copies the cookie fields when sending messages between the
+ * PF and VF, but uses all other fields internally. Due to this limitation,
+ * we must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value
+ * is of status_code type, defined in the shared type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of
+ * these opcodes. The VF driver must first validate the API version of the
+ * PF driver, then request a reset, then get resources, then configure
+ * queues and interrupts. After these operations are complete, the VF
+ * driver may start its queues, optionally add MAC and VLAN filters, and
+ * process traffic.
+ */
+
+/* START GENERIC DEFINES
+ * Need to ensure the following enums and defines hold the same meaning and
+ * value in current and future projects
+ */
+
+/* Error Codes */
+enum virtchnl_status_code {
+ VIRTCHNL_STATUS_SUCCESS = 0,
+ VIRTCHNL_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
+ VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
+ VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
+ VIRTCHNL_STATUS_NOT_SUPPORTED = -64,
+};
+
+#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
+#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
+#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
+#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
+#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
+#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
+
+enum virtchnl_link_speed {
+ VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
+ VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
+};
+
+/* for hsplit_0 field of Rx HMC context */
+/* deprecated with AVF 1.0 */
+enum virtchnl_rx_hsplit {
+ VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
+ VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
+ VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
+ VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
+ VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
+};
+
+/* END GENERIC DEFINES */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum virtchnl_ops {
+/* The PF sends status change events to VFs using
+ * the VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
+ * Use of "advanced opcode" features must be negotiated as part of capabilities
+ * exchange and are not considered part of base mode feature set.
+ */
+ VIRTCHNL_OP_UNKNOWN = 0,
+ VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ VIRTCHNL_OP_RESET_VF = 2,
+ VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+ VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+ VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+ VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+ VIRTCHNL_OP_ENABLE_QUEUES = 8,
+ VIRTCHNL_OP_DISABLE_QUEUES = 9,
+ VIRTCHNL_OP_ADD_ETH_ADDR = 10,
+ VIRTCHNL_OP_DEL_ETH_ADDR = 11,
+ VIRTCHNL_OP_ADD_VLAN = 12,
+ VIRTCHNL_OP_DEL_VLAN = 13,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+ VIRTCHNL_OP_GET_STATS = 15,
+ VIRTCHNL_OP_RSVD = 16,
+ VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
+ VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
+ VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ VIRTCHNL_OP_SET_RSS_HENA = 26,
+};
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+ { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
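Expanded, the check is a divide-by-zero trick. For example, VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg) becomes:

enum virtchnl_static_assert_enum_virtchnl_msg {
	virtchnl_static_assert_virtchnl_msg =
		(20) / ((sizeof(struct virtchnl_msg) == (20)) ? 1 : 0)
};

If the structure size drifts from 20 bytes, the divisor evaluates to 0 and the enum initializer fails to compile; if it matches, the enum is simply never used.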
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ enum virtchnl_status_code v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
+
+/* Message descriptions and data structures. */
+
+/* VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define VIRTCHNL_VERSION_MAJOR 1
+#define VIRTCHNL_VERSION_MINOR 1
+#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
+struct virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
+
+#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
+#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
+
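A hedged sketch of the major/minor policy described above, as a VF driver might apply it to the PF's reply (the function name is hypothetical; the macros are from this header):

/* Hypothetical VF-side check on the VIRTCHNL_OP_VERSION reply. */
static int check_pf_version(const struct virtchnl_version_info *pf_ver)
{
	if (pf_ver->major != VIRTCHNL_VERSION_MAJOR)
		return -EOPNOTSUPP;	/* major mismatch: VF cannot operate */
	if (pf_ver->minor != VIRTCHNL_VERSION_MINOR)
		pr_warn("virtchnl: minor version mismatch, continuing\n");
	return 0;			/* operate, warning logged if needed */
}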
+/* VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! The VF driver must delay, then poll the VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
+ * vsi_type should always be 6 for backward compatibility. Add other fields
+ * as needed.
+ */
+enum virtchnl_vsi_type {
+ VIRTCHNL_VSI_TYPE_INVALID = 0,
+ VIRTCHNL_VSI_SRIOV = 6,
+};
+
+/* VIRTCHNL_OP_GET_VF_RESOURCES
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
+ * PF responds with an indirect message containing
+ * virtchnl_vf_resource and one or more
+ * virtchnl_vsi_resource structures.
+ */
+
+struct virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum virtchnl_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[ETH_ALEN];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
+
+/* VF offload flags
+ * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
+ * TX/RX Checksum offloading and TSO for non-tunnelled packets.
+ */
+#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
+#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0x00080000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP 0x00100000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0x00200000
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0x00400000
+
+#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
+ VIRTCHNL_VF_OFFLOAD_VLAN | \
+ VIRTCHNL_VF_OFFLOAD_RSS_PF)
+
+struct virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_offload_flags;
+ u32 rss_key_size;
+ u32 rss_lut_size;
+
+ struct virtchnl_vsi_resource vsi_res[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
+
+/* VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled; /* deprecated with AVF 1.0 */
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
+
+/* VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled; /* deprecated with AVF 1.0 */
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u32 pad1;
+ u64 dma_ring_addr;
+ enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+ u32 pad2;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
+
+/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct virtchnl_txq_info txq;
+ struct virtchnl_rxq_info rxq;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
+
+struct virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ u32 pad;
+ struct virtchnl_queue_pair_info qpair[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+
+/* VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
+
+struct virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct virtchnl_vector_map vecmap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
+
+/* VIRTCHNL_OP_ENABLE_QUEUES
+ * VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
+
+/* VIRTCHNL_OP_ADD_ETH_ADDR
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* VIRTCHNL_OP_DEL_ETH_ADDR
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct virtchnl_ether_addr {
+ u8 addr[ETH_ALEN];
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
+
+struct virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct virtchnl_ether_addr list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
+
+/* VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+
+/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
+
+#define FLAG_VF_UNICAST_PROMISC 0x00000001
+#define FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct eth_stats in an external buffer.
+ */
+
+/* VIRTCHNL_OP_CONFIG_RSS_KEY
+ * VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
+
+struct virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
+
+/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ */
+struct virtchnl_rss_hena {
+ u64 hena;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+
+/* VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum virtchnl_event_codes {
+ VIRTCHNL_EVENT_UNKNOWN = 0,
+ VIRTCHNL_EVENT_LINK_CHANGE,
+ VIRTCHNL_EVENT_RESET_IMPENDING,
+ VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+
+#define PF_EVENT_SEVERITY_INFO 0
+#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct virtchnl_pf_event {
+ enum virtchnl_event_codes event;
+ union {
+ struct {
+ enum virtchnl_link_speed link_speed;
+ bool link_status;
+ } link_event;
+ } event_data;
+
+ int severity;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
+
+/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
+ * VF uses this message to request PF to map IWARP vectors to IWARP queues.
+ * The request for this originates from the VF IWARP driver through
+ * a client interface between VF LAN and VF IWARP driver.
+ * A vector could have both an AEQ and a CEQ attached to it. There is,
+ * however, a single AEQ per VF IWARP instance, so most vectors will
+ * have an INVALID_IDX for the aeq and a valid idx for the ceq.
+ * There will never be a case where there will be multiple CEQs attached
+ * to a single vector.
+ * PF configures interrupt mapping and returns status.
+ */
+
+struct virtchnl_iwarp_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
+
+struct virtchnl_iwarp_qvlist_info {
+ u32 num_vectors;
+ struct virtchnl_iwarp_qv_info qv_info[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
+
+/* VF reset states - these are written into the RSTAT register:
+ * VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return DEADBEEF, which, when masked,
+ * will result in 3.
+ */
+enum virtchnl_vfr_states {
+ VIRTCHNL_VFR_INPROGRESS = 0,
+ VIRTCHNL_VFR_COMPLETED,
+ VIRTCHNL_VFR_VFACTIVE,
+};
+
+/**
+ * virtchnl_vc_validate_vf_msg
+ * @ver: Virtchnl version info
+ * @v_opcode: Opcode for the message
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * validate msg format against struct for each opcode
+ */
+static inline int
+virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
+ u8 *msg, u16 msglen)
+{
+ bool err_msg_format = false;
+ int valid_len = 0;
+
+ /* Validate message length. */
+ switch (v_opcode) {
+ case VIRTCHNL_OP_VERSION:
+ valid_len = sizeof(struct virtchnl_version_info);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ if (VF_IS_V11(ver))
+ valid_len = sizeof(u32);
+ break;
+ case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ valid_len = sizeof(struct virtchnl_txq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ valid_len = sizeof(struct virtchnl_rxq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_vsi_queue_config_info *vqc =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ valid_len += (vqc->num_queue_pairs *
+ sizeof(struct
+ virtchnl_queue_pair_info));
+ if (vqc->num_queue_pairs == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_irq_map_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_irq_map_info *vimi =
+ (struct virtchnl_irq_map_info *)msg;
+ valid_len += (vimi->num_vectors *
+ sizeof(struct virtchnl_vector_map));
+ if (vimi->num_vectors == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ valid_len = sizeof(struct virtchnl_ether_addr_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_ether_addr_list *veal =
+ (struct virtchnl_ether_addr_list *)msg;
+ valid_len += veal->num_elements *
+ sizeof(struct virtchnl_ether_addr);
+ if (veal->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
+ valid_len = sizeof(struct virtchnl_vlan_filter_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
+ valid_len += vfl->num_elements * sizeof(u16);
+ if (vfl->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ valid_len = sizeof(struct virtchnl_promisc_info);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+ case VIRTCHNL_OP_IWARP:
+ /* These messages are opaque to us and will be validated in
+ * the RDMA client code. We just need to check for nonzero
+ * length. The firmware will enforce max length restrictions.
+ */
+ if (msglen)
+ valid_len = msglen;
+ else
+ err_msg_format = true;
+ break;
+ case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ break;
+ case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_iwarp_qvlist_info *qv =
+ (struct virtchnl_iwarp_qvlist_info *)msg;
+ if (qv->num_vectors == 0) {
+ err_msg_format = true;
+ break;
+ }
+ valid_len += ((qv->num_vectors - 1) *
+ sizeof(struct virtchnl_iwarp_qv_info));
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ valid_len = sizeof(struct virtchnl_rss_key);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+ valid_len += vrk->key_len - 1;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ valid_len = sizeof(struct virtchnl_rss_lut);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_lut *vrl =
+ (struct virtchnl_rss_lut *)msg;
+ valid_len += vrl->lut_entries - 1;
+ }
+ break;
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ break;
+ case VIRTCHNL_OP_SET_RSS_HENA:
+ valid_len = sizeof(struct virtchnl_rss_hena);
+ break;
+ /* These are always errors coming from the VF. */
+ case VIRTCHNL_OP_EVENT:
+ case VIRTCHNL_OP_UNKNOWN:
+ default:
+ return VIRTCHNL_ERR_PARAM;
+ }
+ /* a few more checks */
+ if ((valid_len != msglen) || (err_msg_format))
+ return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
+
+ return 0;
+}
+#endif /* _VIRTCHNL_H_ */
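How a PF driver might call the validator before acting on a VF mailbox message, in sketch form (the dispatch function is hypothetical; only virtchnl_vc_validate_vf_msg() is from this header):

/* Hypothetical PF-side entry point for a received VF message. */
static int pf_handle_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
			    u8 *msg, u16 msglen)
{
	int err = virtchnl_vc_validate_vf_msg(ver, v_opcode, msg, msglen);

	if (err)	/* bad length or malformed variable-size payload */
		return err;

	/* msglen now exactly matches what v_opcode requires; it is safe
	 * to cast msg to the corresponding virtchnl_* structure and
	 * dispatch on the opcode.
	 */
	return 0;
}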
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 6bb38d7..1bcbf0a 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -46,6 +46,7 @@
u32 max_entries;
u32 map_flags;
u32 pages;
+ u32 id;
struct user_struct *user;
const struct bpf_map_ops *ops;
struct work_struct work;
@@ -156,7 +157,7 @@
* with 'type' (read or write) is allowed
*/
bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
- enum bpf_reg_type *reg_type);
+ enum bpf_reg_type *reg_type, int *ctx_field_size);
int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
const struct bpf_prog *prog);
u32 (*convert_ctx_access)(enum bpf_access_type type,
@@ -171,6 +172,8 @@
atomic_t refcnt;
u32 used_map_cnt;
u32 max_ctx_offset;
+ u32 stack_depth;
+ u32 id;
struct latch_tree_node ksym_tnode;
struct list_head ksym_lnode;
const struct bpf_verifier_ops *ops;
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index d5093b5..189741c 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -73,6 +73,7 @@
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
};
+ int ctx_field_size; /* the ctx field size for load/store insns, maybe 0 */
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 62d948f..1fa26dc 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -57,6 +57,9 @@
#define BPF_REG_AX MAX_BPF_REG
#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
+/* unused opcode to mark special call to bpf_tail_call() helper */
+#define BPF_TAIL_CALL 0xf0
+
/* As per nm, we expose JITed images as text (code) section for
* kallsyms. That way, tools like perf can find it to match
* addresses.
@@ -66,8 +69,6 @@
/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK 512
-#define BPF_TAG_SIZE 8
-
/* Helper macros for filter block array initializers. */
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
@@ -429,6 +430,7 @@
kmemcheck_bitfield_end(meta);
enum bpf_prog_type type; /* Type of BPF program */
u32 len; /* Number of filter blocks */
+ u32 jited_len; /* Size of jited insns in bytes */
u8 tag[BPF_TAG_SIZE];
struct bpf_prog_aux *aux; /* Auxiliary fields */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 6903335..34e1bcd 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -2400,7 +2400,11 @@
#define WLAN_MAX_KEY_LEN 32
+#define WLAN_PMK_NAME_LEN 16
#define WLAN_PMKID_LEN 16
+#define WLAN_PMK_LEN_EAP_LEAP 16
+#define WLAN_PMK_LEN 32
+#define WLAN_PMK_LEN_SUITE_B_192 48
#define WLAN_OUI_WFA 0x506f9a
#define WLAN_OUI_TYPE_WFA_P2P 9
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 0c16866..3cd18ac 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -62,6 +62,7 @@
struct list_head *br_ip_list);
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto);
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto);
+bool br_multicast_enabled(const struct net_device *dev);
#else
static inline int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list)
@@ -78,6 +79,19 @@
{
return false;
}
+static inline bool br_multicast_enabled(const struct net_device *dev)
+{
+ return false;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
+bool br_vlan_enabled(const struct net_device *dev);
+#else
+static inline bool br_vlan_enabled(const struct net_device *dev)
+{
+ return false;
+}
#endif
#endif
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
index 3482c3c..4837157 100644
--- a/include/linux/if_tap.h
+++ b/include/linux/if_tap.h
@@ -3,6 +3,7 @@
#if IS_ENABLED(CONFIG_TAP)
struct socket *tap_get_socket(struct file *);
+struct skb_array *tap_get_skb_array(struct file *file);
#else
#include <linux/err.h>
#include <linux/errno.h>
@@ -12,6 +13,10 @@
{
return ERR_PTR(-EINVAL);
}
+static inline struct skb_array *tap_get_skb_array(struct file *f)
+{
+ return ERR_PTR(-EINVAL);
+}
#endif /* CONFIG_TAP */
#include <net/sock.h>
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index c05216a..3029460 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -298,4 +298,6 @@
#define TEAM_DEFAULT_NUM_TX_QUEUES 16
#define TEAM_DEFAULT_NUM_RX_QUEUES 16
+#define MODULE_ALIAS_TEAM_MODE(kind) MODULE_ALIAS("team-mode-" kind)
+
#endif /* _LINUX_IF_TEAM_H_ */
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index ed6da2e..bf9bdf4 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -19,6 +19,7 @@
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
struct socket *tun_get_socket(struct file *);
+struct skb_array *tun_get_skb_array(struct file *file);
#else
#include <linux/err.h>
#include <linux/errno.h>
@@ -28,5 +29,9 @@
{
return ERR_PTR(-EINVAL);
}
+static inline struct skb_array *tun_get_skb_array(struct file *f)
+{
+ return ERR_PTR(-EINVAL);
+}
#endif /* CONFIG_TUN */
#endif /* __IF_TUN_H */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 283dc2f..5e6a2d4 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -318,7 +318,7 @@
if (skb_cow_head(skb, VLAN_HLEN) < 0)
return -ENOMEM;
- veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
+ veth = skb_push(skb, VLAN_HLEN);
/* Move the mac addresses to the beginning of the new header. */
memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index a2e9d6e..e7c04c4 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -150,8 +150,15 @@
unsigned long ifa_tstamp; /* updated timestamp */
};
+struct in_validator_info {
+ __be32 ivi_addr;
+ struct in_device *ivi_dev;
+};
+
int register_inetaddr_notifier(struct notifier_block *nb);
int unregister_inetaddr_notifier(struct notifier_block *nb);
+int register_inetaddr_validator_notifier(struct notifier_block *nb);
+int unregister_inetaddr_validator_notifier(struct notifier_block *nb);
void inet_netconf_notify_devconf(struct net *net, int event, int type,
int ifindex, struct ipv4_devconf *devconf);
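The new validator chain lets a subscriber veto an IPv4 address before it is committed to the device. A minimal sketch of a consumer, with a hypothetical policy (reject anything outside 10.0.0.0/8); only the register/unregister calls and struct in_validator_info are from this hunk:

/* Hypothetical consumer: refuse addresses outside 10.0.0.0/8. */
static int my_inetaddr_validate(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct in_validator_info *ivi = ptr;

	if ((ntohl(ivi->ivi_addr) & 0xff000000) != 0x0a000000)
		return notifier_from_errno(-EADDRNOTAVAIL); /* veto the address */
	return NOTIFY_DONE;
}

static struct notifier_block my_inetaddr_nb = {
	.notifier_call = my_inetaddr_validate,
};

/* register_inetaddr_validator_notifier(&my_inetaddr_nb) at init time,
 * unregister_inetaddr_validator_notifier(&my_inetaddr_nb) at exit.
 */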
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h
index ac02c54..a7330eb 100644
--- a/include/linux/mISDNif.h
+++ b/include/linux/mISDNif.h
@@ -554,7 +554,7 @@
if (!skb)
return NULL;
if (len)
- memcpy(skb_put(skb, len), dp, len);
+ skb_put_data(skb, dp, len);
hh = mISDN_HEAD_P(skb);
hh->prim = prim;
hh->id = id;
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index f541da6..472fa4d 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -37,6 +37,8 @@
#define PHY_ID_KSZ8795 0x00221550
+#define PHY_ID_KSZ9477 0x00221631
+
/* struct phy_device dev_flags definitions */
#define MICREL_PHY_50MHZ_CLK 0x00000001
#define MICREL_PHY_FXEN 0x00000002
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 1629a0c..e870bfa 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -31,7 +31,7 @@
extern int mii_link_ok (struct mii_if_info *mii);
extern int mii_nway_restart (struct mii_if_info *mii);
extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
-extern int mii_ethtool_get_link_ksettings(
+extern void mii_ethtool_get_link_ksettings(
struct mii_if_info *mii, struct ethtool_link_ksettings *cmd);
extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
extern int mii_ethtool_set_link_ksettings(
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index a940ec6..b26a478 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -300,6 +300,8 @@
MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
+
+ MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
};
enum {
@@ -973,6 +975,7 @@
MLX5_CAP_RESERVED,
MLX5_CAP_VECTOR_CALC,
MLX5_CAP_QOS,
+ MLX5_CAP_FPGA,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1094,6 +1097,9 @@
#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
+#define MLX5_CAP_FPGA(mdev, cap) \
+ MLX5_GET(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
+
enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,
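With MLX5_CAP_FPGA() in place, FPGA-specific setup can be gated on the capability in the usual way. A hedged sketch, assuming capabilities were already queried into mdev->caps:

	if (MLX5_CAP_GEN(mdev, fpga)) {
		u32 fpga_id = MLX5_CAP_FPGA(mdev, fpga_id);
		/* ... bring up mdev->fpga ... */
	}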
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 93273d9..bf15e87 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -108,6 +108,8 @@
MLX5_REG_QTCT = 0x400a,
MLX5_REG_DCBX_PARAM = 0x4020,
MLX5_REG_DCBX_APP = 0x4021,
+ MLX5_REG_FPGA_CAP = 0x4022,
+ MLX5_REG_FPGA_CTRL = 0x4023,
MLX5_REG_PCAP = 0x5001,
MLX5_REG_PMTU = 0x5003,
MLX5_REG_PTYS = 0x5004,
@@ -761,6 +763,9 @@
atomic_t num_qps;
u32 issi;
struct mlx5e_resources mlx5e_res;
+#ifdef CONFIG_MLX5_FPGA
+ struct mlx5_fpga_device *fpga;
+#endif
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *rmap;
#endif
@@ -812,6 +817,7 @@
u64 ts1;
u64 ts2;
u16 op;
+ bool polling;
};
struct mlx5_pas {
@@ -895,11 +901,6 @@
return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
-static inline void *mlx5_vzalloc(unsigned long size)
-{
- return kvzalloc(size, GFP_KERNEL);
-}
-
static inline u32 mlx5_base_mkey(const u32 key)
{
return key & 0xffffff00u;
@@ -915,6 +916,8 @@
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size, mlx5_cmd_cbk_t callback,
void *context);
+int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
+ void *out, int out_size);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
@@ -925,6 +928,7 @@
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
+void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index edafedb..e86ef88 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -32,6 +32,8 @@
#ifndef MLX5_IFC_H
#define MLX5_IFC_H
+#include "mlx5_ifc_fpga.h"
+
enum {
MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS = 0x0,
MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED = 0x1,
@@ -56,7 +58,8 @@
MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT = 0x1b,
MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT = 0x1f,
MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION = 0xa,
- MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb
+ MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb,
+ MLX5_EVENT_TYPE_CODING_FPGA_ERROR = 0x20,
};
enum {
@@ -658,9 +661,9 @@
struct mlx5_ifc_atomic_caps_bits {
u8 reserved_at_0[0x40];
- u8 atomic_req_8B_endianess_mode[0x2];
+ u8 atomic_req_8B_endianness_mode[0x2];
u8 reserved_at_42[0x4];
- u8 supported_atomic_req_8B_endianess_mode_1[0x1];
+ u8 supported_atomic_req_8B_endianness_mode_1[0x1];
u8 reserved_at_47[0x19];
@@ -798,7 +801,8 @@
u8 max_indirection[0x8];
u8 fixed_buffer_size[0x1];
u8 log_max_mrw_sz[0x7];
- u8 reserved_at_110[0x2];
+ u8 force_teardown[0x1];
+ u8 reserved_at_111[0x1];
u8 log_max_bsf_list_size[0x6];
u8 umr_extended_translation_offset[0x1];
u8 null_mkey[0x1];
@@ -860,7 +864,8 @@
u8 max_tc[0x4];
u8 reserved_at_1d0[0x1];
u8 dcbx[0x1];
- u8 reserved_at_1d2[0x4];
+ u8 reserved_at_1d2[0x3];
+ u8 fpga[0x1];
u8 rol_s[0x1];
u8 rol_g[0x1];
u8 reserved_at_1d8[0x1];
@@ -2194,6 +2199,7 @@
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
struct mlx5_ifc_qos_cap_bits qos_cap;
+ struct mlx5_ifc_fpga_cap_bits fpga_cap;
u8 reserved_at_0[0x8000];
};
@@ -3089,18 +3095,25 @@
u8 reserved_at_10[0x10];
};
+enum {
+ MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_SUCCESS = 0x0,
+ MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL = 0x1,
+};
+
struct mlx5_ifc_teardown_hca_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x3f];
+
+ u8 force_state[0x1];
};
enum {
MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0,
- MLX5_TEARDOWN_HCA_IN_PROFILE_PANIC_CLOSE = 0x1,
+ MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE = 0x1,
};
struct mlx5_ifc_teardown_hca_in_bits {
@@ -6622,6 +6635,24 @@
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_flow_table_context_bits {
+ u8 encap_en[0x1];
+ u8 decap_en[0x1];
+ u8 reserved_at_2[0x2];
+ u8 table_miss_action[0x4];
+ u8 level[0x8];
+ u8 reserved_at_10[0x8];
+ u8 log_size[0x8];
+
+ u8 reserved_at_20[0x8];
+ u8 table_miss_id[0x18];
+
+ u8 reserved_at_40[0x8];
+ u8 lag_master_next_table_id[0x18];
+
+ u8 reserved_at_60[0xe0];
+};
+
struct mlx5_ifc_create_flow_table_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@@ -6640,21 +6671,7 @@
u8 reserved_at_a0[0x20];
- u8 encap_en[0x1];
- u8 decap_en[0x1];
- u8 reserved_at_c2[0x2];
- u8 table_miss_mode[0x4];
- u8 level[0x8];
- u8 reserved_at_d0[0x8];
- u8 log_size[0x8];
-
- u8 reserved_at_e0[0x8];
- u8 table_miss_id[0x18];
-
- u8 reserved_at_100[0x8];
- u8 lag_master_next_table_id[0x18];
-
- u8 reserved_at_120[0x80];
+ struct mlx5_ifc_flow_table_context_bits flow_table_context;
};
struct mlx5_ifc_create_flow_group_out_bits {
@@ -7286,7 +7303,8 @@
u8 ib_link_width_oper[0x10];
u8 ib_proto_oper[0x10];
- u8 reserved_at_160[0x20];
+ u8 reserved_at_160[0x1c];
+ u8 connector_type[0x4];
u8 eth_proto_lp_advertise[0x20];
@@ -7689,8 +7707,10 @@
};
struct mlx5_ifc_pcam_enhanced_features_bits {
- u8 reserved_at_0[0x7e];
+ u8 reserved_at_0[0x7c];
+ u8 ptys_connector_type[0x1];
+ u8 reserved_at_7d[0x1];
u8 ppcnt_discard_group[0x1];
u8 ppcnt_statistical_group[0x1];
};
@@ -8190,6 +8210,8 @@
struct mlx5_ifc_sltp_reg_bits sltp_reg;
struct mlx5_ifc_mtpps_reg_bits mtpps_reg;
struct mlx5_ifc_mtppse_reg_bits mtppse_reg;
+ struct mlx5_ifc_fpga_ctrl_bits fpga_ctrl_bits;
+ struct mlx5_ifc_fpga_cap_bits fpga_cap_bits;
u8 reserved_at_0[0x60e0];
};
@@ -8270,17 +8292,7 @@
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
- u8 reserved_at_c0[0x4];
- u8 table_miss_mode[0x4];
- u8 reserved_at_c8[0x18];
-
- u8 reserved_at_e0[0x8];
- u8 table_miss_id[0x18];
-
- u8 reserved_at_100[0x8];
- u8 lag_master_next_table_id[0x18];
-
- u8 reserved_at_120[0x80];
+ struct mlx5_ifc_flow_table_context_bits flow_table_context;
};
struct mlx5_ifc_ets_tcn_config_reg_bits {
diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h
new file mode 100644
index 0000000..0032d10
--- /dev/null
+++ b/include/linux/mlx5/mlx5_ifc_fpga.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef MLX5_IFC_FPGA_H
+#define MLX5_IFC_FPGA_H
+
+struct mlx5_ifc_fpga_shell_caps_bits {
+ u8 max_num_qps[0x10];
+ u8 reserved_at_10[0x8];
+ u8 total_rcv_credits[0x8];
+
+ u8 reserved_at_20[0xe];
+ u8 qp_type[0x2];
+ u8 reserved_at_30[0x5];
+ u8 rae[0x1];
+ u8 rwe[0x1];
+ u8 rre[0x1];
+ u8 reserved_at_38[0x4];
+ u8 dc[0x1];
+ u8 ud[0x1];
+ u8 uc[0x1];
+ u8 rc[0x1];
+
+ u8 reserved_at_40[0x1a];
+ u8 log_ddr_size[0x6];
+
+ u8 max_fpga_qp_msg_size[0x20];
+
+ u8 reserved_at_80[0x180];
+};
+
+struct mlx5_ifc_fpga_cap_bits {
+ u8 fpga_id[0x8];
+ u8 fpga_device[0x18];
+
+ u8 register_file_ver[0x20];
+
+ u8 fpga_ctrl_modify[0x1];
+ u8 reserved_at_41[0x5];
+ u8 access_reg_query_mode[0x2];
+ u8 reserved_at_48[0x6];
+ u8 access_reg_modify_mode[0x2];
+ u8 reserved_at_50[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 image_version[0x20];
+
+ u8 image_date[0x20];
+
+ u8 image_time[0x20];
+
+ u8 shell_version[0x20];
+
+ u8 reserved_at_100[0x80];
+
+ struct mlx5_ifc_fpga_shell_caps_bits shell_caps;
+
+ u8 reserved_at_380[0x8];
+ u8 ieee_vendor_id[0x18];
+
+ u8 sandbox_product_version[0x10];
+ u8 sandbox_product_id[0x10];
+
+ u8 sandbox_basic_caps[0x20];
+
+ u8 reserved_at_3e0[0x10];
+ u8 sandbox_extended_caps_len[0x10];
+
+ u8 sandbox_extended_caps_addr[0x40];
+
+ u8 fpga_ddr_start_addr[0x40];
+
+ u8 fpga_cr_space_start_addr[0x40];
+
+ u8 fpga_ddr_size[0x20];
+
+ u8 fpga_cr_space_size[0x20];
+
+ u8 reserved_at_500[0x300];
+};
+
+struct mlx5_ifc_fpga_ctrl_bits {
+ u8 reserved_at_0[0x8];
+ u8 operation[0x8];
+ u8 reserved_at_10[0x8];
+ u8 status[0x8];
+
+ u8 reserved_at_20[0x8];
+ u8 flash_select_admin[0x8];
+ u8 reserved_at_30[0x8];
+ u8 flash_select_oper[0x8];
+
+ u8 reserved_at_40[0x40];
+};
+
+enum {
+ MLX5_FPGA_ERROR_EVENT_SYNDROME_CORRUPTED_DDR = 0x1,
+ MLX5_FPGA_ERROR_EVENT_SYNDROME_FLASH_TIMEOUT = 0x2,
+ MLX5_FPGA_ERROR_EVENT_SYNDROME_INTERNAL_LINK_ERROR = 0x3,
+ MLX5_FPGA_ERROR_EVENT_SYNDROME_WATCHDOG_FAILURE = 0x4,
+ MLX5_FPGA_ERROR_EVENT_SYNDROME_I2C_FAILURE = 0x5,
+ MLX5_FPGA_ERROR_EVENT_SYNDROME_IMAGE_CHANGED = 0x6,
+ MLX5_FPGA_ERROR_EVENT_SYNDROME_TEMPERATURE_CRITICAL = 0x7,
+};
+
+struct mlx5_ifc_fpga_error_event_bits {
+ u8 reserved_at_0[0x40];
+
+ u8 reserved_at_40[0x18];
+ u8 syndrome[0x8];
+
+ u8 reserved_at_60[0x80];
+};
+
+#endif /* MLX5_IFC_FPGA_H */
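The error-event layout above is consumed with the usual MLX5_GET() accessors. A hedged sketch of an EQ handler reacting to the syndrome (calling mlx5_trigger_health_work() is only one plausible reaction):

	u8 syndrome = MLX5_GET(fpga_error_event, eqe_data, syndrome);

	if (syndrome == MLX5_FPGA_ERROR_EVENT_SYNDROME_TEMPERATURE_CRITICAL)
		mlx5_trigger_health_work(mdev);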
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index e527732..c57d4b7 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -92,6 +92,19 @@
MLX5E_LINK_MODES_NUMBER,
};
+enum mlx5e_connector_type {
+ MLX5E_PORT_UNKNOWN = 0,
+ MLX5E_PORT_NONE = 1,
+ MLX5E_PORT_TP = 2,
+ MLX5E_PORT_AUI = 3,
+ MLX5E_PORT_BNC = 4,
+ MLX5E_PORT_MII = 5,
+ MLX5E_PORT_FIBRE = 6,
+ MLX5E_PORT_DA = 7,
+ MLX5E_PORT_OTHER = 8,
+ MLX5E_CONNECTOR_TYPE_NUMBER,
+};
+
#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index bef80d0..1f637f4 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -569,8 +569,6 @@
int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id);
int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
int reset, void *out, int out_size);
-int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
- u32 *out_of_buffer);
static inline const char *mlx5_qp_type_str(int type)
{
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 4ed952c..7c7118b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -824,7 +824,10 @@
struct netlink_ext_ack *extack;
};
/* XDP_QUERY_PROG */
- bool prog_attached;
+ struct {
+ bool prog_attached;
+ u32 prog_id;
+ };
};
};
@@ -971,7 +974,7 @@
* with PF and querying it may introduce a theoretical security risk.
* int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
- * int (*ndo_setup_tc)(struct net_device *dev, u32 handle,
+ * int (*ndo_setup_tc)(struct net_device *dev, u32 handle, u32 chain_index,
* __be16 protocol, struct tc_to_netdev *tc);
* Called to setup any 'tc' scheduler, classifier or action on @dev.
* This is always called from the stack with the rtnl lock held and netif
@@ -1221,7 +1224,7 @@
struct net_device *dev,
int vf, bool setting);
int (*ndo_setup_tc)(struct net_device *dev,
- u32 handle,
+ u32 handle, u32 chain_index,
__be16 protocol,
struct tc_to_netdev *tc);
#if IS_ENABLED(CONFIG_FCOE)
@@ -1823,7 +1826,7 @@
#ifdef CONFIG_NET_SCHED
DECLARE_HASHTABLE (qdisc_hash, 4);
#endif
- unsigned long tx_queue_len;
+ unsigned int tx_queue_len;
spinlock_t tx_global_lock;
int watchdog_timeo;
@@ -2456,6 +2459,7 @@
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
+struct net_device *dev_get_by_napi_id(unsigned int napi_id);
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
@@ -2573,9 +2577,7 @@
if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
__ret = __skb_gro_checksum_validate_complete(skb, \
compute_pseudo(skb, proto)); \
- if (__ret) \
- __skb_mark_checksum_bad(skb); \
- else \
+ if (!__ret) \
skb_gro_incr_csum_unnecessary(skb); \
__ret; \
})
@@ -3303,7 +3305,7 @@
typedef int (*xdp_op_t)(struct net_device *dev, struct netdev_xdp *xdp);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
int fd, u32 flags);
-bool __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op);
+bool __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op, u32 *prog_id);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
@@ -3931,6 +3933,10 @@
int dev_get_nest_level(struct net_device *dev);
int skb_checksum_help(struct sk_buff *skb);
+int skb_crc32c_csum_help(struct sk_buff *skb);
+int skb_csum_hwoffload_help(struct sk_buff *skb,
+ const netdev_features_t features);
+
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
netdev_features_t features, bool tx_path);
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
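On the driver side, the extended XDP_QUERY_PROG reports both attachment and the program id. A hedged sketch of a driver's ndo_xdp handler (priv and its xdp_prog field are hypothetical):

	case XDP_QUERY_PROG:
		xdp->prog_attached = !!priv->xdp_prog;
		xdp->prog_id = priv->xdp_prog ?
			       priv->xdp_prog->aux->id : 0;
		return 0;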
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 5fff5ba..8664fd2 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -97,6 +97,21 @@
#define NL_SET_ERR_MSG_MOD(extack, msg) \
NL_SET_ERR_MSG((extack), KBUILD_MODNAME ": " msg)
+#define NL_SET_BAD_ATTR(extack, attr) do { \
+ if ((extack)) \
+ (extack)->bad_attr = (attr); \
+} while (0)
+
+#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do { \
+ static const char __msg[] = (msg); \
+ struct netlink_ext_ack *__extack = (extack); \
+ \
+ if (__extack) { \
+ __extack->_msg = __msg; \
+ __extack->bad_attr = (attr); \
+ } \
+} while (0)
+
extern void netlink_kernel_release(struct sock *sk);
extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
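NL_SET_ERR_MSG_ATTR() lets validation code point userspace at the exact offending attribute along with a message. A minimal sketch, assuming a parsed struct nlattr *attr and a struct netlink_ext_ack *extack:

	if (nla_len(attr) != sizeof(u32)) {
		NL_SET_ERR_MSG_ATTR(extack, attr, "invalid attribute length");
		return -EINVAL;
	}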
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index ba35ba5..f5db93b 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -27,11 +27,33 @@
phy_interface_t iface);
extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
-extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np);
extern int of_phy_register_fixed_link(struct device_node *np);
extern void of_phy_deregister_fixed_link(struct device_node *np);
extern bool of_phy_is_fixed_link(struct device_node *np);
+
+static inline int of_mdio_parse_addr(struct device *dev,
+ const struct device_node *np)
+{
+ u32 addr;
+ int ret;
+
+ ret = of_property_read_u32(np, "reg", &addr);
+ if (ret < 0) {
+ dev_err(dev, "%s has invalid PHY address\n", np->full_name);
+ return ret;
+ }
+
+ /* A PHY must have a reg property in the range [0-31] */
+ if (addr >= PHY_MAX_ADDR) {
+ dev_err(dev, "%s PHY address %i is too large\n",
+ np->full_name, addr);
+ return -EINVAL;
+ }
+
+ return addr;
+}
+
#else /* CONFIG_OF_MDIO */
static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
{
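Inlining of_mdio_parse_addr() makes it available to any includer of this header. A sketch of a bus-driver caller walking child nodes:

	for_each_available_child_of_node(np, child) {
		int addr = of_mdio_parse_addr(dev, child);

		if (addr < 0)
			continue;	/* missing or invalid "reg" */
		/* ... probe the PHY at 'addr' ... */
	}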
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 24a6358..8fc5f0f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -896,7 +896,7 @@
void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
int src_cpu, int dst_cpu);
-extern u64 perf_event_read_local(struct perf_event *event);
+int perf_event_read_local(struct perf_event *event, u64 *value);
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
@@ -1301,7 +1301,10 @@
{
return ERR_PTR(-EINVAL);
}
-static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; }
+static inline int perf_event_read_local(struct perf_event *event, u64 *value)
+{
+ return -EINVAL;
+}
static inline void perf_event_print_debug(void) { }
static inline int perf_event_task_disable(void) { return -EINVAL; }
static inline int perf_event_task_enable(void) { return -EINVAL; }
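The new signature moves the counter value into an out parameter so errors are unambiguous. Callers change along these lines:

	u64 value;
	int err = perf_event_read_local(event, &value);

	if (err)
		return err;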
diff --git a/include/linux/phy.h b/include/linux/phy.h
index e76e4ad..23d2e46 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -58,8 +58,7 @@
#define PHY_IGNORE_INTERRUPT -2
#define PHY_HAS_INTERRUPT 0x00000001
-#define PHY_HAS_MAGICANEG 0x00000002
-#define PHY_IS_INTERNAL 0x00000004
+#define PHY_IS_INTERNAL 0x00000002
#define MDIO_DEVICE_IS_PHY 0x80000000
/* Interface Mode definitions */
@@ -84,6 +83,9 @@
PHY_INTERFACE_MODE_1000BASEX,
PHY_INTERFACE_MODE_2500BASEX,
PHY_INTERFACE_MODE_RXAUI,
+ PHY_INTERFACE_MODE_XAUI,
+ /* 10GBASE-KR, XFI, SFI - single lane 10G Serdes */
+ PHY_INTERFACE_MODE_10GKR,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
@@ -150,6 +152,10 @@
return "2500base-x";
case PHY_INTERFACE_MODE_RXAUI:
return "rxaui";
+ case PHY_INTERFACE_MODE_XAUI:
+ return "xaui";
+ case PHY_INTERFACE_MODE_10GKR:
+ return "10gbase-kr";
default:
return "unknown";
}
@@ -220,10 +226,8 @@
/* GPIO reset pulse width in microseconds */
int reset_delay_us;
- /* Number of reset GPIOs */
- int num_reset_gpios;
- /* Array of RESET GPIO descriptors */
- struct gpio_desc **reset_gpiod;
+ /* RESET GPIO descriptor pointer */
+ struct gpio_desc *reset_gpiod;
};
#define to_mii_bus(d) container_of(d, struct mii_bus, dev)
@@ -364,6 +368,7 @@
* is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc.
* has_fixups: Set to true if this phy has fixups/quirks.
* suspended: Set to true if this phy has been suspended successfully.
+ * sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
* state: state of the PHY for management purposes
* dev_flags: Device-specific flags used by the PHY driver.
* link_timeout: The number of timer firings to wait before the
@@ -400,6 +405,7 @@
bool is_pseudo_fixed_link;
bool has_fixups;
bool suspended;
+ bool sysfs_links;
enum phy_state state;
@@ -717,14 +723,24 @@
}
/**
+ * phy_interface_mode_is_rgmii - Convenience function for testing if a
+ * PHY interface mode is RGMII (all variants)
+ * @mode: the phy_interface_t enum
+ */
+static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
+{
+ return mode >= PHY_INTERFACE_MODE_RGMII &&
+ mode <= PHY_INTERFACE_MODE_RGMII_TXID;
+};
+
+/**
* phy_interface_is_rgmii - Convenience function for testing if a PHY interface
* is RGMII (all variants)
* @phydev: the phy_device struct
*/
static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
{
- return phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
- phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID;
+ return phy_interface_mode_is_rgmii(phydev->interface);
};
/*
@@ -793,6 +809,7 @@
int phy_aneg_done(struct phy_device *phydev);
int phy_stop_interrupts(struct phy_device *phydev);
+int phy_restart_aneg(struct phy_device *phydev);
static inline int phy_read_status(struct phy_device *phydev)
{
@@ -816,6 +833,8 @@
void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
__printf(2, 3);
void phy_attached_info(struct phy_device *phydev);
+
+/* Clause 22 PHY */
int genphy_config_init(struct phy_device *phydev);
int genphy_setup_forced(struct phy_device *phydev);
int genphy_restart_aneg(struct phy_device *phydev);
@@ -830,6 +849,16 @@
{
return 0;
}
+
+/* Clause 45 PHY */
+int genphy_c45_restart_aneg(struct phy_device *phydev);
+int genphy_c45_aneg_done(struct phy_device *phydev);
+int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask);
+int genphy_c45_read_lpa(struct phy_device *phydev);
+int genphy_c45_read_pma(struct phy_device *phydev);
+int genphy_c45_pma_setup_forced(struct phy_device *phydev);
+int genphy_c45_an_disable_aneg(struct phy_device *phydev);
+
void phy_driver_unregister(struct phy_driver *drv);
void phy_drivers_unregister(struct phy_driver *drv, int n);
int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
@@ -843,9 +872,8 @@
void phy_stop_machine(struct phy_device *phydev);
void phy_trigger_machine(struct phy_device *phydev, bool sync);
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
-int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
-int phy_ethtool_ksettings_get(struct phy_device *phydev,
- struct ethtool_link_ksettings *cmd);
+void phy_ethtool_ksettings_get(struct phy_device *phydev,
+ struct ethtool_link_ksettings *cmd);
int phy_ethtool_ksettings_set(struct phy_device *phydev,
const struct ethtool_link_ksettings *cmd);
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
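Since phy_ethtool_ksettings_get() can no longer fail, a MAC driver's ethtool hook reduces to a call plus a constant return. A hedged sketch (the foo_ name is hypothetical):

	static int foo_get_link_ksettings(struct net_device *ndev,
					  struct ethtool_link_ksettings *cmd)
	{
		phy_ethtool_ksettings_get(ndev->phydev, cmd);
		return 0;
	}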
diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h
new file mode 100644
index 0000000..84789ca
--- /dev/null
+++ b/include/linux/platform_data/microchip-ksz.h
@@ -0,0 +1,29 @@
+/*
+ * Microchip KSZ series switch platform data
+ *
+ * Copyright (C) 2017
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MICROCHIP_KSZ_H
+#define __MICROCHIP_KSZ_H
+
+#include <linux/kernel.h>
+
+struct ksz_platform_data {
+ u32 chip_id;
+ u16 enabled_ports;
+};
+
+#endif
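Board code would hand this to the switch driver as ordinary platform data. A hedged sketch with purely illustrative values:

	static struct ksz_platform_data ksz_pdata = {
		.chip_id	= 0x00947700,	/* illustrative chip id */
		.enabled_ports	= 0x1f,		/* ports 1-5 */
	};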
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 6b2e0dd..d8c97ec 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -278,6 +278,22 @@
return ptr;
}
+static inline int __ptr_ring_consume_batched(struct ptr_ring *r,
+ void **array, int n)
+{
+ void *ptr;
+ int i;
+
+ for (i = 0; i < n; i++) {
+ ptr = __ptr_ring_consume(r);
+ if (!ptr)
+ break;
+ array[i] = ptr;
+ }
+
+ return i;
+}
+
/*
* Note: resize (below) nests producer lock within consumer lock, so if you
* call this in interrupt or BH context, you must disable interrupts/BH when
@@ -328,6 +344,55 @@
return ptr;
}
+static inline int ptr_ring_consume_batched(struct ptr_ring *r,
+ void **array, int n)
+{
+ int ret;
+
+ spin_lock(&r->consumer_lock);
+ ret = __ptr_ring_consume_batched(r, array, n);
+ spin_unlock(&r->consumer_lock);
+
+ return ret;
+}
+
+static inline int ptr_ring_consume_batched_irq(struct ptr_ring *r,
+ void **array, int n)
+{
+ int ret;
+
+ spin_lock_irq(&r->consumer_lock);
+ ret = __ptr_ring_consume_batched(r, array, n);
+ spin_unlock_irq(&r->consumer_lock);
+
+ return ret;
+}
+
+static inline int ptr_ring_consume_batched_any(struct ptr_ring *r,
+ void **array, int n)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&r->consumer_lock, flags);
+ ret = __ptr_ring_consume_batched(r, array, n);
+ spin_unlock_irqrestore(&r->consumer_lock, flags);
+
+ return ret;
+}
+
+static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
+ void **array, int n)
+{
+ int ret;
+
+ spin_lock_bh(&r->consumer_lock);
+ ret = __ptr_ring_consume_batched(r, array, n);
+ spin_unlock_bh(&r->consumer_lock);
+
+ return ret;
+}
+
/* Cast to structure type and call a function without discarding from FIFO.
* Function must return a value.
* Callers must take consumer_lock.
@@ -403,6 +468,61 @@
return 0;
}
+/*
+ * Return entries into ring. Destroy entries that don't fit.
+ *
+ * Note: this is expected to be a rare slow path operation.
+ *
+ * Note: producer lock is nested within consumer lock, so if you
+ * resize you must make sure all uses nest correctly.
+ * In particular if you consume ring in interrupt or BH context, you must
+ * disable interrupts/BH when doing so.
+ */
+static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,
+ void (*destroy)(void *))
+{
+ unsigned long flags;
+ int head;
+
+ spin_lock_irqsave(&r->consumer_lock, flags);
+ spin_lock(&r->producer_lock);
+
+ if (!r->size)
+ goto done;
+
+ /*
+ * Clean out buffered entries (for simplicity). This way the following
+ * code can test entries for NULL and, if not NULL, assume they are valid.
+ */
+ head = r->consumer_head - 1;
+ while (likely(head >= r->consumer_tail))
+ r->queue[head--] = NULL;
+ r->consumer_tail = r->consumer_head;
+
+ /*
+ * Go over entries in batch, start moving head back and copy entries.
+ * Stop when we run into previously unconsumed entries.
+ */
+ while (n) {
+ head = r->consumer_head - 1;
+ if (head < 0)
+ head = r->size - 1;
+ if (r->queue[head]) {
+ /* This batch entry will have to be destroyed. */
+ goto done;
+ }
+ r->queue[head] = batch[--n];
+ r->consumer_tail = r->consumer_head = head;
+ }
+
+done:
+ /* Destroy all entries left in the batch. */
+ while (n)
+ destroy(batch[--n]);
+ spin_unlock(&r->producer_lock);
+ spin_unlock_irqrestore(&r->consumer_lock, flags);
+}
+
static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
int size, gfp_t gfp,
void (*destroy)(void *))
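Together, the batched consume helpers and ptr_ring_unconsume() let a consumer drain entries in bulk and return what it could not process. A hedged sketch (handle() and destroy_cb() are hypothetical):

	void *batch[16];
	int n, i;

	n = ptr_ring_consume_batched_bh(ring, batch, ARRAY_SIZE(batch));
	for (i = 0; i < n; i++)
		handle(batch[i]);

	/* on teardown, unprocessed entries can be pushed back
	 * (anything that no longer fits is destroyed): */
	ptr_ring_unconsume(ring, batch, n, destroy_cb);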
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index fbab6e0..a567cbf 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -96,12 +96,12 @@
#define CORE_SPQE_PAGE_SIZE_BYTES 4096
-#define MAX_NUM_LL2_RX_QUEUES 32
-#define MAX_NUM_LL2_TX_STATS_COUNTERS 32
+#define MAX_NUM_LL2_RX_QUEUES 48
+#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 15
-#define FW_REVISION_VERSION 3
+#define FW_MINOR_VERSION 20
+#define FW_REVISION_VERSION 0
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -181,6 +181,14 @@
#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12)
#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff)
+
+#define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0)
+#define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_TYPE (2)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5)
+
/*****************/
/* DQ CONSTANTS */
/*****************/
@@ -457,7 +465,6 @@
#define PXP_BAR_DQ 1
/* PTT and GTT */
-#define PXP_NUM_PF_WINDOWS 12
#define PXP_PER_PF_ENTRY_SIZE 8
#define PXP_NUM_GLOBAL_WINDOWS 243
#define PXP_GLOBAL_ENTRY_SIZE 4
@@ -482,6 +489,7 @@
#define PXP_PF_ME_OPAQUE_ADDR 0x1f8
#define PXP_PF_ME_CONCRETE_ADDR 0x1fc
+#define PXP_NUM_PF_WINDOWS 12
#define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000
#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS
#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000
@@ -618,16 +626,21 @@
/*****************/
/* PRM CONSTANTS */
/*****************/
-#define PRM_DMA_PAD_BYTES_NUM 2
-/******************/
-/* SDMs CONSTANTS */
-/******************/
-#define SDM_OP_GEN_TRIG_NONE 0
-#define SDM_OP_GEN_TRIG_WAKE_THREAD 1
-#define SDM_OP_GEN_TRIG_AGG_INT 2
-#define SDM_OP_GEN_TRIG_LOADER 4
-#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6
-#define SDM_OP_GEN_TRIG_RELEASE_THREAD 7
+#define PRM_DMA_PAD_BYTES_NUM 2
+/*****************/
+/* SDMs CONSTANTS */
+/*****************/
+
+#define SDM_OP_GEN_TRIG_NONE 0
+#define SDM_OP_GEN_TRIG_WAKE_THREAD 1
+#define SDM_OP_GEN_TRIG_AGG_INT 2
+#define SDM_OP_GEN_TRIG_LOADER 4
+#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6
+#define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9
+
+/********************/
+/* Completion types */
+/********************/
#define SDM_COMP_TYPE_NONE 0
#define SDM_COMP_TYPE_WAKE_THREAD 1
@@ -638,10 +651,11 @@
#define SDM_COMP_TYPE_INDICATE_ERROR 6
#define SDM_COMP_TYPE_RELEASE_THREAD 7
#define SDM_COMP_TYPE_RAM 8
+#define SDM_COMP_TYPE_INC_ORDER_CNT 9
-/******************/
-/* PBF CONSTANTS */
-/******************/
+/*****************/
+/* PBF Constants */
+/*****************/
/* Number of PBF command queue lines. Each line is 32B. */
#define PBF_MAX_CMD_LINES 3328
@@ -861,7 +875,7 @@
/* Enum of doorbell DPM types */
enum db_dpm_type {
DPM_LEGACY,
- DPM_ROCE,
+ DPM_RDMA,
DPM_L2_INLINE,
DPM_L2_BD,
MAX_DB_DPM_TYPE
@@ -884,8 +898,8 @@
#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
-#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1
-#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31
+#define DB_L2_DPM_DATA_GFS_SRC_EN_MASK 0x1
+#define DB_L2_DPM_DATA_GFS_SRC_EN_SHIFT 31
};
/* Structure for SGE in a DPM doorbell of type DPM_L2_BD */
@@ -931,31 +945,33 @@
};
/* Parameters to RoCE firmware, passed in EDPM doorbell */
-struct db_roce_dpm_params {
+struct db_rdma_dpm_params {
__le32 params;
-#define DB_ROCE_DPM_PARAMS_SIZE_MASK 0x3F
-#define DB_ROCE_DPM_PARAMS_SIZE_SHIFT 0
-#define DB_ROCE_DPM_PARAMS_DPM_TYPE_MASK 0x3
-#define DB_ROCE_DPM_PARAMS_DPM_TYPE_SHIFT 6
-#define DB_ROCE_DPM_PARAMS_OPCODE_MASK 0xFF
-#define DB_ROCE_DPM_PARAMS_OPCODE_SHIFT 8
-#define DB_ROCE_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
-#define DB_ROCE_DPM_PARAMS_WQE_SIZE_SHIFT 16
-#define DB_ROCE_DPM_PARAMS_RESERVED0_MASK 0x1
-#define DB_ROCE_DPM_PARAMS_RESERVED0_SHIFT 27
-#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
-#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
-#define DB_ROCE_DPM_PARAMS_S_FLG_MASK 0x1
-#define DB_ROCE_DPM_PARAMS_S_FLG_SHIFT 29
-#define DB_ROCE_DPM_PARAMS_RESERVED1_MASK 0x3
-#define DB_ROCE_DPM_PARAMS_RESERVED1_SHIFT 30
+#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6
+#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
+#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
+#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
};
/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */
-struct db_roce_dpm_data {
+struct db_rdma_dpm_data {
__le16 icid;
__le16 prod_val;
- struct db_roce_dpm_params params;
+ struct db_rdma_dpm_params params;
};
/* Igu interrupt command */
@@ -1026,6 +1042,42 @@
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
};
+struct parsing_err_flags {
+ __le16 flags;
+#define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_MAC_ERROR_SHIFT 0
+#define PARSING_ERR_FLAGS_TRUNC_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_TRUNC_ERROR_SHIFT 1
+#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_MASK 0x1
+#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_SHIFT 2
+#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_SHIFT 3
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_SHIFT 4
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_SHIFT 5
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_SHIFT 6
+#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_SHIFT 7
+#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_SHIFT 8
+#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_MASK 0x1
+#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_SHIFT 9
+#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_SHIFT 10
+#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_SHIFT 11
+#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_MASK 0x1
+#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_SHIFT 12
+#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_MASK 0x1
+#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_SHIFT 13
+#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_SHIFT 14
+#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15
+};
+
struct pb_context {
__le32 crc[4];
};
@@ -1288,39 +1340,56 @@
struct timers_context {
__le32 logical_client_0;
-#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF
-#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0
-#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1
-#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28
-#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1
-#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29
-#define TIMERS_CONTEXT_RESERVED0_MASK 0x3
-#define TIMERS_CONTEXT_RESERVED0_SHIFT 30
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED0_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED0_SHIFT 27
+#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1
+#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28
+#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1
+#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED1_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED1_SHIFT 30
__le32 logical_client_1;
-#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF
-#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0
-#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1
-#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28
-#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1
-#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29
-#define TIMERS_CONTEXT_RESERVED1_MASK 0x3
-#define TIMERS_CONTEXT_RESERVED1_SHIFT 30
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0x7FFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED2_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED2_SHIFT 27
+#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1
+#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28
+#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1
+#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED3_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED3_SHIFT 30
__le32 logical_client_2;
-#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF
-#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0
-#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1
-#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28
-#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1
-#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29
-#define TIMERS_CONTEXT_RESERVED2_MASK 0x3
-#define TIMERS_CONTEXT_RESERVED2_SHIFT 30
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0x7FFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED4_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED4_SHIFT 27
+#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1
+#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28
+#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1
+#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED5_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED5_SHIFT 30
__le32 host_expiration_fields;
-#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0xFFFFFFF
-#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0
-#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1
-#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28
-#define TIMERS_CONTEXT_RESERVED3_MASK 0x7
-#define TIMERS_CONTEXT_RESERVED3_SHIFT 29
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0x7FFFFFF
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED6_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED6_SHIFT 27
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28
+#define TIMERS_CONTEXT_RESERVED7_MASK 0x7
+#define TIMERS_CONTEXT_RESERVED7_SHIFT 29
};
+
+enum tunnel_next_protocol {
+ e_unknown = 0,
+ e_l2 = 1,
+ e_ipv4 = 2,
+ e_ipv6 = 3,
+ MAX_TUNNEL_NEXT_PROTOCOL
+};
+
#endif /* __COMMON_HSI__ */
#endif
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index 34d93eb..cb06e6e 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -75,7 +75,8 @@
(ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
/* Maximum number of buffers, used for RX packet placement */
-#define ETH_RX_MAX_BUFF_PER_PKT 5
+#define ETH_RX_MAX_BUFF_PER_PKT 5
+#define ETH_RX_BD_THRESHOLD 12
/* num of MAC/VLAN filters */
#define ETH_NUM_MAC_FILTERS 512
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
index 947a635..12fc9e7 100644
--- a/include/linux/qed/fcoe_common.h
+++ b/include/linux/qed/fcoe_common.h
@@ -13,7 +13,6 @@
/*********************/
#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12
-#define FCOE_MAX_SIZE_FCP_DATA_SUPER (8600)
struct fcoe_abts_pkt {
__le32 abts_rsp_fc_payload_lo;
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 69949f8..85e086c 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -75,25 +75,13 @@
#define ISCSI_TARGET_MODE 1
/* iSCSI request op codes */
-#define ISCSI_OPCODE_NOP_OUT_NO_IMM (0)
-#define ISCSI_OPCODE_NOP_OUT ( \
- ISCSI_OPCODE_NOP_OUT_NO_IMM | 0x40)
-#define ISCSI_OPCODE_SCSI_CMD_NO_IMM (1)
-#define ISCSI_OPCODE_SCSI_CMD ( \
- ISCSI_OPCODE_SCSI_CMD_NO_IMM | 0x40)
-#define ISCSI_OPCODE_TMF_REQUEST_NO_IMM (2)
-#define ISCSI_OPCODE_TMF_REQUEST ( \
- ISCSI_OPCODE_TMF_REQUEST_NO_IMM | 0x40)
-#define ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM (3)
-#define ISCSI_OPCODE_LOGIN_REQUEST ( \
- ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM | 0x40)
-#define ISCSI_OPCODE_TEXT_REQUEST_NO_IMM (4)
-#define ISCSI_OPCODE_TEXT_REQUEST ( \
- ISCSI_OPCODE_TEXT_REQUEST_NO_IMM | 0x40)
-#define ISCSI_OPCODE_DATA_OUT (5)
-#define ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM (6)
-#define ISCSI_OPCODE_LOGOUT_REQUEST ( \
- ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM | 0x40)
+#define ISCSI_OPCODE_NOP_OUT (0)
+#define ISCSI_OPCODE_SCSI_CMD (1)
+#define ISCSI_OPCODE_TMF_REQUEST (2)
+#define ISCSI_OPCODE_LOGIN_REQUEST (3)
+#define ISCSI_OPCODE_TEXT_REQUEST (4)
+#define ISCSI_OPCODE_DATA_OUT (5)
+#define ISCSI_OPCODE_LOGOUT_REQUEST (6)
/* iSCSI response/messages op codes */
#define ISCSI_OPCODE_NOP_IN (0x20)
@@ -172,17 +160,23 @@
struct iscsi_cmd_hdr {
__le16 reserved1;
u8 flags_attr;
-#define ISCSI_CMD_HDR_ATTR_MASK 0x7
-#define ISCSI_CMD_HDR_ATTR_SHIFT 0
-#define ISCSI_CMD_HDR_RSRV_MASK 0x3
-#define ISCSI_CMD_HDR_RSRV_SHIFT 3
-#define ISCSI_CMD_HDR_WRITE_MASK 0x1
-#define ISCSI_CMD_HDR_WRITE_SHIFT 5
-#define ISCSI_CMD_HDR_READ_MASK 0x1
-#define ISCSI_CMD_HDR_READ_SHIFT 6
-#define ISCSI_CMD_HDR_FINAL_MASK 0x1
-#define ISCSI_CMD_HDR_FINAL_SHIFT 7
- u8 opcode;
+#define ISCSI_CMD_HDR_ATTR_MASK 0x7
+#define ISCSI_CMD_HDR_ATTR_SHIFT 0
+#define ISCSI_CMD_HDR_RSRV_MASK 0x3
+#define ISCSI_CMD_HDR_RSRV_SHIFT 3
+#define ISCSI_CMD_HDR_WRITE_MASK 0x1
+#define ISCSI_CMD_HDR_WRITE_SHIFT 5
+#define ISCSI_CMD_HDR_READ_MASK 0x1
+#define ISCSI_CMD_HDR_READ_SHIFT 6
+#define ISCSI_CMD_HDR_FINAL_MASK 0x1
+#define ISCSI_CMD_HDR_FINAL_SHIFT 7
+ u8 hdr_first_byte;
+#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F
+#define ISCSI_CMD_HDR_OPCODE_SHIFT 0
+#define ISCSI_CMD_HDR_IMM_MASK 0x1
+#define ISCSI_CMD_HDR_IMM_SHIFT 6
+#define ISCSI_CMD_HDR_RSRV1_MASK 0x1
+#define ISCSI_CMD_HDR_RSRV1_SHIFT 7
__le32 hdr_second_dword;
#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0
@@ -790,9 +784,9 @@
ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR,
ISCSI_CONN_ERROR_DATA_OVERRUN,
ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR,
- ISCSI_CONN_ERROR_TCP_SEG_PROC_URG_ERROR,
- ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR,
- ISCSI_CONN_ERROR_TCP_SEG_PROC_CONNECT_INVALID_WS_OPTION,
+ ISCSI_CONN_ERROR_IP_OPTIONS_ERROR,
+ ISCSI_CONN_ERROR_PRS_ERRORS,
+ ISCSI_CONN_ERROR_CONNECT_INVALID_TCP_OPTION,
ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR,
ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN,
ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_TYPE,
@@ -1304,22 +1298,6 @@
struct regpair iscsi_tx_total_pdu_cnt;
};
-struct iscsi_db_data {
- u8 params;
-#define ISCSI_DB_DATA_DEST_MASK 0x3
-#define ISCSI_DB_DATA_DEST_SHIFT 0
-#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3
-#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2
-#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1
-#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4
-#define ISCSI_DB_DATA_RESERVED_MASK 0x1
-#define ISCSI_DB_DATA_RESERVED_SHIFT 5
-#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3
-#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6
- u8 agg_flags;
- __le16 sq_prod;
-};
-
struct tstorm_iscsi_task_ag_ctx {
u8 byte0;
u8 byte1;
@@ -1398,5 +1376,20 @@
__le32 reg1;
__le32 reg2;
};
+struct iscsi_db_data {
+ u8 params;
+#define ISCSI_DB_DATA_DEST_MASK 0x3
+#define ISCSI_DB_DATA_DEST_SHIFT 0
+#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3
+#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2
+#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1
+#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4
+#define ISCSI_DB_DATA_RESERVED_MASK 0x1
+#define ISCSI_DB_DATA_RESERVED_SHIFT 5
+#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags;
+ __le16 sq_prod;
+};
#endif /* __ISCSI_COMMON__ */
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 5cd7a46..59ddf9a 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -80,6 +80,11 @@
u32 cons_page_idx;
};
+struct qed_chain_ext_pbl {
+ dma_addr_t p_pbl_phys;
+ void *p_pbl_virt;
+};
+
struct qed_chain_u16 {
/* Cyclic index of next element to produce/consume */
u16 prod_idx;
@@ -155,6 +160,8 @@
u32 size;
u8 intended_use;
+
+ bool b_external_pbl;
};
#define QED_CHAIN_PBL_ENTRY_SIZE (8)
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index d66d16a..0eef0a2 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -47,8 +47,7 @@
/* Relative, but relevant only for PFs */
u8 stats_id;
- /* These are always absolute */
- u16 sb;
+ struct qed_sb_info *p_sb;
u8 sb_idx;
};
@@ -74,6 +73,9 @@
/* Legacy VF - this affects the datapath, so qede has to know */
bool is_legacy;
+
+ /* Might depend on available resources [in case of VF] */
+ bool xdp_supported;
};
struct qed_update_vport_rss_params {
diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h
index bd6bcb8..1e015c5 100644
--- a/include/linux/qed/qed_fcoe_if.h
+++ b/include/linux/qed/qed_fcoe_if.h
@@ -24,6 +24,11 @@
void __iomem *primary_dbq_rq_addr;
void __iomem *secondary_bdq_rq_addr;
+
+ u64 wwpn;
+ u64 wwnn;
+
+ u8 num_cqs;
};
struct qed_fcoe_params_offload {
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index c70ac13..ef39c7f 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -156,6 +156,11 @@
struct qed_dcbx_admin_params local;
};
+enum qed_nvm_images {
+ QED_NVM_IMAGE_ISCSI_CFG,
+ QED_NVM_IMAGE_FCOE_CFG,
+};
+
enum qed_led_mode {
QED_LED_MODE_OFF,
QED_LED_MODE_ON,
@@ -180,6 +185,10 @@
*/
u16 num_cons;
+ /* per-VF number of CIDs */
+ u8 num_vf_cons;
+#define ETH_PF_PARAMS_VF_CONS_DEFAULT (32)
+
/* To enable arfs, previous to HW-init a positive number needs to be
* set [as filters require allocated searcher ILT memory].
* This will set the maximal number of configured steering-filters.
@@ -328,6 +337,14 @@
/* MFW version */
u32 mfw_rev;
+#define QED_MFW_VERSION_0_MASK 0x000000FF
+#define QED_MFW_VERSION_0_OFFSET 0
+#define QED_MFW_VERSION_1_MASK 0x0000FF00
+#define QED_MFW_VERSION_1_OFFSET 8
+#define QED_MFW_VERSION_2_MASK 0x00FF0000
+#define QED_MFW_VERSION_2_OFFSET 16
+#define QED_MFW_VERSION_3_MASK 0xFF000000
+#define QED_MFW_VERSION_3_OFFSET 24
u32 flash_size;
u8 mf_mode;
@@ -337,12 +354,23 @@
bool wol_support;
+ /* MBI version */
+ u32 mbi_version;
+#define QED_MBI_VERSION_0_MASK 0x000000FF
+#define QED_MBI_VERSION_0_OFFSET 0
+#define QED_MBI_VERSION_1_MASK 0x0000FF00
+#define QED_MBI_VERSION_1_OFFSET 8
+#define QED_MBI_VERSION_2_MASK 0x00FF0000
+#define QED_MBI_VERSION_2_OFFSET 16
+
enum qed_dev_type dev_type;
/* Output parameters for qede */
bool vxlan_enable;
bool gre_enable;
bool geneve_enable;
+
+ u8 abs_pf_id;
};
enum qed_sb_type {
@@ -503,9 +531,7 @@
int (*set_power_state)(struct qed_dev *cdev,
pci_power_t state);
- void (*set_id)(struct qed_dev *cdev,
- char name[],
- char ver_str[]);
+ void (*set_name) (struct qed_dev *cdev, char name[]);
/* Client drivers need to make this call before slowpath_start.
* PF params required for the call before slowpath_start is
@@ -608,12 +634,26 @@
enum qed_chain_cnt_type cnt_type,
u32 num_elems,
size_t elem_size,
- struct qed_chain *p_chain);
+ struct qed_chain *p_chain,
+ struct qed_chain_ext_pbl *ext_pbl);
void (*chain_free)(struct qed_dev *cdev,
struct qed_chain *p_chain);
/**
+ * @brief nvm_get_image - reads an entire image from nvram
+ *
+ * @param cdev
+ * @param type - type of the requested nvram image
+ * @param buf - preallocated buffer to fill with the image
+ * @param len - length of the allocated buffer
+ *
+ * @return 0 on success, error otherwise
+ */
+ int (*nvm_get_image)(struct qed_dev *cdev,
+ enum qed_nvm_images type, u8 *buf, u16 len);
+
+/**
* @brief get_coalesce - Get coalesce parameters in usec
*
* @param cdev
@@ -700,11 +740,13 @@
(((value) >> (name ## _SHIFT)) & name ## _MASK)
/* Debug print definitions */
-#define DP_ERR(cdev, fmt, ...) \
- pr_err("[%s:%d(%s)]" fmt, \
- __func__, __LINE__, \
- DP_NAME(cdev) ? DP_NAME(cdev) : "", \
- ## __VA_ARGS__) \
+#define DP_ERR(cdev, fmt, ...) \
+ do { \
+ pr_err("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ DP_NAME(cdev) ? DP_NAME(cdev) : "", \
+ ## __VA_ARGS__); \
+ } while (0)
#define DP_NOTICE(cdev, fmt, ...) \
do { \
@@ -869,9 +911,15 @@
#define TX_PI(tc) (RX_PI + 1 + tc)
struct qed_sb_cnt_info {
- int sb_cnt;
- int sb_iov_cnt;
- int sb_free_blk;
+ /* Original, current, and free SBs for PF */
+ int orig;
+ int cnt;
+ int free_cnt;
+
+ /* Original, current, and free SBs for child VFs */
+ int iov_orig;
+ int iov_cnt;
+ int free_cnt_iov;
};
static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
index 3414649..111e606 100644
--- a/include/linux/qed/qed_iscsi_if.h
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -210,6 +210,11 @@
* @param stats - pointer to struct that would be filled
* with stats
* @return 0 on success, error otherwise.
+ * @change_mac Change MAC of interface
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param mac - new MAC to configure.
+ * @return 0 on success, otherwise error value.
*/
struct qed_iscsi_ops {
const struct qed_common_ops *common;
@@ -248,6 +253,8 @@
int (*get_stats)(struct qed_dev *cdev,
struct qed_iscsi_stats *stats);
+
+ int (*change_mac)(struct qed_dev *cdev, u32 handle, const u8 *mac);
};
const struct qed_iscsi_ops *qed_get_iscsi_ops(void);
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index 4fb4666..5958b45e 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -43,6 +43,35 @@
#include <linux/slab.h>
#include <linux/qed/qed_if.h>
+enum qed_ll2_conn_type {
+ QED_LL2_TYPE_FCOE,
+ QED_LL2_TYPE_ISCSI,
+ QED_LL2_TYPE_TEST,
+ QED_LL2_TYPE_ISCSI_OOO,
+ QED_LL2_TYPE_RESERVED2,
+ QED_LL2_TYPE_ROCE,
+ QED_LL2_TYPE_RESERVED3,
+ MAX_QED_LL2_RX_CONN_TYPE
+};
+
+enum qed_ll2_roce_flavor_type {
+ QED_LL2_ROCE,
+ QED_LL2_RROCE,
+ MAX_QED_LL2_ROCE_FLAVOR_TYPE
+};
+
+enum qed_ll2_tx_dest {
+ QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */
+ QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */
+ QED_LL2_TX_DEST_MAX
+};
+
+enum qed_ll2_error_handle {
+ QED_LL2_DROP_PACKET,
+ QED_LL2_DO_NOTHING,
+ QED_LL2_ASSERT,
+};
+
struct qed_ll2_stats {
u64 gsi_invalid_hdr;
u64 gsi_invalid_pkt_length;
@@ -67,6 +96,105 @@
u64 sent_bcast_pkts;
};
+struct qed_ll2_comp_rx_data {
+ void *cookie;
+ dma_addr_t rx_buf_addr;
+ u16 parse_flags;
+ u16 vlan;
+ bool b_last_packet;
+ u8 connection_handle;
+
+ union {
+ u16 packet_length;
+ u16 data_length;
+ } length;
+
+ u32 opaque_data_0;
+ u32 opaque_data_1;
+
+ /* GSI only */
+ u32 gid_dst[4];
+ u16 qp_id;
+
+ union {
+ u8 placement_offset;
+ u8 data_length_error;
+ } u;
+};
+
+typedef
+void (*qed_ll2_complete_rx_packet_cb)(void *cxt,
+ struct qed_ll2_comp_rx_data *data);
+
+typedef
+void (*qed_ll2_release_rx_packet_cb)(void *cxt,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ bool b_last_packet);
+
+typedef
+void (*qed_ll2_complete_tx_packet_cb)(void *cxt,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment,
+ bool b_last_packet);
+
+typedef
+void (*qed_ll2_release_tx_packet_cb)(void *cxt,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet);
+
+struct qed_ll2_cbs {
+ qed_ll2_complete_rx_packet_cb rx_comp_cb;
+ qed_ll2_release_rx_packet_cb rx_release_cb;
+ qed_ll2_complete_tx_packet_cb tx_comp_cb;
+ qed_ll2_release_tx_packet_cb tx_release_cb;
+ void *cookie;
+};
+
+struct qed_ll2_acquire_data_inputs {
+ enum qed_ll2_conn_type conn_type;
+ u16 mtu;
+ u16 rx_num_desc;
+ u16 rx_num_ooo_buffers;
+ u8 rx_drop_ttl0_flg;
+ u8 rx_vlan_removal_en;
+ u16 tx_num_desc;
+ u8 tx_max_bds_per_packet;
+ u8 tx_tc;
+ enum qed_ll2_tx_dest tx_dest;
+ enum qed_ll2_error_handle ai_err_packet_too_big;
+ enum qed_ll2_error_handle ai_err_no_buf;
+ u8 gsi_enable;
+};
+
+struct qed_ll2_acquire_data {
+ struct qed_ll2_acquire_data_inputs input;
+ const struct qed_ll2_cbs *cbs;
+
+ /* Output container for LL2 connection's handle */
+ u8 *p_connection_handle;
+};
+
+struct qed_ll2_tx_pkt_info {
+ void *cookie;
+ dma_addr_t first_frag;
+ enum qed_ll2_tx_dest tx_dest;
+ enum qed_ll2_roce_flavor_type qed_roce_flavor;
+ u16 vlan;
+ u16 l4_hdr_offset_w; /* from start of packet */
+ u16 first_frag_len;
+ u8 num_of_bds;
+ u8 bd_flags;
+ bool enable_ip_cksum;
+ bool enable_l4_cksum;
+ bool calc_ip_len;
+};
+
#define QED_LL2_UNUSED_HANDLE (0xff)
struct qed_ll2_cb_ops {
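The acquire/establish flow for the reworked LL2 API takes the inputs struct plus a callback table and returns a handle. A hedged sketch (rdma_cxt, ops and my_ll2_cbs are assumed to exist):

	struct qed_ll2_acquire_data data = { };
	u8 handle;
	int rc;

	data.input.conn_type = QED_LL2_TYPE_ROCE;
	data.input.mtu = 1500;
	data.input.rx_num_desc = 64;
	data.input.tx_num_desc = 64;
	data.cbs = &my_ll2_cbs;
	data.p_connection_handle = &handle;

	rc = ops->ll2_acquire_connection(rdma_cxt, &data);
	if (!rc)
		rc = ops->ll2_establish_connection(rdma_cxt, handle);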
diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_rdma_if.h
similarity index 91%
rename from include/linux/qed/qed_roce_if.h
rename to include/linux/qed/qed_rdma_if.h
index cbb2ff0..ff9be01 100644
--- a/include/linux/qed/qed_roce_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -29,13 +29,11 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef _QED_ROCE_IF_H
-#define _QED_ROCE_IF_H
+#ifndef _QED_RDMA_IF_H
+#define _QED_RDMA_IF_H
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
@@ -491,42 +489,6 @@
enum qed_roce_ll2_tx_dest tx_dest;
};
-struct qed_roce_ll2_tx_params {
- int reserved;
-};
-
-struct qed_roce_ll2_rx_params {
- u16 vlan_id;
- u8 smac[ETH_ALEN];
- int rc;
-};
-
-struct qed_roce_ll2_cbs {
- void (*tx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt);
-
- void (*rx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt,
- struct qed_roce_ll2_rx_params *params);
-};
-
-struct qed_roce_ll2_params {
- u16 max_rx_buffers;
- u16 max_tx_buffers;
- u16 mtu;
- u8 mac_address[ETH_ALEN];
- struct qed_roce_ll2_cbs cbs;
- void *cb_cookie;
-};
-
-struct qed_roce_ll2_info {
- u8 handle;
- struct qed_roce_ll2_cbs cbs;
- u8 mac_address[ETH_ALEN];
- void *cb_cookie;
-
- /* Lock to protect ll2 */
- struct mutex lock;
-};
-
enum qed_rdma_type {
QED_RDMA_TYPE_ROCE,
};
@@ -579,26 +541,40 @@
int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp,
struct qed_rdma_query_qp_out_params *oparams);
int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp);
+
int
(*rdma_register_tid)(void *rdma_cxt,
struct qed_rdma_register_tid_in_params *iparams);
+
int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid);
int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
void (*rdma_free_tid)(void *rdma_cxt, u32 itid);
- int (*roce_ll2_start)(struct qed_dev *cdev,
- struct qed_roce_ll2_params *params);
- int (*roce_ll2_stop)(struct qed_dev *cdev);
- int (*roce_ll2_tx)(struct qed_dev *cdev,
- struct qed_roce_ll2_packet *packet,
- struct qed_roce_ll2_tx_params *params);
- int (*roce_ll2_post_rx_buffer)(struct qed_dev *cdev,
- struct qed_roce_ll2_buffer *buf,
- u64 cookie, u8 notify_fw);
- int (*roce_ll2_set_mac_filter)(struct qed_dev *cdev,
- u8 *old_mac_address,
- u8 *new_mac_address);
- int (*roce_ll2_stats)(struct qed_dev *cdev,
- struct qed_ll2_stats *stats);
+
+ int (*ll2_acquire_connection)(void *rdma_cxt,
+ struct qed_ll2_acquire_data *data);
+
+ int (*ll2_establish_connection)(void *rdma_cxt, u8 connection_handle);
+ int (*ll2_terminate_connection)(void *rdma_cxt, u8 connection_handle);
+ void (*ll2_release_connection)(void *rdma_cxt, u8 connection_handle);
+
+ int (*ll2_prepare_tx_packet)(void *rdma_cxt,
+ u8 connection_handle,
+ struct qed_ll2_tx_pkt_info *pkt,
+ bool notify_fw);
+
+ int (*ll2_set_fragment_of_tx_packet)(void *rdma_cxt,
+ u8 connection_handle,
+ dma_addr_t addr,
+ u16 nbytes);
+ int (*ll2_post_rx_buffer)(void *rdma_cxt, u8 connection_handle,
+ dma_addr_t addr, u16 buf_len, void *cookie,
+ u8 notify_fw);
+ int (*ll2_get_stats)(void *rdma_cxt,
+ u8 connection_handle,
+ struct qed_ll2_stats *p_stats);
+ int (*ll2_set_mac_filter)(struct qed_dev *cdev,
+ u8 *old_mac_address, u8 *new_mac_address);
+
};
const struct qed_rdma_ops *qed_get_rdma_ops(void);
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_rdma.h
similarity index 65%
rename from include/linux/qed/qede_roce.h
rename to include/linux/qed/qede_rdma.h
index 3b8dd55..9904617 100644
--- a/include/linux/qed/qede_roce.h
+++ b/include/linux/qed/qede_rdma.h
@@ -32,22 +32,27 @@
#ifndef QEDE_ROCE_H
#define QEDE_ROCE_H
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
struct qedr_dev;
struct qed_dev;
struct qede_dev;
-enum qede_roce_event {
+enum qede_rdma_event {
QEDE_UP,
QEDE_DOWN,
QEDE_CHANGE_ADDR,
QEDE_CLOSE
};
-struct qede_roce_event_work {
+struct qede_rdma_event_work {
struct list_head list;
struct work_struct work;
void *ptr;
- enum qede_roce_event event;
+ enum qede_rdma_event event;
};
struct qedr_driver {
@@ -57,32 +62,33 @@
struct net_device *);
void (*remove)(struct qedr_dev *);
- void (*notify)(struct qedr_dev *, enum qede_roce_event);
+ void (*notify)(struct qedr_dev *, enum qede_rdma_event);
};
-/* APIs for RoCE driver to register callback handlers,
+/* APIs for RDMA driver to register callback handlers,
* which will be invoked when device is added, removed, ifup, ifdown
*/
-int qede_roce_register_driver(struct qedr_driver *drv);
-void qede_roce_unregister_driver(struct qedr_driver *drv);
+int qede_rdma_register_driver(struct qedr_driver *drv);
+void qede_rdma_unregister_driver(struct qedr_driver *drv);
-bool qede_roce_supported(struct qede_dev *dev);
+bool qede_rdma_supported(struct qede_dev *dev);
#if IS_ENABLED(CONFIG_QED_RDMA)
-int qede_roce_dev_add(struct qede_dev *dev);
-void qede_roce_dev_event_open(struct qede_dev *dev);
-void qede_roce_dev_event_close(struct qede_dev *dev);
-void qede_roce_dev_remove(struct qede_dev *dev);
-void qede_roce_event_changeaddr(struct qede_dev *qedr);
+int qede_rdma_dev_add(struct qede_dev *dev);
+void qede_rdma_dev_event_open(struct qede_dev *dev);
+void qede_rdma_dev_event_close(struct qede_dev *dev);
+void qede_rdma_dev_remove(struct qede_dev *dev);
+void qede_rdma_event_changeaddr(struct qede_dev *edr);
+
#else
-static inline int qede_roce_dev_add(struct qede_dev *dev)
+static inline int qede_rdma_dev_add(struct qede_dev *dev)
{
return 0;
}
-static inline void qede_roce_dev_event_open(struct qede_dev *dev) {}
-static inline void qede_roce_dev_event_close(struct qede_dev *dev) {}
-static inline void qede_roce_dev_remove(struct qede_dev *dev) {}
-static inline void qede_roce_event_changeaddr(struct qede_dev *qedr) {}
+static inline void qede_rdma_dev_event_open(struct qede_dev *dev) {}
+static inline void qede_rdma_dev_event_close(struct qede_dev *dev) {}
+static inline void qede_rdma_dev_remove(struct qede_dev *dev) {}
+static inline void qede_rdma_event_changeaddr(struct qede_dev *edr) {}
#endif
#endif
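After the rename, an RDMA driver hooks in through the same three callbacks, now spelled with the generic event type. A skeletal, purely illustrative registration (all example_* names are made up) looks like:

static struct qedr_dev *example_add(struct qed_dev *cdev,
				    struct pci_dev *pdev,
				    struct net_device *ndev)
{
	return NULL;	/* would allocate and set up a qedr_dev */
}

static void example_remove(struct qedr_dev *qdev)
{
}

static void example_notify(struct qedr_dev *qdev, enum qede_rdma_event event)
{
	/* react to QEDE_UP / QEDE_DOWN / QEDE_CHANGE_ADDR / QEDE_CLOSE */
}

static struct qedr_driver example_driver = {
	.add	= example_add,
	.remove	= example_remove,
	.notify	= example_notify,
};

/* module init/exit would pair qede_rdma_register_driver(&example_driver)
 * with qede_rdma_unregister_driver(&example_driver). */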
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index 72c770f..a9b3050 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -42,7 +42,7 @@
#define RDMA_MAX_SGE_PER_SQ_WQE (4)
#define RDMA_MAX_SGE_PER_RQ_WQE (4)
-#define RDMA_MAX_DATA_SIZE_IN_WQE (0x7FFFFFFF)
+#define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000)
#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50)
#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20)
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
index 866f063..fe6a33e 100644
--- a/include/linux/qed/roce_common.h
+++ b/include/linux/qed/roce_common.h
@@ -37,6 +37,8 @@
#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288)
#define ROCE_MAX_QPS (32 * 1024)
+#define ROCE_DCQCN_NP_MAX_QPS (64)
+#define ROCE_DCQCN_RP_MAX_QPS (64)
enum roce_async_events_type {
ROCE_ASYNC_EVENT_NONE = 0,
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index a5e8432..dbf7a43 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -111,7 +111,6 @@
__le32 snd_wnd;
__le32 rcv_wnd;
__le32 snd_wl1;
- __le32 ts_time;
__le32 ts_recent;
__le32 ts_recent_age;
__le32 total_rt;
@@ -122,7 +121,7 @@
u8 ka_probe_cnt;
u8 rt_cnt;
__le16 rtt_var;
- __le16 reserved2;
+ __le16 fw_internal;
__le32 ka_timeout;
__le32 ka_interval;
__le32 max_rt_time;
@@ -130,7 +129,7 @@
u8 snd_wnd_scale;
u8 ack_frequency;
__le16 da_timeout_value;
- __le32 ts_ticks_per_second;
+ __le32 reserved3[2];
};
struct tcp_offload_params_opt2 {
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 57e5484..dea59c8 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -18,7 +18,8 @@
void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
- unsigned change, gfp_t flags);
+ unsigned change, u32 event,
+ gfp_t flags);
void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
gfp_t flags);
diff --git a/include/linux/rxrpc.h b/include/linux/rxrpc.h
index c68307b..7343f71 100644
--- a/include/linux/rxrpc.h
+++ b/include/linux/rxrpc.h
@@ -37,6 +37,8 @@
#define RXRPC_SECURITY_KEYRING 2 /* [srvr] set ring of server security keys */
#define RXRPC_EXCLUSIVE_CONNECTION 3 /* Deprecated; use RXRPC_EXCLUSIVE_CALL instead */
#define RXRPC_MIN_SECURITY_LEVEL 4 /* minimum security level */
+#define RXRPC_UPGRADEABLE_SERVICE 5 /* Upgrade service[0] -> service[1] */
+#define RXRPC_SUPPORTED_CMSG 6 /* Get highest supported control message type */
/*
* RxRPC control messages
@@ -44,15 +46,20 @@
* - terminal messages mean that a user call ID tag can be recycled
* - s/r/- indicate whether these are applicable to sendmsg() and/or recvmsg()
*/
-#define RXRPC_USER_CALL_ID 1 /* sr: user call ID specifier */
-#define RXRPC_ABORT 2 /* sr: abort request / notification [terminal] */
-#define RXRPC_ACK 3 /* -r: [Service] RPC op final ACK received [terminal] */
-#define RXRPC_NET_ERROR 5 /* -r: network error received [terminal] */
-#define RXRPC_BUSY 6 /* -r: server busy received [terminal] */
-#define RXRPC_LOCAL_ERROR 7 /* -r: local error generated [terminal] */
-#define RXRPC_NEW_CALL 8 /* -r: [Service] new incoming call notification */
-#define RXRPC_ACCEPT 9 /* s-: [Service] accept request */
-#define RXRPC_EXCLUSIVE_CALL 10 /* s-: Call should be on exclusive connection */
+enum rxrpc_cmsg_type {
+ RXRPC_USER_CALL_ID = 1, /* sr: user call ID specifier */
+ RXRPC_ABORT = 2, /* sr: abort request / notification [terminal] */
+ RXRPC_ACK = 3, /* -r: [Service] RPC op final ACK received [terminal] */
+ RXRPC_NET_ERROR = 5, /* -r: network error received [terminal] */
+ RXRPC_BUSY = 6, /* -r: server busy received [terminal] */
+ RXRPC_LOCAL_ERROR = 7, /* -r: local error generated [terminal] */
+ RXRPC_NEW_CALL = 8, /* -r: [Service] new incoming call notification */
+ RXRPC_ACCEPT = 9, /* s-: [Service] accept request */
+ RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */
+ RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */
+ RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */
+ RXRPC__SUPPORTED
+};
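A userspace sketch of the new RXRPC_TX_LENGTH control message, which lets a sender declare the total transmission length up front (payload documented as a signed 64-bit byte count). Call setup and the actual sendmsg() are omitted; only the cmsg construction is shown.

#include <string.h>
#include <stdint.h>
#include <sys/socket.h>

static void set_tx_length_cmsg(struct msghdr *msg, char *buf, size_t buflen,
			       int64_t total_len)
{
	struct cmsghdr *cmsg;

	msg->msg_control = buf;
	msg->msg_controllen = buflen;	/* must be >= CMSG_SPACE(8) */
	cmsg = CMSG_FIRSTHDR(msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_TX_LENGTH;
	cmsg->cmsg_len = CMSG_LEN(sizeof(total_len));
	memcpy(CMSG_DATA(cmsg), &total_len, sizeof(total_len));
	msg->msg_controllen = cmsg->cmsg_len;
}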
/*
* RxRPC security levels
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
index f4dfade..35226cd 100644
--- a/include/linux/skb_array.h
+++ b/include/linux/skb_array.h
@@ -97,21 +97,46 @@
return ptr_ring_consume(&a->ring);
}
+static inline int skb_array_consume_batched(struct skb_array *a,
+ struct sk_buff **array, int n)
+{
+ return ptr_ring_consume_batched(&a->ring, (void **)array, n);
+}
+
static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a)
{
return ptr_ring_consume_irq(&a->ring);
}
+static inline int skb_array_consume_batched_irq(struct skb_array *a,
+ struct sk_buff **array, int n)
+{
+ return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n);
+}
+
static inline struct sk_buff *skb_array_consume_any(struct skb_array *a)
{
return ptr_ring_consume_any(&a->ring);
}
+static inline int skb_array_consume_batched_any(struct skb_array *a,
+ struct sk_buff **array, int n)
+{
+ return ptr_ring_consume_batched_any(&a->ring, (void **)array, n);
+}
+
static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a)
{
return ptr_ring_consume_bh(&a->ring);
}
+static inline int skb_array_consume_batched_bh(struct skb_array *a,
+ struct sk_buff **array, int n)
+{
+ return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n);
+}
+
static inline int __skb_array_len_with_tag(struct sk_buff *skb)
{
if (likely(skb)) {
@@ -156,6 +181,12 @@
kfree_skb(ptr);
}
+static inline void skb_array_unconsume(struct skb_array *a,
+ struct sk_buff **skbs, int n)
+{
+ ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb);
+}
+
static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
{
return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
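The batched consume wrappers let a consumer pull several skbs per ring operation instead of one. A minimal sketch of a drain loop (the batch size and the kfree_skb() disposal are arbitrary; tun/tap-style users would process the skbs instead):

#define EXAMPLE_BATCH 8

static void example_drain(struct skb_array *a)
{
	struct sk_buff *skbs[EXAMPLE_BATCH];
	int i, n;

	n = skb_array_consume_batched(a, skbs, EXAMPLE_BATCH);
	for (i = 0; i < n; i++)
		kfree_skb(skbs[i]);	/* a real consumer would process them */
}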
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a098d95..a17e235 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -109,6 +109,7 @@
* may perform further validation in this case.
* GRE: only if the checksum is present in the header.
* SCTP: indicates the CRC in SCTP header has been validated.
+ * FCOE: indicates the CRC in FC frame has been validated.
*
* skb->csum_level indicates the number of consecutive checksums found in
* the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
@@ -126,8 +127,10 @@
* packet as seen by netif_rx() and fills out in skb->csum. Meaning, the
* hardware doesn't need to parse L3/L4 headers to implement this.
*
- * Note: Even if device supports only some protocols, but is able to produce
- * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
+ * Notes:
+ * - Even if device supports only some protocols, but is able to produce
+ * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
+ * - CHECKSUM_COMPLETE is not applicable to SCTP and FCoE protocols.
*
* CHECKSUM_PARTIAL:
*
@@ -162,14 +165,11 @@
*
* NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of
* NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate
- * checksum offload capability. If a device has limited checksum capabilities
- * (for instance can only perform NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM as
- * described above) a helper function can be called to resolve
- * CHECKSUM_PARTIAL. The helper functions are skb_csum_off_chk*. The helper
- * function takes a spec argument that describes the protocol layer that is
- * supported for checksum offload and can be called for each packet. If a
- * packet does not match the specification for offload, skb_checksum_help
- * is called to resolve the checksum.
+ * checksum offload capability.
+ * skb_csum_hwoffload_help() can be called to resolve CHECKSUM_PARTIAL based
+ * on network device checksumming capabilities: if a packet does not match
+ * them, skb_checksum_help or skb_crc32c_help (depending on the value of
+ * csum_not_inet, see item D.) is called to resolve the checksum.
*
* CHECKSUM_NONE:
*
@@ -189,11 +189,13 @@
*
* NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
* offloading the SCTP CRC in a packet. To perform this offload the stack
- * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
- * accordingly. Note the there is no indication in the skbuff that the
- * CHECKSUM_PARTIAL refers to an SCTP checksum, a driver that supports
- * both IP checksum offload and SCTP CRC offload must verify which offload
- * is configured for a packet presumably by inspecting packet headers.
+ * will set csum_start and csum_offset accordingly, set ip_summed to
+ * CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in
+ * the skbuff that the CHECKSUM_PARTIAL refers to CRC32c.
+ * A driver that supports both IP checksum offload and SCTP CRC32c offload
+ * must verify which offload is configured for a packet by testing the
+ * value of skb->csum_not_inet; skb_crc32c_csum_help is provided to resolve
+ * CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1.
*
* NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
* offloading the FCOE CRC in a packet. To perform this offload the stack
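A sketch of the rule documented above: a driver advertising both NETIF_F_HW_CSUM and NETIF_F_SCTP_CRC must consult csum_not_inet to tell an Internet checksum apart from a CRC32c request. The hypothetical device here only does Internet checksums, so CRC32c requests fall back to software via skb_crc32c_csum_help().

static int example_tx_csum(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->csum_not_inet)		/* CHECKSUM_PARTIAL means CRC32c */
		return skb_crc32c_csum_help(skb);

	/* program csum_start/csum_offset into the Tx descriptor instead */
	return 0;
}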
@@ -506,66 +508,6 @@
typedef unsigned char *sk_buff_data_t;
#endif
-/**
- * struct skb_mstamp - multi resolution time stamps
- * @stamp_us: timestamp in us resolution
- * @stamp_jiffies: timestamp in jiffies
- */
-struct skb_mstamp {
- union {
- u64 v64;
- struct {
- u32 stamp_us;
- u32 stamp_jiffies;
- };
- };
-};
-
-/**
- * skb_mstamp_get - get current timestamp
- * @cl: place to store timestamps
- */
-static inline void skb_mstamp_get(struct skb_mstamp *cl)
-{
- u64 val = local_clock();
-
- do_div(val, NSEC_PER_USEC);
- cl->stamp_us = (u32)val;
- cl->stamp_jiffies = (u32)jiffies;
-}
-
-/**
- * skb_mstamp_delta - compute the difference in usec between two skb_mstamp
- * @t1: pointer to newest sample
- * @t0: pointer to oldest sample
- */
-static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
- const struct skb_mstamp *t0)
-{
- s32 delta_us = t1->stamp_us - t0->stamp_us;
- u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies;
-
- /* If delta_us is negative, this might be because interval is too big,
- * or local_clock() drift is too big : fallback using jiffies.
- */
- if (delta_us <= 0 ||
- delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ)))
-
- delta_us = jiffies_to_usecs(delta_jiffies);
-
- return delta_us;
-}
-
-static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
- const struct skb_mstamp *t0)
-{
- s32 diff = t1->stamp_jiffies - t0->stamp_jiffies;
-
- if (!diff)
- diff = t1->stamp_us - t0->stamp_us;
- return diff > 0;
-}
-
/**
* struct sk_buff - socket buffer
* @next: Next buffer in list
@@ -616,6 +558,7 @@
* @wifi_acked_valid: wifi_acked was set
* @wifi_acked: whether frame was acked on wifi or not
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
+ * @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL
* @dst_pending_confirm: need to confirm neighbour
* @napi_id: id of the NAPI struct this skb came from
* @secmark: security marking
@@ -646,7 +589,7 @@
union {
ktime_t tstamp;
- struct skb_mstamp skb_mstamp;
+ u64 skb_mstamp;
};
};
struct rb_node rbnode; /* used in netem & tcp stack */
@@ -744,7 +687,7 @@
__u8 csum_valid:1;
__u8 csum_complete_sw:1;
__u8 csum_level:2;
- __u8 csum_bad:1;
+ __u8 csum_not_inet:1;
__u8 dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
@@ -915,10 +858,34 @@
return ptype <= PACKET_OTHERHOST;
}
+static inline unsigned int skb_napi_id(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ return skb->napi_id;
+#else
+ return 0;
+#endif
+}
+
+/* decrement the reference count and return true if we can free the skb */
+static inline bool skb_unref(struct sk_buff *skb)
+{
+ if (unlikely(!skb))
+ return false;
+ if (likely(atomic_read(&skb->users) == 1))
+ smp_rmb();
+ else if (likely(!atomic_dec_and_test(&skb->users)))
+ return false;
+
+ return true;
+}
+
+void skb_release_head_state(struct sk_buff *skb);
void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_tx_error(struct sk_buff *skb);
void consume_skb(struct sk_buff *skb);
+void consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;
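The new skb_unref() helper factors out the "last reference?" test so the free paths compose cleanly; a sketch of the intended shape (kfree_skb() in net/core/skbuff.c follows this pattern, minus tracing):

static void example_free_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;		/* other references remain */
	__kfree_skb(skb);
}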
@@ -1001,10 +968,10 @@
unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
int newtailroom, gfp_t priority);
-int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
- int offset, int len);
-int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
- int len);
+int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
+ int offset, int len);
+int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
+ int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a) consume_skb(a)
@@ -1926,41 +1893,87 @@
/*
* Add data to an sk_buff
*/
-unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
-unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
-static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
+void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
+void *skb_put(struct sk_buff *skb, unsigned int len);
+static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
{
- unsigned char *tmp = skb_tail_pointer(skb);
+ void *tmp = skb_tail_pointer(skb);
SKB_LINEAR_ASSERT(skb);
skb->tail += len;
skb->len += len;
return tmp;
}
-unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
-static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
+static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
+{
+ void *tmp = __skb_put(skb, len);
+
+ memset(tmp, 0, len);
+ return tmp;
+}
+
+static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
+ unsigned int len)
+{
+ void *tmp = __skb_put(skb, len);
+
+ memcpy(tmp, data, len);
+ return tmp;
+}
+
+static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
+{
+ *(u8 *)__skb_put(skb, 1) = val;
+}
+
+static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
+{
+ void *tmp = skb_put(skb, len);
+
+ memset(tmp, 0, len);
+
+ return tmp;
+}
+
+static inline void *skb_put_data(struct sk_buff *skb, const void *data,
+ unsigned int len)
+{
+ void *tmp = skb_put(skb, len);
+
+ memcpy(tmp, data, len);
+
+ return tmp;
+}
+
+static inline void skb_put_u8(struct sk_buff *skb, u8 val)
+{
+ *(u8 *)skb_put(skb, 1) = val;
+}
+
+void *skb_push(struct sk_buff *skb, unsigned int len);
+static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
{
skb->data -= len;
skb->len += len;
return skb->data;
}
-unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
-static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
+void *skb_pull(struct sk_buff *skb, unsigned int len);
+static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
skb->len -= len;
BUG_ON(skb->len < skb->data_len);
return skb->data += len;
}
-static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
+static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}
-unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
+void *__pskb_pull_tail(struct sk_buff *skb, int delta);
-static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
+static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
if (len > skb_headlen(skb) &&
!__pskb_pull_tail(skb, len - skb_headlen(skb)))
@@ -1969,7 +1982,7 @@
return skb->data += len;
}
-static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
+static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
{
return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}
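With skb_put() and friends now returning void *, the new helpers fold the common memcpy(skb_put(...)) and memset(skb_put(...)) patterns into one call and drop the casts. A before/after sketch for a hypothetical two-byte header:

struct example_hdr { u8 type; u8 len; };

static void example_emit(struct sk_buff *skb, const struct example_hdr *h)
{
	/* old style: memcpy(skb_put(skb, sizeof(*h)), h, sizeof(*h)); */
	skb_put_data(skb, h, sizeof(*h));
	skb_put_u8(skb, 0);		/* single zero trailer byte */
}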
@@ -2952,7 +2965,7 @@
__skb_postpush_rcsum(skb, start, len, 0);
}
-unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
/**
* skb_push_rcsum - push skb and update receive checksum
@@ -2965,8 +2978,7 @@
* that the checksum difference is zero (e.g., a valid IP header)
* or you are setting ip_summed to CHECKSUM_NONE.
*/
-static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
- unsigned int len)
+static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
{
skb_push(skb, len);
skb_postpush_rcsum(skb, skb->data, len);
@@ -3056,6 +3068,13 @@
int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
const struct sk_buff *skb);
+struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
+ struct sk_buff_head *queue,
+ unsigned int flags,
+ void (*destructor)(struct sock *sk,
+ struct sk_buff *skb),
+ int *peeked, int *off, int *err,
+ struct sk_buff **last);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
void (*destructor)(struct sock *sk,
struct sk_buff *skb),
@@ -3129,6 +3148,8 @@
__wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
};
+extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;
+
__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum csum, const struct skb_checksum_ops *ops);
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
@@ -3298,13 +3319,6 @@
void skb_tstamp_tx(struct sk_buff *orig_skb,
struct skb_shared_hwtstamps *hwtstamps);
-static inline void sw_tx_timestamp(struct sk_buff *skb)
-{
- if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
- !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
- skb_tstamp_tx(skb, NULL);
-}
-
/**
* skb_tx_timestamp() - Driver hook for transmit timestamping
*
@@ -3320,7 +3334,8 @@
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
skb_clone_tx_timestamp(skb);
- sw_tx_timestamp(skb);
+ if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
+ skb_tstamp_tx(skb, NULL);
}
/**
@@ -3386,21 +3401,6 @@
}
}
-static inline void __skb_mark_checksum_bad(struct sk_buff *skb)
-{
- /* Mark current checksum as bad (typically called from GRO
- * path). In the case that ip_summed is CHECKSUM_NONE
- * this must be the first checksum encountered in the packet.
- * When ip_summed is CHECKSUM_UNNECESSARY, this is the first
- * checksum after the last one validated. For UDP, a zero
- * checksum can not be marked as bad.
- */
-
- if (skb->ip_summed == CHECKSUM_NONE ||
- skb->ip_summed == CHECKSUM_UNNECESSARY)
- skb->csum_bad = 1;
-}
-
/* Check if we need to perform checksum complete validation.
*
* Returns true if checksum complete is needed, false otherwise
@@ -3454,9 +3454,6 @@
skb->csum_valid = 1;
return 0;
}
- } else if (skb->csum_bad) {
- /* ip_summed == CHECKSUM_NONE in this case */
- return (__force __sum16)1;
}
skb->csum = psum;
@@ -3516,8 +3513,7 @@
static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
{
- return (skb->ip_summed == CHECKSUM_NONE &&
- skb->csum_valid && !skb->csum_bad);
+ return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
}
static inline void __skb_checksum_convert(struct sk_buff *skb,
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 0820274..8b13db5 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -334,6 +334,7 @@
#define SOL_ALG 279
#define SOL_NFC 280
#define SOL_KCM 281
+#define SOL_TLS 282
/* IPX options */
#define IPX_TYPE 1
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 3921cb9..108739f 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -177,6 +177,7 @@
void (*fix_mac_speed)(void *priv, unsigned int speed);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
+ struct mac_device_info *(*setup)(void *priv);
void *bsp_priv;
struct clk *stmmac_clk;
struct clk *pclk;
@@ -185,6 +186,7 @@
struct reset_control *stmmac_rst;
struct stmmac_axi *axi;
int has_gmac4;
+ bool has_sun8i;
bool tso_en;
int mac_port_sel_speed;
bool en_tx_lpi_clockgating;
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index b6d5adce..542ca1a 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -123,7 +123,7 @@
struct tcp_request_sock {
struct inet_request_sock req;
const struct tcp_request_sock_ops *af_specific;
- struct skb_mstamp snt_synack; /* first SYNACK sent time */
+ u64 snt_synack; /* first SYNACK sent time */
bool tfo_listener;
u32 txhash;
u32 rcv_isn;
@@ -211,7 +211,7 @@
/* Information of the most recently (s)acked skb */
struct tcp_rack {
- struct skb_mstamp mstamp; /* (Re)sent time of the skb */
+ u64 mstamp; /* (Re)sent time of the skb */
u32 rtt_us; /* Associated RTT */
u32 end_seq; /* Ending TCP sequence of the skb */
u8 advanced; /* mstamp advanced since last lost marking */
@@ -240,7 +240,7 @@
u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
/* RTT measurement */
- struct skb_mstamp tcp_mstamp; /* most recent packet received/sent */
+ u64 tcp_mstamp; /* most recent packet received/sent */
u32 srtt_us; /* smoothed round trip time << 3 in usecs */
u32 mdev_us; /* medium deviation */
u32 mdev_max_us; /* maximal mdev for the last rtt period */
@@ -280,8 +280,8 @@
u32 delivered; /* Total data packets delivered incl. rexmits */
u32 lost; /* Total data packets lost incl. rexmits */
u32 app_limited; /* limited until "delivered" reaches this val */
- struct skb_mstamp first_tx_mstamp; /* start of window send phase */
- struct skb_mstamp delivered_mstamp; /* time we reached "delivered" */
+ u64 first_tx_mstamp; /* start of window send phase */
+ u64 delivered_mstamp; /* time we reached "delivered" */
u32 rate_delivered; /* saved rate sample: packets delivered */
u32 rate_interval_us; /* saved rate sample: time elapsed */
@@ -293,6 +293,8 @@
u32 sacked_out; /* SACK'd packets */
u32 fackets_out; /* FACK'd packets */
+ struct hrtimer pacing_timer;
+
/* from STCP, retrans queue hinting */
struct sk_buff* lost_skb_hint;
struct sk_buff *retransmit_skb_hint;
@@ -333,16 +335,16 @@
/* Receiver side RTT estimation */
struct {
- u32 rtt_us;
- u32 seq;
- struct skb_mstamp time;
+ u32 rtt_us;
+ u32 seq;
+ u64 time;
} rcv_rtt_est;
/* Receiver queue space */
struct {
- int space;
- u32 seq;
- struct skb_mstamp time;
+ int space;
+ u32 seq;
+ u64 time;
} rcvq_space;
/* TCP-specific MTU probe information. */
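With struct skb_mstamp gone, TCP timestamps are plain microsecond u64s and deltas reduce to a clamped subtraction (net-next's tcp_stamp_us_delta() has this shape); a sketch:

static inline u32 example_stamp_us_delta(u64 t1, u64 t0)
{
	return max_t(s64, t1 - t0, 0);
}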
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 6cb4061..eaea63b 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -80,6 +80,9 @@
struct sk_buff *skb,
int nhoff);
+ /* udp_recvmsg tries to use this before splicing sk_receive_queue */
+ struct sk_buff_head reader_queue ____cacheline_aligned_in_smp;
+
/* This field is dirtied by udp_recvmsg() */
int forward_deficit;
};
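A sketch of the reader_queue idea: recvmsg drains a private queue first and only takes sk_receive_queue's lock to splice over a refill when the private queue runs dry. Locking of reader_queue itself is simplified away here; the real logic lives in net/core/datagram.c and net/ipv4/udp.c.

static struct sk_buff *example_dequeue(struct udp_sock *up, struct sock *sk)
{
	struct sk_buff *skb = __skb_dequeue(&up->reader_queue);

	if (skb)
		return skb;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue);
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	return __skb_dequeue(&up->reader_queue);
}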
diff --git a/include/net/act_api.h b/include/net/act_api.h
index cfa2ae3..26ffd83 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -42,6 +42,7 @@
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
struct tc_cookie *act_cookie;
+ struct tcf_chain *goto_chain;
};
#define tcf_head common.tcfa_head
#define tcf_index common.tcfa_index
@@ -180,12 +181,12 @@
int tcf_action_destroy(struct list_head *actions, int bind);
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
int nr_actions, struct tcf_result *res);
-int tcf_action_init(struct net *net, struct nlattr *nla,
- struct nlattr *est, char *n, int ovr,
- int bind, struct list_head *);
-struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
- struct nlattr *est, char *n, int ovr,
- int bind);
+int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
+ struct nlattr *est, char *name, int ovr, int bind,
+ struct list_head *actions);
+struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+ struct nlattr *nla, struct nlattr *est,
+ char *name, int ovr, int bind);
int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int);
int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index b43a4ee..d0889cb 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -48,11 +48,15 @@
struct in6_addr prefix;
};
-
#include <linux/netdevice.h>
#include <net/if_inet6.h>
#include <net/ipv6.h>
+struct in6_validator_info {
+ struct in6_addr i6vi_addr;
+ struct inet6_dev *i6vi_dev;
+};
+
#define IN6_ADDR_HSIZE_SHIFT 4
#define IN6_ADDR_HSIZE (1 << IN6_ADDR_HSIZE_SHIFT)
@@ -278,6 +282,10 @@
int unregister_inet6addr_notifier(struct notifier_block *nb);
int inet6addr_notifier_call_chain(unsigned long val, void *v);
+int register_inet6addr_validator_notifier(struct notifier_block *nb);
+int unregister_inet6addr_validator_notifier(struct notifier_block *nb);
+int inet6addr_validator_notifier_call_chain(unsigned long val, void *v);
+
void inet6_netconf_notify_devconf(struct net *net, int event, int type,
int ifindex, struct ipv6_devconf *devconf);
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index b5f5187..c172709 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -33,6 +33,7 @@
struct sockaddr_rxrpc *,
struct key *,
unsigned long,
+ s64,
gfp_t,
rxrpc_notify_rx_t);
int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
@@ -46,5 +47,6 @@
struct sockaddr_rxrpc *);
int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
rxrpc_user_attach_call_t, unsigned long, gfp_t);
+void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
#endif /* _NET_RXRPC_H */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 99aa5e5..fe98f0a 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -399,6 +399,7 @@
#define HCI_LE_PING 0x10
#define HCI_LE_DATA_LEN_EXT 0x20
#define HCI_LE_EXT_SCAN_POLICY 0x80
+#define HCI_LE_CHAN_SEL_ALG2 0x40
/* Connection modes */
#define HCI_CM_ACTIVE 0x0000
@@ -1498,6 +1499,13 @@
__le16 rx_time;
} __packed;
+#define HCI_OP_LE_SET_DEFAULT_PHY 0x2031
+struct hci_cp_le_set_default_phy {
+ __u8 all_phys;
+ __u8 tx_phys;
+ __u8 rx_phys;
+} __packed;
+
/* ---- HCI Events ---- */
#define HCI_EV_INQUIRY_COMPLETE 0x01
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index 1797235..d79d28f 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -104,6 +104,8 @@
int __bond_opt_set(struct bonding *bond, unsigned int option,
struct bond_opt_value *val);
+int __bond_opt_set_notify(struct bonding *bond, unsigned int option,
+ struct bond_opt_value *val);
int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf);
const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index b083e6c..f12fa52 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -649,6 +649,7 @@
* @wep_keys: static WEP keys, if not NULL points to an array of
* CFG80211_MAX_WEP_KEYS WEP keys
* @wep_tx_key: key index (0..3) of the default TX static WEP key
+ * @psk: PSK (for devices supporting 4-way-handshake offload)
*/
struct cfg80211_crypto_settings {
u32 wpa_versions;
@@ -662,6 +663,7 @@
bool control_port_no_encrypt;
struct key_params *wep_keys;
int wep_tx_key;
+ const u8 *psk;
};
/**
@@ -1441,6 +1443,9 @@
* @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a]
* @basic_rates: basic rates to use when creating the mesh
* @beacon_rate: bitrate to be used for beacons
+ * @userspace_handles_dfs: whether user space controls DFS operation, i.e.
+ * changes the channel when a radar is detected. This is required
+ * to operate on DFS channels.
*
* These parameters are fixed when the mesh is created.
*/
@@ -1462,6 +1467,7 @@
int mcast_rate[NUM_NL80211_BANDS];
u32 basic_rates;
struct cfg80211_bitrate_mask beacon_rate;
+ bool userspace_handles_dfs;
};
/**
@@ -2106,6 +2112,8 @@
* @fils_erp_rrk: ERP re-authentication Root Key (rRK) used to derive additional
* keys in FILS or %NULL if not specified.
* @fils_erp_rrk_len: Length of @fils_erp_rrk in octets.
+ * @want_1x: indicates user-space supports and wants to use 802.1X driver
+ * offload of 4-way handshake.
*/
struct cfg80211_connect_params {
struct ieee80211_channel *channel;
@@ -2138,6 +2146,7 @@
u16 fils_erp_next_seq_num;
const u8 *fils_erp_rrk;
size_t fils_erp_rrk_len;
+ bool want_1x;
};
/**
@@ -2560,6 +2569,23 @@
};
/**
+ * struct cfg80211_pmk_conf - PMK configuration
+ *
+ * @aa: authenticator address
+ * @pmk_len: PMK length in bytes.
+ * @pmk: the PMK material
+ * @pmk_r0_name: PMK-R0 Name. NULL if not applicable (i.e., the PMK
+ * is not PMK-R0). When pmk_r0_name is not NULL, the pmk field
+ * holds PMK-R0.
+ */
+struct cfg80211_pmk_conf {
+ const u8 *aa;
+ u8 pmk_len;
+ const u8 *pmk;
+ const u8 *pmk_r0_name;
+};
+
+/**
* struct cfg80211_ops - backend description for wireless configuration
*
* This struct is registered by fullmac card drivers and/or wireless stacks
@@ -2875,6 +2901,13 @@
* All other parameters must be ignored.
*
* @set_multicast_to_unicast: configure multicast to unicast conversion for BSS
+ *
+ * @set_pmk: configure the PMK to be used for offloaded 802.1X 4-Way handshake.
+ * If not deleted through @del_pmk, the PMK remains valid until disconnect,
+ * upon which the driver should clear it.
+ * (invoked with the wireless_dev mutex held)
+ * @del_pmk: delete the previously configured PMK for the given authenticator.
+ * (invoked with the wireless_dev mutex held)
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -3163,6 +3196,11 @@
int (*set_multicast_to_unicast)(struct wiphy *wiphy,
struct net_device *dev,
const bool enabled);
+
+ int (*set_pmk)(struct wiphy *wiphy, struct net_device *dev,
+ const struct cfg80211_pmk_conf *conf);
+ int (*del_pmk)(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *aa);
};
/*
@@ -5403,6 +5441,9 @@
* @req_ie_len: association request IEs length
* @resp_ie: association response IEs (may be %NULL)
* @resp_ie_len: assoc response IEs length
+ * @authorized: true if the 802.1X authentication was done by the driver or is
+ * not needed (e.g., when Fast Transition protocol was used), false
+ * otherwise. Ignored for networks that don't use 802.1X authentication.
*/
struct cfg80211_roam_info {
struct ieee80211_channel *channel;
@@ -5412,6 +5453,7 @@
size_t req_ie_len;
const u8 *resp_ie;
size_t resp_ie_len;
+ bool authorized;
};
/**
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 8e24677..58969b9 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/ethtool.h>
#include <net/devlink.h>
+#include <net/switchdev.h>
struct tc_action;
struct phy_device;
@@ -27,13 +28,14 @@
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = 0,
- DSA_TAG_PROTO_DSA,
- DSA_TAG_PROTO_TRAILER,
- DSA_TAG_PROTO_EDSA,
DSA_TAG_PROTO_BRCM,
- DSA_TAG_PROTO_QCA,
- DSA_TAG_PROTO_MTK,
+ DSA_TAG_PROTO_DSA,
+ DSA_TAG_PROTO_EDSA,
+ DSA_TAG_PROTO_KSZ,
DSA_TAG_PROTO_LAN9303,
+ DSA_TAG_PROTO_MTK,
+ DSA_TAG_PROTO_QCA,
+ DSA_TAG_PROTO_TRAILER,
DSA_TAG_LAST, /* MUST BE LAST */
};
@@ -120,27 +122,16 @@
*/
struct dsa_platform_data *pd;
- /*
- * Reference to network device to use, and which tagging
- * protocol to use.
- */
- struct net_device *master_netdev;
+ /* Copy of tag_ops->rcv for faster access in hot path */
struct sk_buff * (*rcv)(struct sk_buff *skb,
struct net_device *dev,
struct packet_type *pt,
struct net_device *orig_dev);
/*
- * Original copy of the master netdev ethtool_ops
+ * The switch port to which the CPU is attached.
*/
- struct ethtool_ops master_ethtool_ops;
- const struct ethtool_ops *master_orig_ethtool_ops;
-
- /*
- * The switch and port to which the CPU is attached.
- */
- struct dsa_switch *cpu_switch;
- s8 cpu_port;
+ struct dsa_port *cpu_dp;
/*
* Data for the individual switch chips.
@@ -180,12 +171,18 @@
struct dsa_switch *ds;
unsigned int index;
const char *name;
+ struct dsa_port *cpu_dp;
struct net_device *netdev;
struct device_node *dn;
unsigned int ageing_time;
u8 stp_state;
struct net_device *bridge_dev;
struct devlink_port devlink_port;
+ /*
+ * Original copy of the master netdev ethtool_ops
+ */
+ struct ethtool_ops ethtool_ops;
+ const struct ethtool_ops *orig_ethtool_ops;
};
struct dsa_switch {
@@ -224,11 +221,6 @@
s8 rtable[DSA_MAX_SWITCHES];
/*
- * The lower device this switch uses to talk to the host
- */
- struct net_device *master_netdev;
-
- /*
* Slave mii_bus and devices for the individual ports.
*/
u32 dsa_port_mask;
@@ -251,7 +243,7 @@
static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
{
- return !!(ds == ds->dst->cpu_switch && p == ds->dst->cpu_port);
+ return !!(ds->cpu_port_mask & (1 << p));
}
static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
@@ -279,28 +271,12 @@
* Else return the (DSA) port number that connects to the
* switch that is one hop closer to the cpu.
*/
- if (dst->cpu_switch == ds)
- return dst->cpu_port;
+ if (dst->cpu_dp->ds == ds)
+ return dst->cpu_dp->index;
else
- return ds->rtable[dst->cpu_switch->index];
+ return ds->rtable[dst->cpu_dp->ds->index];
}
-struct switchdev_trans;
-struct switchdev_obj;
-struct switchdev_obj_port_fdb;
-struct switchdev_obj_port_mdb;
-struct switchdev_obj_port_vlan;
-
-#define DSA_NOTIFIER_BRIDGE_JOIN 1
-#define DSA_NOTIFIER_BRIDGE_LEAVE 2
-
-/* DSA_NOTIFIER_BRIDGE_* */
-struct dsa_notifier_bridge_info {
- struct net_device *br;
- int sw_index;
- int port;
-};
-
struct dsa_switch_ops {
/*
* Legacy probing.
@@ -410,7 +386,7 @@
const struct switchdev_obj_port_vlan *vlan);
int (*port_vlan_dump)(struct dsa_switch *ds, int port,
struct switchdev_obj_port_vlan *vlan,
- int (*cb)(struct switchdev_obj *obj));
+ switchdev_obj_dump_cb_t *cb);
/*
* Forwarding database
@@ -425,7 +401,7 @@
const struct switchdev_obj_port_fdb *fdb);
int (*port_fdb_dump)(struct dsa_switch *ds, int port,
struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj));
+ switchdev_obj_dump_cb_t *cb);
/*
* Multicast database
@@ -440,7 +416,7 @@
const struct switchdev_obj_port_mdb *mdb);
int (*port_mdb_dump)(struct dsa_switch *ds, int port,
struct switchdev_obj_port_mdb *mdb,
- int (*cb)(struct switchdev_obj *obj));
+ switchdev_obj_dump_cb_t *cb);
/*
* RXNFC
@@ -480,23 +456,18 @@
struct net_device *dsa_dev_to_net_device(struct device *dev);
-static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
-{
- return dst->rcv != NULL;
-}
-
+/* Keep inline for faster access in hot path */
static inline bool netdev_uses_dsa(struct net_device *dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
- if (dev->dsa_ptr != NULL)
- return dsa_uses_tagged_protocol(dev->dsa_ptr);
+ return dev->dsa_ptr && dev->dsa_ptr->rcv;
#endif
return false;
}
struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n);
void dsa_unregister_switch(struct dsa_switch *ds);
-int dsa_register_switch(struct dsa_switch *ds, struct device *dev);
+int dsa_register_switch(struct dsa_switch *ds);
#ifdef CONFIG_PM_SLEEP
int dsa_switch_suspend(struct dsa_switch *ds);
int dsa_switch_resume(struct dsa_switch *ds);
diff --git a/include/net/dst.h b/include/net/dst.h
index cfc0437..f73611e 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -31,9 +31,9 @@
struct sk_buff;
struct dst_entry {
+ struct net_device *dev;
struct rcu_head rcu_head;
struct dst_entry *child;
- struct net_device *dev;
struct dst_ops *ops;
unsigned long _metrics;
unsigned long expires;
@@ -51,13 +51,11 @@
#define DST_HOST 0x0001
#define DST_NOXFRM 0x0002
#define DST_NOPOLICY 0x0004
-#define DST_NOHASH 0x0008
-#define DST_NOCACHE 0x0010
-#define DST_NOCOUNT 0x0020
-#define DST_FAKE_RTABLE 0x0040
-#define DST_XFRM_TUNNEL 0x0080
-#define DST_XFRM_QUEUE 0x0100
-#define DST_METADATA 0x0200
+#define DST_NOCOUNT 0x0008
+#define DST_FAKE_RTABLE 0x0010
+#define DST_XFRM_TUNNEL 0x0020
+#define DST_XFRM_QUEUE 0x0040
+#define DST_METADATA 0x0080
short error;
@@ -253,7 +251,7 @@
* __pad_to_align_refcnt declaration in struct dst_entry
*/
BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
- atomic_inc(&dst->__refcnt);
+ WARN_ON(atomic_inc_not_zero(&dst->__refcnt) == 0);
}
static inline void dst_use(struct dst_entry *dst, unsigned long time)
@@ -278,6 +276,8 @@
void dst_release(struct dst_entry *dst);
+void dst_release_immediate(struct dst_entry *dst);
+
static inline void refdst_drop(unsigned long refdst)
{
if (!(refdst & SKB_DST_NOREF))
@@ -334,10 +334,7 @@
*/
static inline bool dst_hold_safe(struct dst_entry *dst)
{
- if (dst->flags & DST_NOCACHE)
- return atomic_inc_not_zero(&dst->__refcnt);
- dst_hold(dst);
- return true;
+ return atomic_inc_not_zero(&dst->__refcnt);
}
/**
@@ -423,26 +420,8 @@
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
struct net_device *dev, int initial_ref, int initial_obsolete,
unsigned short flags);
-void __dst_free(struct dst_entry *dst);
struct dst_entry *dst_destroy(struct dst_entry *dst);
-
-static inline void dst_free(struct dst_entry *dst)
-{
- if (dst->obsolete > 0)
- return;
- if (!atomic_read(&dst->__refcnt)) {
- dst = dst_destroy(dst);
- if (!dst)
- return;
- }
- __dst_free(dst);
-}
-
-static inline void dst_rcu_free(struct rcu_head *head)
-{
- struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
- dst_free(dst);
-}
+void dst_dev_put(struct dst_entry *dst);
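A sketch of the new lifetime rule replacing dst_free(): every dst now starts with a reference (DST_NOCACHE is gone), so an error path that never published the route simply drops that reference without waiting for an RCU grace period. The function name here is illustrative.

static void example_error_path(struct dst_entry *dst)
{
	/* nobody else could have seen this dst, free it right away */
	dst_release_immediate(dst);
}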
static inline void dst_confirm(struct dst_entry *dst)
{
@@ -505,8 +484,6 @@
return dst;
}
-void dst_subsys_init(void);
-
/* Flags for xfrm_lookup flags argument. */
enum {
XFRM_LOOKUP_ICMP = 1 << 0,
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 8d21d44..e2663e9 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -157,6 +157,24 @@
unsigned char src[ETH_ALEN];
};
+/**
+ * struct flow_dissector_key_tcp:
+ * @flags: flags
+ */
+struct flow_dissector_key_tcp {
+ __be16 flags;
+};
+
+/**
+ * struct flow_dissector_key_ip:
+ * @tos: tos
+ * @ttl: ttl
+ */
+struct flow_dissector_key_ip {
+ __u8 tos;
+ __u8 ttl;
+};
+
enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */
FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */
@@ -177,6 +195,8 @@
FLOW_DISSECTOR_KEY_ENC_CONTROL, /* struct flow_dissector_key_control */
FLOW_DISSECTOR_KEY_ENC_PORTS, /* struct flow_dissector_key_ports */
FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */
+ FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */
+ FLOW_DISSECTOR_KEY_IP, /* struct flow_dissector_key_ip */
FLOW_DISSECTOR_KEY_MAX,
};
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 68b8819..c59a098 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -128,7 +128,6 @@
* @start: start callback for dumps
* @dumpit: callback for dumpers
* @done: completion callback for dumps
- * @ops_list: operations list
*/
struct genl_ops {
const struct nla_policy *policy;
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index c7a5779..13e4c89 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -75,6 +75,8 @@
* @icsk_pmtu_cookie Last pmtu seen by socket
* @icsk_ca_ops Pluggable congestion control hook
* @icsk_af_ops Operations which are AF_INET{4,6} specific
+ * @icsk_ulp_ops Pluggable ULP control hook
+ * @icsk_ulp_data ULP private data
* @icsk_ca_state: Congestion control state
* @icsk_retransmits: Number of unrecovered [RTO] timeouts
* @icsk_pending: Scheduled timer event
@@ -97,6 +99,8 @@
__u32 icsk_pmtu_cookie;
const struct tcp_congestion_ops *icsk_ca_ops;
const struct inet_connection_sock_af_ops *icsk_af_ops;
+ const struct tcp_ulp_ops *icsk_ulp_ops;
+ void *icsk_ulp_data;
unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
__u8 icsk_ca_state:6,
icsk_ca_setsockopt:1,
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 5894730..975779d 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -92,7 +92,7 @@
*/
u32 rnd;
seqlock_t rnd_seqlock;
- int qsize;
+ unsigned int qsize;
unsigned int (*hashfn)(const struct inet_frag_queue *);
bool (*match)(const struct inet_frag_queue *q,
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index c979c87..1a88008 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -170,7 +170,7 @@
static inline u32 rt6_get_cookie(const struct rt6_info *rt)
{
if (rt->rt6i_flags & RTF_PCPU ||
- (unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from))
+ (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
rt = (struct rt6_info *)(rt->dst.from);
return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
@@ -277,7 +277,8 @@
void *arg);
int fib6_add(struct fib6_node *root, struct rt6_info *rt,
- struct nl_info *info, struct mx6_config *mxc);
+ struct nl_info *info, struct mx6_config *mxc,
+ struct netlink_ext_ack *extack);
int fib6_del(struct rt6_info *rt, struct nl_info *info);
void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index f5e625f..0fbf73d 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -90,7 +90,7 @@
int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg);
-int ip6_route_add(struct fib6_config *cfg);
+int ip6_route_add(struct fib6_config *cfg, struct netlink_ext_ack *extack);
int ip6_ins_rt(struct rt6_info *);
int ip6_del_rt(struct rt6_info *);
@@ -116,7 +116,6 @@
const struct in6_addr *saddr, int oif, int flags);
struct dst_entry *icmp6_dst_alloc(struct net_device *dev, struct flowi6 *fl6);
-int icmp6_dst_gc(void);
void fib6_force_start_gc(struct net *net);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index f7f6aa7..3dbfd5e 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -136,6 +136,7 @@
struct fib_table;
struct fib_result {
+ __be32 prefix;
unsigned char prefixlen;
unsigned char nh_sel;
unsigned char type;
@@ -263,8 +264,10 @@
int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
struct fib_result *res, int fib_flags);
-int fib_table_insert(struct net *, struct fib_table *, struct fib_config *);
-int fib_table_delete(struct net *, struct fib_table *, struct fib_config *);
+int fib_table_insert(struct net *, struct fib_table *, struct fib_config *,
+ struct netlink_ext_ack *extack);
+int fib_table_delete(struct net *, struct fib_table *, struct fib_config *,
+ struct netlink_ext_ack *extack);
int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
struct netlink_callback *cb);
int fib_table_flush(struct net *net, struct fib_table *table);
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index ebfe237..7c26863 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -35,7 +35,8 @@
struct lwtunnel_encap_ops {
int (*build_state)(struct nlattr *encap,
unsigned int family, const void *cfg,
- struct lwtunnel_state **ts);
+ struct lwtunnel_state **ts,
+ struct netlink_ext_ack *extack);
void (*destroy_state)(struct lwtunnel_state *lws);
int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
int (*input)(struct sk_buff *skb);
@@ -107,12 +108,15 @@
unsigned int num);
int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
unsigned int num);
-int lwtunnel_valid_encap_type(u16 encap_type);
-int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len);
+int lwtunnel_valid_encap_type(u16 encap_type,
+ struct netlink_ext_ack *extack);
+int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len,
+ struct netlink_ext_ack *extack);
int lwtunnel_build_state(u16 encap_type,
struct nlattr *encap,
unsigned int family, const void *cfg,
- struct lwtunnel_state **lws);
+ struct lwtunnel_state **lws,
+ struct netlink_ext_ack *extack);
int lwtunnel_fill_encap(struct sk_buff *skb,
struct lwtunnel_state *lwtstate);
int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate);
@@ -172,11 +176,14 @@
return -EOPNOTSUPP;
}
-static inline int lwtunnel_valid_encap_type(u16 encap_type)
+static inline int lwtunnel_valid_encap_type(u16 encap_type,
+ struct netlink_ext_ack *extack)
{
+ NL_SET_ERR_MSG(extack, "CONFIG_LWTUNNEL is not enabled in this kernel");
return -EOPNOTSUPP;
}
-static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len)
+static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len,
+ struct netlink_ext_ack *extack)
{
/* return 0 since we are not walking attr looking for
* RTA_ENCAP_TYPE attribute on nexthops.
@@ -187,7 +194,8 @@
static inline int lwtunnel_build_state(u16 encap_type,
struct nlattr *encap,
unsigned int family, const void *cfg,
- struct lwtunnel_state **lws)
+ struct lwtunnel_state **lws,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 76ed24a..b2b5419 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -4205,6 +4205,22 @@
int max_rates);
/**
+ * ieee80211_sta_set_expected_throughput - set the expected tpt for a station
+ *
+ * Call this function to notify mac80211 about a change in expected throughput
+ * to a station. A driver for a device that does rate control in firmware can
+ * call this function when the expected throughput estimate towards a station
+ * changes. The information is used to tune the CoDel AQM applied to traffic
+ * going towards that station (which can otherwise be too aggressive and cause
+ * slow stations to starve).
+ *
+ * @pubsta: the station to set throughput for.
+ * @thr: the current expected throughput in kbps.
+ */
+void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta,
+ u32 thr);
+
+/**
* ieee80211_tx_status - transmit status callback
*
* Call this function for all transmitted frames after they have been
@@ -5436,6 +5452,9 @@
*/
void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn);
+void ieee80211_manage_rx_ba_offl(struct ieee80211_vif *vif, const u8 *addr,
+ unsigned int bit);
+
/**
* ieee80211_start_rx_ba_session_offl - start a Rx BA session
*
@@ -5450,8 +5469,13 @@
* @addr: station mac address
* @tid: the rx tid
*/
-void ieee80211_start_rx_ba_session_offl(struct ieee80211_vif *vif,
- const u8 *addr, u16 tid);
+static inline void ieee80211_start_rx_ba_session_offl(struct ieee80211_vif *vif,
+ const u8 *addr, u16 tid)
+{
+ if (WARN_ON(tid >= IEEE80211_NUM_TIDS))
+ return;
+ ieee80211_manage_rx_ba_offl(vif, addr, tid);
+}
/**
* ieee80211_stop_rx_ba_session_offl - stop a Rx BA session
@@ -5467,8 +5491,13 @@
* @addr: station mac address
* @tid: the rx tid
*/
-void ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif,
- const u8 *addr, u16 tid);
+static inline void ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif,
+ const u8 *addr, u16 tid)
+{
+ if (WARN_ON(tid >= IEEE80211_NUM_TIDS))
+ return;
+ ieee80211_manage_rx_ba_offl(vif, addr, tid + IEEE80211_NUM_TIDS);
+}
/* Rate control API */
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index e4dd3a2..639b675 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -317,6 +317,7 @@
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags,
u32 nlmsg_pid);
void __neigh_set_probe_once(struct neighbour *neigh);
+bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index cd686c4..9a14a08 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -122,6 +122,9 @@
int sysctl_tcp_fin_timeout;
unsigned int sysctl_tcp_notsent_lowat;
int sysctl_tcp_tw_reuse;
+ int sysctl_tcp_sack;
+ int sysctl_tcp_window_scaling;
+ int sysctl_tcp_timestamps;
struct inet_timewait_death_row tcp_death_row;
int sysctl_max_syn_backlog;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 269fd78..537d0a0 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -18,10 +18,31 @@
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
#ifdef CONFIG_NET_CLS
-void tcf_destroy_chain(struct tcf_proto __rcu **fl);
+struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
+ bool create);
+void tcf_chain_put(struct tcf_chain *chain);
+int tcf_block_get(struct tcf_block **p_block,
+ struct tcf_proto __rcu **p_filter_chain);
+void tcf_block_put(struct tcf_block *block);
+int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ struct tcf_result *res, bool compat_mode);
+
#else
-static inline void tcf_destroy_chain(struct tcf_proto __rcu **fl)
+static inline
+int tcf_block_get(struct tcf_block **p_block,
+ struct tcf_proto __rcu **p_filter_chain)
{
+ return 0;
+}
+
+static inline void tcf_block_put(struct tcf_block *block)
+{
+}
+
+static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ struct tcf_result *res, bool compat_mode)
+{
+ return TC_ACT_UNSPEC;
}
#endif
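A sketch of how a classful qdisc adopts the tcf_block API, modeled on the sch_* conversions elsewhere in this series; the struct and function names are made up.

struct example_sched_data {
	struct tcf_block *block;
	struct tcf_proto __rcu *filter_list;
};

static int example_init(struct example_sched_data *q)
{
	return tcf_block_get(&q->block, &q->filter_list);
}

static void example_destroy(struct example_sched_data *q)
{
	tcf_block_put(q->block);	/* replaces tcf_destroy_chain() */
}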
@@ -136,6 +157,25 @@
#endif
}
+static inline void
+tcf_exts_stats_update(const struct tcf_exts *exts,
+ u64 bytes, u64 packets, u64 lastuse)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ int i;
+
+ preempt_disable();
+
+ for (i = 0; i < exts->nr_actions; i++) {
+ struct tc_action *a = exts->actions[i];
+
+ tcf_action_stats_update(a, bytes, packets, lastuse);
+ }
+
+ preempt_enable();
+#endif
+}
+
/**
* tcf_exts_exec - execute tc filter extensions
* @skb: socket buffer
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index bec46f6..2579c20 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -113,9 +113,6 @@
__qdisc_run(q);
}
-int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
- struct tcf_result *res, bool compat_mode);
-
static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
{
/* We need to take extra care in case the skb came via
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index a12a5d2..53ced67 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -29,7 +29,7 @@
struct request_sock_ops {
int family;
- int obj_size;
+ unsigned int obj_size;
struct kmem_cache *slab;
char *slab_name;
int (*rtx_syn_ack)(const struct sock *sk,
diff --git a/include/net/route.h b/include/net/route.h
index 2cc0e14..cb0a76d 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -113,13 +113,16 @@
int ip_rt_init(void);
void rt_cache_flush(struct net *net);
void rt_flush_dev(struct net_device *dev);
-struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *flp,
- const struct sk_buff *skb);
+struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *flp,
+ const struct sk_buff *skb);
+struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *flp,
+ struct fib_result *res,
+ const struct sk_buff *skb);
static inline struct rtable *__ip_route_output_key(struct net *net,
struct flowi4 *flp)
{
- return __ip_route_output_key_hash(net, flp, NULL);
+ return ip_route_output_key_hash(net, flp, NULL);
}
struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
@@ -175,6 +178,9 @@
int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
u8 tos, struct net_device *devin);
+int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src,
+ u8 tos, struct net_device *devin,
+ struct fib_result *res);
static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
u8 tos, struct net_device *devin)
@@ -184,7 +190,9 @@
rcu_read_lock();
err = ip_route_input_noref(skb, dst, src, tos, devin);
if (!err)
- skb_dst_force(skb);
+ skb_dst_force_safe(skb);
+ if (!skb_dst(skb))
+ err = -EINVAL;
rcu_read_unlock();
return err;
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 22e5209..3688501 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -8,6 +8,7 @@
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
+#include <linux/list.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
@@ -153,7 +154,7 @@
void (*walk)(struct Qdisc *, struct qdisc_walker * arg);
/* Filter manipulation */
- struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
+ struct tcf_block * (*tcf_block)(struct Qdisc *, unsigned long);
bool (*tcf_cl_offload)(u32 classid);
unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
u32 classid);
@@ -192,8 +193,13 @@
struct tcf_result {
- unsigned long class;
- u32 classid;
+ union {
+ struct {
+ unsigned long class;
+ u32 classid;
+ };
+ const struct tcf_proto *goto_tp;
+ };
};
struct tcf_proto_ops {
@@ -236,6 +242,7 @@
struct Qdisc *q;
void *data;
const struct tcf_proto_ops *ops;
+ struct tcf_chain *chain;
struct rcu_head rcu;
};
@@ -247,6 +254,19 @@
unsigned char data[QDISC_CB_PRIV_LEN];
};
+struct tcf_chain {
+ struct tcf_proto __rcu *filter_chain;
+ struct tcf_proto __rcu **p_filter_chain;
+ struct list_head list;
+ struct tcf_block *block;
+ u32 index; /* chain index */
+ unsigned int refcnt;
+};
+
+struct tcf_block {
+ struct list_head chain_list;
+};
+
static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
struct qdisc_skb_cb *qcb;
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index a8b38e1..e26763b 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -377,10 +377,11 @@
__u64 hb_nonce;
} sctp_sender_hb_info_t;
-int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp);
-int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp);
+int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
+ gfp_t gfp);
void sctp_stream_free(struct sctp_stream *stream);
void sctp_stream_clear(struct sctp_stream *stream);
+void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new);
/* What is the current SSN number for this stream? */
#define sctp_ssn_peek(stream, type, sid) \
@@ -1750,7 +1751,7 @@
__u32 default_rcv_context;
/* Stream arrays */
- struct sctp_stream *stream;
+ struct sctp_stream stream;
/* All outbound chunks go through this structure. */
struct sctp_outq outqueue;
@@ -1952,8 +1953,8 @@
const union sctp_addr *,
const union sctp_addr *);
void sctp_assoc_migrate(struct sctp_association *, struct sock *);
-void sctp_assoc_update(struct sctp_association *old,
- struct sctp_association *new);
+int sctp_assoc_update(struct sctp_association *old,
+ struct sctp_association *new);
__u32 sctp_association_get_next_tsn(struct sctp_association *);
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index b94006f..031bf16 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -8,10 +8,11 @@
__be16 dport);
u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
__be16 sport, __be16 dport);
-u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr);
+u32 secure_tcp_ts_off(const struct net *net, __be32 saddr, __be32 daddr);
u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
__be16 sport, __be16 dport);
-u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr);
+u32 secure_tcpv6_ts_off(const struct net *net,
+ const __be32 *saddr, const __be32 *daddr);
u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
__be16 sport, __be16 dport);
u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
diff --git a/include/net/sock.h b/include/net/sock.h
index f33e3d1..00d0914 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -253,6 +253,7 @@
* @sk_ll_usec: usecs to busypoll when there is no data
* @sk_allocation: allocation mode
* @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
+ * @sk_pacing_status: Pacing status (requested, handled by sch_fq)
* @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
* @sk_sndbuf: size of send buffer in bytes
* @sk_padding: unused element for alignment
@@ -396,7 +397,7 @@
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
- /* Note: 32bit hole on 64bit arches */
+ u32 sk_pacing_status; /* see enum sk_pacing */
long sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
@@ -475,6 +476,12 @@
struct rcu_head sk_rcu;
};
+enum sk_pacing {
+ SK_PACING_NONE = 0,
+ SK_PACING_NEEDED = 1,
+ SK_PACING_FQ = 2,
+};
+
#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
#define rcu_dereference_sk_user_data(sk) rcu_dereference(__sk_user_data((sk)))
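A hedged sketch of the intended handshake around sk_pacing_status (the exact call sites are assumptions, not part of this hunk): a congestion-control module asks for pacing once, and sch_fq marks the socket when it takes over pacing duty so any fallback pacing in the stack stays off:

    /* in a CC module's init hook (assumption) */
    if (sk->sk_pacing_status == SK_PACING_NONE)
            sk->sk_pacing_status = SK_PACING_NEEDED;

    /* in sch_fq when it starts pacing this flow (assumption) */
    if (sk->sk_pacing_status == SK_PACING_NEEDED)
            sk->sk_pacing_status = SK_PACING_FQ;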
@@ -900,7 +907,10 @@
static inline void sk_incoming_cpu_update(struct sock *sk)
{
- sk->sk_incoming_cpu = raw_smp_processor_id();
+ int cpu = raw_smp_processor_id();
+
+ if (unlikely(sk->sk_incoming_cpu != cpu))
+ sk->sk_incoming_cpu = cpu;
}
static inline void sock_rps_record_flow_hash(__u32 hash)
@@ -1073,6 +1083,7 @@
bool (*stream_memory_free)(const struct sock *sk);
/* Memory pressure */
void (*enter_memory_pressure)(struct sock *sk);
+ void (*leave_memory_pressure)(struct sock *sk);
atomic_long_t *memory_allocated; /* Current allocated memory. */
struct percpu_counter *sockets_allocated; /* Current number of sockets. */
/*
@@ -1081,7 +1092,7 @@
* All the __sk_mem_schedule() is of this nature: accounting
* is strict, actions are advisory and have some latency.
*/
- int *memory_pressure;
+ unsigned long *memory_pressure;
long *sysctl_mem;
int *sysctl_wmem;
int *sysctl_rmem;
@@ -1186,25 +1197,6 @@
return !!*sk->sk_prot->memory_pressure;
}
-static inline void sk_leave_memory_pressure(struct sock *sk)
-{
- int *memory_pressure = sk->sk_prot->memory_pressure;
-
- if (!memory_pressure)
- return;
-
- if (*memory_pressure)
- *memory_pressure = 0;
-}
-
-static inline void sk_enter_memory_pressure(struct sock *sk)
-{
- if (!sk->sk_prot->enter_memory_pressure)
- return;
-
- sk->sk_prot->enter_memory_pressure(sk);
-}
-
static inline long
sk_memory_allocated(const struct sock *sk)
{
@@ -2035,8 +2027,8 @@
void sk_stop_timer(struct sock *sk, struct timer_list *timer);
-int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
- unsigned int flags,
+int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
+ struct sk_buff *skb, unsigned int flags,
void (*destructor)(struct sock *sk,
struct sk_buff *skb));
int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
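With the inline helpers gone from this header, both directions of the memory-pressure transition become per-protocol callbacks. A sketch of the resulting TCP wiring, assuming the usual struct proto initializer (tcp_leave_memory_pressure is declared in include/net/tcp.h later in this merge):

    struct proto tcp_prot = {
            /* ... */
            .enter_memory_pressure = tcp_enter_memory_pressure,
            .leave_memory_pressure = tcp_leave_memory_pressure,
            .memory_pressure       = &tcp_memory_pressure, /* now unsigned long */
            /* ... */
    };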
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 929d6af..c784a6a 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -46,6 +46,7 @@
SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
SWITCHDEV_ATTR_ID_PORT_STP_STATE,
SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
+ SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT,
SWITCHDEV_ATTR_ID_PORT_MROUTER,
SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
@@ -62,6 +63,7 @@
struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */
u8 stp_state; /* PORT_STP_STATE */
unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */
+ unsigned long brport_flags_support; /* PORT_BRIDGE_FLAGS_SUPPORT */
bool mrouter; /* PORT_MROUTER */
clock_t ageing_time; /* BRIDGE_AGEING_TIME */
bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */
@@ -153,8 +155,11 @@
};
enum switchdev_notifier_type {
- SWITCHDEV_FDB_ADD = 1,
- SWITCHDEV_FDB_DEL,
+ SWITCHDEV_FDB_ADD_TO_BRIDGE = 1,
+ SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ SWITCHDEV_FDB_ADD_TO_DEVICE,
+ SWITCHDEV_FDB_DEL_TO_DEVICE,
+ SWITCHDEV_FDB_OFFLOADED,
};
struct switchdev_notifier_info {
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
index b6f1739..d576374 100644
--- a/include/net/tc_act/tc_gact.h
+++ b/include/net/tc_act/tc_gact.h
@@ -15,7 +15,7 @@
};
#define to_gact(a) ((struct tcf_gact *)a)
-static inline bool is_tcf_gact_shot(const struct tc_action *a)
+static inline bool __is_tcf_gact_act(const struct tc_action *a, int act)
{
#ifdef CONFIG_NET_CLS_ACT
struct tcf_gact *gact;
@@ -24,10 +24,21 @@
return false;
gact = to_gact(a);
- if (gact->tcf_action == TC_ACT_SHOT)
+ if (gact->tcf_action == act)
return true;
#endif
return false;
}
+
+static inline bool is_tcf_gact_shot(const struct tc_action *a)
+{
+ return __is_tcf_gact_act(a, TC_ACT_SHOT);
+}
+
+static inline bool is_tcf_gact_trap(const struct tc_action *a)
+{
+ return __is_tcf_gact_act(a, TC_ACT_TRAP);
+}
+
#endif /* __NET_TC_GACT_H */
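A sketch of how a driver's offload path might tell the two gact verdicts apart when programming hardware (the driver-side handling is an assumption, not shown in this patch):

    if (is_tcf_gact_shot(a)) {
            /* drop the frame in hardware */
    } else if (is_tcf_gact_trap(a)) {
            /* send matching frames to the CPU and stop hw processing,
             * per the TC_ACT_TRAP semantics added in pkt_cls.h below
             */
    }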
diff --git a/include/net/tcp.h b/include/net/tcp.h
index be6223c..d0751b7 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -237,9 +237,6 @@
/* sysctl variables for tcp */
-extern int sysctl_tcp_timestamps;
-extern int sysctl_tcp_window_scaling;
-extern int sysctl_tcp_sack;
extern int sysctl_tcp_fastopen;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
@@ -279,7 +276,7 @@
extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
-extern int tcp_memory_pressure;
+extern unsigned long tcp_memory_pressure;
/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
@@ -353,6 +350,8 @@
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
int flags);
+ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
+ size_t size, int flags);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
@@ -427,7 +426,7 @@
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
int flags, int *addr_len);
-void tcp_parse_options(const struct sk_buff *skb,
+void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
struct tcp_options_received *opt_rx,
int estab, struct tcp_fastopen_cookie *foc);
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
@@ -519,8 +518,9 @@
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
-__u32 cookie_init_timestamp(struct request_sock *req);
-bool cookie_timestamp_decode(struct tcp_options_received *opt);
+u64 cookie_init_timestamp(struct request_sock *req);
+bool cookie_timestamp_decode(const struct net *net,
+ struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
const struct net *net, const struct dst_entry *dst);
@@ -574,6 +574,7 @@
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
+ hrtimer_cancel(&tcp_sk(sk)->pacing_timer);
inet_csk_clear_xmit_timers(sk);
}
@@ -699,17 +700,61 @@
void tcp_send_window_probe(struct sock *sk);
-/* TCP timestamps are only 32-bits, this causes a slight
- * complication on 64-bit systems since we store a snapshot
- * of jiffies in the buffer control blocks below. We decided
- * to use only the low 32-bits of jiffies and hide the ugly
- * casts with the following macro.
+/* TCP uses 32bit jiffies to save some space.
+ * Note that this is different from tcp_time_stamp, which
+ * was historically identical to it until linux-4.13.
*/
-#define tcp_time_stamp ((__u32)(jiffies))
+#define tcp_jiffies32 ((u32)jiffies)
+
+/*
+ * Deliver a 32bit value for TCP timestamp option (RFC 7323)
+ * It is no longer tied to jiffies, but to 1 ms clock.
+ * Note: double check if you want to use tcp_jiffies32 instead of this.
+ */
+#define TCP_TS_HZ 1000
+
+static inline u64 tcp_clock_ns(void)
+{
+ return local_clock();
+}
+
+static inline u64 tcp_clock_us(void)
+{
+ return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
+}
+
+/* This should only be used in contexts where tp->tcp_mstamp is up to date */
+static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
+{
+ return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
+}
+
+/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
+static inline u32 tcp_time_stamp_raw(void)
+{
+ return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ);
+}
+
+/* Refresh 1us clock of a TCP socket,
+ * ensuring monotonically increasing values.
+ */
+static inline void tcp_mstamp_refresh(struct tcp_sock *tp)
+{
+ u64 val = tcp_clock_us();
+
+ if (val > tp->tcp_mstamp)
+ tp->tcp_mstamp = val;
+}
+
+static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
+{
+ return max_t(s64, t1 - t0, 0);
+}
static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
- return skb->skb_mstamp.stamp_jiffies;
+ return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
}
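Under the new scheme, an RTT sample in microseconds falls out of two of these helpers. A minimal sketch, assuming skb->skb_mstamp recorded the transmit time of a just-ACKed skb:

    u32 rtt_us;

    tcp_mstamp_refresh(tp);         /* advance the socket's 1us clock */
    rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp);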
@@ -774,9 +819,9 @@
/* pkts S/ACKed so far upon tx of skb, incl retrans: */
__u32 delivered;
/* start of send pipeline phase */
- struct skb_mstamp first_tx_mstamp;
+ u64 first_tx_mstamp;
/* when we reached the "delivered" count */
- struct skb_mstamp delivered_mstamp;
+ u64 delivered_mstamp;
} tx; /* only used for outgoing skbs */
union {
struct inet_skb_parm h4;
@@ -892,7 +937,7 @@
* A sample is invalid if "delivered" or "interval_us" is negative.
*/
struct rate_sample {
- struct skb_mstamp prior_mstamp; /* starting timestamp for interval */
+ u64 prior_mstamp; /* starting timestamp for interval */
u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
s32 delivered; /* number of packets delivered over interval */
long interval_us; /* time for tp->delivered to incr "delivered" */
@@ -1241,7 +1286,7 @@
if (!sysctl_tcp_slow_start_after_idle || tp->packets_out ||
ca_ops->cong_control)
return;
- delta = tcp_time_stamp - tp->lsndtime;
+ delta = tcp_jiffies32 - tp->lsndtime;
if (delta > inet_csk(sk)->icsk_rto)
tcp_cwnd_restart(sk, delta);
}
@@ -1277,6 +1322,7 @@
const struct dst_entry *dst);
void tcp_enter_memory_pressure(struct sock *sk);
+void tcp_leave_memory_pressure(struct sock *sk);
static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
@@ -1303,8 +1349,8 @@
{
const struct inet_connection_sock *icsk = &tp->inet_conn;
- return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
- tcp_time_stamp - tp->rcv_tstamp);
+ return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
+ tcp_jiffies32 - tp->rcv_tstamp);
}
static inline int tcp_fin_time(const struct sock *sk)
@@ -1395,6 +1441,7 @@
u8 keylen;
u8 family; /* AF_INET or AF_INET6 */
union tcp_md5_addr addr;
+ u8 prefixlen;
u8 key[TCP_MD5SIG_MAXKEYLEN];
struct rcu_head rcu;
};
@@ -1438,9 +1485,10 @@
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
- int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
+ int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
+ gfp_t gfp);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
- int family);
+ int family, u8 prefixlen);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
const struct sock *addr_sk);
@@ -1800,6 +1848,7 @@
const struct sock *sk,
const struct sk_buff *skb);
int (*md5_parse)(struct sock *sk,
+ int optname,
char __user *optval,
int optlen);
#endif
@@ -1825,7 +1874,7 @@
struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
const struct request_sock *req);
u32 (*init_seq)(const struct sk_buff *skb);
- u32 (*init_ts_off)(const struct sk_buff *skb);
+ u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl, struct request_sock *req,
struct tcp_fastopen_cookie *foc,
@@ -1858,7 +1907,7 @@
/* tcp_recovery.c */
extern void tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
- const struct skb_mstamp *xmit_time);
+ u64 xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);
/*
@@ -1945,4 +1994,31 @@
__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
}
+enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
+
+/*
+ * Interface for adding Upper Level Protocols over TCP
+ */
+
+#define TCP_ULP_NAME_MAX 16
+#define TCP_ULP_MAX 128
+#define TCP_ULP_BUF_MAX (TCP_ULP_NAME_MAX*TCP_ULP_MAX)
+
+struct tcp_ulp_ops {
+ struct list_head list;
+
+ /* initialize ulp */
+ int (*init)(struct sock *sk);
+ /* cleanup ulp */
+ void (*release)(struct sock *sk);
+
+ char name[TCP_ULP_NAME_MAX];
+ struct module *owner;
+};
+int tcp_register_ulp(struct tcp_ulp_ops *type);
+void tcp_unregister_ulp(struct tcp_ulp_ops *type);
+int tcp_set_ulp(struct sock *sk, const char *name);
+void tcp_get_available_ulp(char *buf, size_t len);
+void tcp_cleanup_ulp(struct sock *sk);
+
#endif /* _TCP_H */
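A sketch of both halves of the ULP interface: a module registering an operations table, and userspace attaching it by name through the new TCP_ULP socket option (the "demo" ULP and its demo_* hooks are placeholders):

    /* kernel side */
    static struct tcp_ulp_ops demo_ulp_ops = {
            .name    = "demo",
            .owner   = THIS_MODULE,
            .init    = demo_init,
            .release = demo_release,
    };

    tcp_register_ulp(&demo_ulp_ops);

    /* userspace side, on a connected TCP socket */
    setsockopt(fd, SOL_TCP, TCP_ULP, "demo", sizeof("demo"));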
diff --git a/include/net/tls.h b/include/net/tls.h
new file mode 100644
index 0000000..b89d397
--- /dev/null
+++ b/include/net/tls.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _TLS_OFFLOAD_H
+#define _TLS_OFFLOAD_H
+
+#include <linux/types.h>
+
+#include <uapi/linux/tls.h>
+
+
+/* Maximum data size carried in a TLS record */
+#define TLS_MAX_PAYLOAD_SIZE ((size_t)1 << 14)
+
+#define TLS_HEADER_SIZE 5
+#define TLS_NONCE_OFFSET TLS_HEADER_SIZE
+
+#define TLS_CRYPTO_INFO_READY(info) ((info)->cipher_type)
+
+#define TLS_RECORD_TYPE_DATA 0x17
+
+#define TLS_AAD_SPACE_SIZE 13
+
+struct tls_sw_context {
+ struct crypto_aead *aead_send;
+
+ /* Sending context */
+ char aad_space[TLS_AAD_SPACE_SIZE];
+
+ unsigned int sg_plaintext_size;
+ int sg_plaintext_num_elem;
+ struct scatterlist sg_plaintext_data[MAX_SKB_FRAGS];
+
+ unsigned int sg_encrypted_size;
+ int sg_encrypted_num_elem;
+ struct scatterlist sg_encrypted_data[MAX_SKB_FRAGS];
+
+ /* AAD | sg_plaintext_data | sg_tag */
+ struct scatterlist sg_aead_in[2];
+ /* AAD | sg_encrypted_data (data contain overhead for hdr&iv&tag) */
+ struct scatterlist sg_aead_out[2];
+};
+
+enum {
+ TLS_PENDING_CLOSED_RECORD
+};
+
+struct tls_context {
+ union {
+ struct tls_crypto_info crypto_send;
+ struct tls12_crypto_info_aes_gcm_128 crypto_send_aes_gcm_128;
+ };
+
+ void *priv_ctx;
+
+ u16 prepend_size;
+ u16 tag_size;
+ u16 overhead_size;
+ u16 iv_size;
+ char *iv;
+ u16 rec_seq_size;
+ char *rec_seq;
+
+ struct scatterlist *partially_sent_record;
+ u16 partially_sent_offset;
+ unsigned long flags;
+
+ u16 pending_open_record_frags;
+ int (*push_pending_record)(struct sock *sk, int flags);
+ void (*free_resources)(struct sock *sk);
+
+ void (*sk_write_space)(struct sock *sk);
+ void (*sk_proto_close)(struct sock *sk, long timeout);
+
+ int (*setsockopt)(struct sock *sk, int level,
+ int optname, char __user *optval,
+ unsigned int optlen);
+ int (*getsockopt)(struct sock *sk, int level,
+ int optname, char __user *optval,
+ int __user *optlen);
+};
+
+int wait_on_pending_writer(struct sock *sk, long *timeo);
+int tls_sk_query(struct sock *sk, int optname, char __user *optval,
+ int __user *optlen);
+int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
+ unsigned int optlen);
+
+
+int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx);
+int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+int tls_sw_sendpage(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags);
+void tls_sw_close(struct sock *sk, long timeout);
+
+void tls_sk_destruct(struct sock *sk, struct tls_context *ctx);
+void tls_icsk_clean_acked(struct sock *sk);
+
+int tls_push_sg(struct sock *sk, struct tls_context *ctx,
+ struct scatterlist *sg, u16 first_offset,
+ int flags);
+int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx,
+ int flags, long *timeo);
+
+static inline bool tls_is_pending_closed_record(struct tls_context *ctx)
+{
+ return test_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
+}
+
+static inline int tls_complete_pending_work(struct sock *sk,
+ struct tls_context *ctx,
+ int flags, long *timeo)
+{
+ int rc = 0;
+
+ if (unlikely(sk->sk_write_pending))
+ rc = wait_on_pending_writer(sk, timeo);
+
+ if (!rc && tls_is_pending_closed_record(ctx))
+ rc = tls_push_pending_closed_record(sk, ctx, flags, timeo);
+
+ return rc;
+}
+
+static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
+{
+ return !!ctx->partially_sent_record;
+}
+
+static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
+{
+ return tls_ctx->pending_open_record_frags;
+}
+
+static inline void tls_err_abort(struct sock *sk)
+{
+ sk->sk_err = EBADMSG; /* sk_err holds a positive errno */
+ sk->sk_error_report(sk);
+}
+
+static inline bool tls_bigint_increment(unsigned char *seq, int len)
+{
+ int i;
+
+ for (i = len - 1; i >= 0; i--) {
+ ++seq[i];
+ if (seq[i] != 0)
+ break;
+ }
+
+ return (i == -1);
+}
+
+static inline void tls_advance_record_sn(struct sock *sk,
+ struct tls_context *ctx)
+{
+ if (tls_bigint_increment(ctx->rec_seq, ctx->rec_seq_size))
+ tls_err_abort(sk);
+ tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
+ ctx->iv_size);
+}
+
+static inline void tls_fill_prepend(struct tls_context *ctx,
+ char *buf,
+ size_t plaintext_len,
+ unsigned char record_type)
+{
+ size_t pkt_len, iv_size = ctx->iv_size;
+
+ pkt_len = plaintext_len + iv_size + ctx->tag_size;
+
+ /* we cover nonce explicit here as well, so buf should be of
+ * size TLS_HEADER_SIZE + the explicit nonce size (ctx->iv_size)
+ */
+ buf[0] = record_type;
+ buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.version);
+ buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.version);
+ /* we can use IV for nonce explicit according to spec */
+ buf[3] = pkt_len >> 8;
+ buf[4] = pkt_len & 0xFF;
+ memcpy(buf + TLS_NONCE_OFFSET,
+ ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size);
+}
+
+static inline struct tls_context *tls_get_ctx(const struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ return icsk->icsk_ulp_data;
+}
+
+static inline struct tls_sw_context *tls_sw_ctx(
+ const struct tls_context *tls_ctx)
+{
+ return (struct tls_sw_context *)tls_ctx->priv_ctx;
+}
+
+static inline struct tls_offload_context *tls_offload_ctx(
+ const struct tls_context *tls_ctx)
+{
+ return (struct tls_offload_context *)tls_ctx->priv_ctx;
+}
+
+int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
+ unsigned char *record_type);
+
+#endif /* _TLS_OFFLOAD_H */
diff --git a/include/net/udp.h b/include/net/udp.h
index 3391dbd..1468dbd 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -249,13 +249,8 @@
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
-static inline struct sk_buff *
-__skb_recv_udp(struct sock *sk, unsigned int flags, int noblock, int *peeked,
- int *off, int *err)
-{
- return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
- udp_skb_destructor, peeked, off, err);
-}
+struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
+ int noblock, int *peeked, int *off, int *err);
static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
int noblock, int *err)
{
diff --git a/include/net/udplite.h b/include/net/udplite.h
index ea34052..b7a18f6 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -26,8 +26,8 @@
/* Designate sk as UDP-Lite socket */
static inline int udplite_sk_init(struct sock *sk)
{
+ udp_init_sock(sk);
udp_sk(sk)->pcflag = UDPLITE_BIT;
- sk->sk_destruct = udp_destruct_sock;
return 0;
}
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 49a5920..b816a0a 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -232,7 +232,6 @@
struct net_device *dev;
struct net *net; /* netns for packet i/o */
struct vxlan_rdst default_dst; /* default destination */
- u32 flags; /* VXLAN_F_* in vxlan.h */
struct timer_list age_timer;
spinlock_t hash_lock;
@@ -259,6 +258,7 @@
#define VXLAN_F_REMCSUM_NOPARTIAL 0x1000
#define VXLAN_F_COLLECT_METADATA 0x2000
#define VXLAN_F_GPE 0x4000
+#define VXLAN_F_IPV6_LINKLOCAL 0x8000
/* Flags that are used in the receive path. These flags must match in
* order for a socket to be shareable
@@ -273,6 +273,7 @@
/* Flags that can be set together with VXLAN_F_GPE. */
#define VXLAN_F_ALLOWED_GPE (VXLAN_F_GPE | \
VXLAN_F_IPV6 | \
+ VXLAN_F_IPV6_LINKLOCAL | \
VXLAN_F_UDP_ZERO_CSUM_TX | \
VXLAN_F_UDP_ZERO_CSUM6_TX | \
VXLAN_F_UDP_ZERO_CSUM6_RX | \
diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h
index 703a64b..a2dcfb8 100644
--- a/include/rxrpc/packet.h
+++ b/include/rxrpc/packet.h
@@ -58,6 +58,8 @@
#define RXRPC_SLOW_START_OK 0x20 /* [ACK] slow start supported */
uint8_t userStatus; /* app-layer defined status */
+#define RXRPC_USERSTATUS_SERVICE_UPGRADE 0x01 /* AuriStor service upgrade request */
+
uint8_t securityIndex; /* security protocol ID */
union {
__be16 _rsvd; /* reserved */
diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h
index 0cd4c1147..b3d1aff 100644
--- a/include/soc/fsl/qe/qe.h
+++ b/include/soc/fsl/qe/qe.h
@@ -668,6 +668,10 @@
#define UCC_FAST_GUMR_CTSS 0x00800000
#define UCC_FAST_GUMR_TXSY 0x00020000
#define UCC_FAST_GUMR_RSYN 0x00010000
+#define UCC_FAST_GUMR_SYNL_MASK 0x0000C000
+#define UCC_FAST_GUMR_SYNL_16 0x0000C000
+#define UCC_FAST_GUMR_SYNL_8 0x00008000
+#define UCC_FAST_GUMR_SYNL_AUTO 0x00004000
#define UCC_FAST_GUMR_RTSM 0x00002000
#define UCC_FAST_GUMR_REVD 0x00000400
#define UCC_FAST_GUMR_ENR 0x00000020
@@ -785,6 +789,11 @@
#define UCC_GETH_UPSMR_SMM 0x00000080
#define UCC_GETH_UPSMR_SGMM 0x00000020
+/* UCC Protocol Specific Mode Register (UPSMR), when used for HDLC */
+#define UCC_HDLC_UPSMR_RTE 0x02000000
+#define UCC_HDLC_UPSMR_BUS 0x00200000
+#define UCC_HDLC_UPSMR_CW8 0x00007000
+
/* UCC Transmit On Demand Register (UTODR) */
#define UCC_SLOW_TOD 0x8000
#define UCC_FAST_TOD 0x8000
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 29a3d53..ebe9679 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -233,6 +233,7 @@
EM(RXRPC_CONN_CLIENT_INACTIVE, "Inac") \
EM(RXRPC_CONN_CLIENT_WAITING, "Wait") \
EM(RXRPC_CONN_CLIENT_ACTIVE, "Actv") \
+ EM(RXRPC_CONN_CLIENT_UPGRADE, "Upgd") \
EM(RXRPC_CONN_CLIENT_CULLED, "Cull") \
E_(RXRPC_CONN_CLIENT_IDLE, "Idle") \
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 2b48856..9861be8 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -100,4 +100,8 @@
#define SO_COOKIE 57
+#define SCM_TIMESTAMPING_PKTINFO 58
+
+#define SO_PEERGROUPS 59
+
#endif /* __ASM_GENERIC_SOCKET_H */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 94dfa9d..f94b48b 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -82,6 +82,11 @@
BPF_PROG_ATTACH,
BPF_PROG_DETACH,
BPF_PROG_TEST_RUN,
+ BPF_PROG_GET_NEXT_ID,
+ BPF_MAP_GET_NEXT_ID,
+ BPF_PROG_GET_FD_BY_ID,
+ BPF_MAP_GET_FD_BY_ID,
+ BPF_OBJ_GET_INFO_BY_FD,
};
enum bpf_map_type {
@@ -209,6 +214,21 @@
__u32 repeat;
__u32 duration;
} test;
+
+ struct { /* anonymous struct used by BPF_*_GET_*_ID */
+ union {
+ __u32 start_id;
+ __u32 prog_id;
+ __u32 map_id;
+ };
+ __u32 next_id;
+ };
+
+ struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
+ __u32 bpf_fd;
+ __u32 info_len;
+ __aligned_u64 info;
+ } info;
} __attribute__((aligned(8)));
/* BPF helper function descriptions:
@@ -313,8 +333,11 @@
* @flags: room for future extensions
* Return: 0 on success or negative error
*
- * u64 bpf_perf_event_read(&map, index)
- * Return: Number events read or error code
+ * u64 bpf_perf_event_read(map, flags)
+ * read perf event counter value
+ * @map: pointer to perf_event_array map
+ * @flags: index of event in the map or bitmask flags
+ * Return: value of perf event counter read or error code
*
* int bpf_redirect(ifindex, flags)
* redirect to another netdev
@@ -328,11 +351,11 @@
* @skb: pointer to skb
* Return: realm if != 0
*
- * int bpf_perf_event_output(ctx, map, index, data, size)
+ * int bpf_perf_event_output(ctx, map, flags, data, size)
* output perf raw sample
* @ctx: struct pt_regs*
* @map: pointer to perf_event_array map
- * @index: index of event in the map
+ * @flags: index of event in the map or bitmask flags
* @data: data on stack to be output as raw data
* @size: size of data
* Return: 0 on success or negative error
@@ -490,6 +513,11 @@
* Get the owner uid of the socket stored inside sk_buff.
* @skb: pointer to skb
* Return: uid of the socket owner on success or overflowuid if failed.
+ *
+ * u32 bpf_set_hash(skb, hash)
+ * Set full skb->hash.
+ * @skb: pointer to skb
+ * @hash: hash to set
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -539,7 +567,8 @@
FN(xdp_adjust_head), \
FN(probe_read_str), \
FN(get_socket_cookie), \
- FN(get_socket_uid),
+ FN(get_socket_uid), \
+ FN(set_hash),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -670,4 +699,25 @@
__u32 data_end;
};
+#define BPF_TAG_SIZE 8
+
+struct bpf_prog_info {
+ __u32 type;
+ __u32 id;
+ __u8 tag[BPF_TAG_SIZE];
+ __u32 jited_prog_len;
+ __u32 xlated_prog_len;
+ __aligned_u64 jited_prog_insns;
+ __aligned_u64 xlated_prog_insns;
+} __attribute__((aligned(8)));
+
+struct bpf_map_info {
+ __u32 type;
+ __u32 id;
+ __u32 key_size;
+ __u32 value_size;
+ __u32 max_entries;
+ __u32 map_flags;
+} __attribute__((aligned(8)));
+
#endif /* _UAPI__LINUX_BPF_H__ */
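A userspace sketch of the new introspection commands: walk every loaded program ID, then turn each ID into a file descriptor (both operations require CAP_SYS_ADMIN per the syscall changes later in this merge):

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    void walk_progs(void)
    {
            union bpf_attr attr;
            __u32 id = 0;
            int fd;

            for (;;) {
                    memset(&attr, 0, sizeof(attr));
                    attr.start_id = id;
                    if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr,
                                sizeof(attr)))
                            break;          /* ENOENT: no more programs */
                    id = attr.next_id;

                    memset(&attr, 0, sizeof(attr));
                    attr.prog_id = id;
                    fd = syscall(__NR_bpf, BPF_PROG_GET_FD_BY_ID, &attr,
                                 sizeof(attr));
                    if (fd < 0)
                            continue;       /* prog unloaded under us */
                    /* fd can now feed BPF_OBJ_GET_INFO_BY_FD */
                    close(fd);
            }
    }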
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 15ac203..dd88375 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -157,6 +157,7 @@
IFLA_GSO_MAX_SIZE,
IFLA_PAD,
IFLA_XDP,
+ IFLA_EVENT,
__IFLA_MAX
};
@@ -906,9 +907,20 @@
IFLA_XDP_FD,
IFLA_XDP_ATTACHED,
IFLA_XDP_FLAGS,
+ IFLA_XDP_PROG_ID,
__IFLA_XDP_MAX,
};
#define IFLA_XDP_MAX (__IFLA_XDP_MAX - 1)
+enum {
+ IFLA_EVENT_NONE,
+ IFLA_EVENT_REBOOT, /* internal reset / reboot */
+ IFLA_EVENT_FEATURES, /* change in offload features */
+ IFLA_EVENT_BONDING_FAILOVER, /* change in active slave */
+ IFLA_EVENT_NOTIFY_PEERS, /* re-sent grat. arp/ndisc */
+ IFLA_EVENT_IGMP_RESEND, /* re-sent IGMP JOIN */
+ IFLA_EVENT_BONDING_OPTIONS, /* change in bonding options */
+};
+
#endif /* _UAPI_LINUX_IF_LINK_H */
diff --git a/include/uapi/linux/mroute.h b/include/uapi/linux/mroute.h
index 1fe4c1e..e8e5041 100644
--- a/include/uapi/linux/mroute.h
+++ b/include/uapi/linux/mroute.h
@@ -110,6 +110,60 @@
struct in_addr im_src,im_dst;
};
+/* ipmr netlink table attributes */
+enum {
+ IPMRA_TABLE_UNSPEC,
+ IPMRA_TABLE_ID,
+ IPMRA_TABLE_CACHE_RES_QUEUE_LEN,
+ IPMRA_TABLE_MROUTE_REG_VIF_NUM,
+ IPMRA_TABLE_MROUTE_DO_ASSERT,
+ IPMRA_TABLE_MROUTE_DO_PIM,
+ IPMRA_TABLE_VIFS,
+ __IPMRA_TABLE_MAX
+};
+#define IPMRA_TABLE_MAX (__IPMRA_TABLE_MAX - 1)
+
+/* ipmr netlink vif attribute format
+ * [ IPMRA_TABLE_VIFS ] - nested attribute
+ * [ IPMRA_VIF ] - nested attribute
+ * [ IPMRA_VIFA_xxx ]
+ */
+enum {
+ IPMRA_VIF_UNSPEC,
+ IPMRA_VIF,
+ __IPMRA_VIF_MAX
+};
+#define IPMRA_VIF_MAX (__IPMRA_VIF_MAX - 1)
+
+/* vif-specific attributes */
+enum {
+ IPMRA_VIFA_UNSPEC,
+ IPMRA_VIFA_IFINDEX,
+ IPMRA_VIFA_VIF_ID,
+ IPMRA_VIFA_FLAGS,
+ IPMRA_VIFA_BYTES_IN,
+ IPMRA_VIFA_BYTES_OUT,
+ IPMRA_VIFA_PACKETS_IN,
+ IPMRA_VIFA_PACKETS_OUT,
+ IPMRA_VIFA_LOCAL_ADDR,
+ IPMRA_VIFA_REMOTE_ADDR,
+ IPMRA_VIFA_PAD,
+ __IPMRA_VIFA_MAX
+};
+#define IPMRA_VIFA_MAX (__IPMRA_VIFA_MAX - 1)
+
+/* ipmr netlink cache report attributes */
+enum {
+ IPMRA_CREPORT_UNSPEC,
+ IPMRA_CREPORT_MSGTYPE,
+ IPMRA_CREPORT_VIF_ID,
+ IPMRA_CREPORT_SRC_ADDR,
+ IPMRA_CREPORT_DST_ADDR,
+ IPMRA_CREPORT_PKT,
+ __IPMRA_CREPORT_MAX
+};
+#define IPMRA_CREPORT_MAX (__IPMRA_CREPORT_MAX - 1)
+
/* That's all usermode folks */
#define MFC_ASSERT_THRESH (3*HZ) /* Maximal freq. of asserts */
diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h
index ed57211..e474681 100644
--- a/include/uapi/linux/mroute6.h
+++ b/include/uapi/linux/mroute6.h
@@ -133,4 +133,16 @@
struct in6_addr im6_src, im6_dst;
};
+/* ip6mr netlink cache report attributes */
+enum {
+ IP6MRA_CREPORT_UNSPEC,
+ IP6MRA_CREPORT_MSGTYPE,
+ IP6MRA_CREPORT_MIF_ID,
+ IP6MRA_CREPORT_SRC_ADDR,
+ IP6MRA_CREPORT_DST_ADDR,
+ IP6MRA_CREPORT_PKT,
+ __IP6MRA_CREPORT_MAX
+};
+#define IP6MRA_CREPORT_MAX (__IP6MRA_CREPORT_MAX - 1)
+
#endif /* _UAPI__LINUX_MROUTE6_H */
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index f3d16db..3199d28 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -41,6 +41,7 @@
#define NTF_MASTER 0x04
#define NTF_PROXY 0x08 /* == ATF_PUBL */
#define NTF_EXT_LEARNED 0x10
+#define NTF_OFFLOADED 0x20
#define NTF_ROUTER 0x80
/*
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index 464dcca..3d421d9 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -9,6 +9,7 @@
#ifndef _NET_TIMESTAMPING_H
#define _NET_TIMESTAMPING_H
+#include <linux/types.h>
#include <linux/socket.h> /* for SO_TIMESTAMPING */
/* SO_TIMESTAMPING gets an integer bit field comprised of these values */
@@ -26,8 +27,10 @@
SOF_TIMESTAMPING_OPT_CMSG = (1<<10),
SOF_TIMESTAMPING_OPT_TSONLY = (1<<11),
SOF_TIMESTAMPING_OPT_STATS = (1<<12),
+ SOF_TIMESTAMPING_OPT_PKTINFO = (1<<13),
+ SOF_TIMESTAMPING_OPT_TX_SWHW = (1<<14),
- SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_STATS,
+ SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_TX_SWHW,
SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) |
SOF_TIMESTAMPING_LAST
};
@@ -125,6 +128,16 @@
HWTSTAMP_FILTER_PTP_V2_SYNC,
/* PTP v2/802.AS1, any layer, Delay_req packet */
HWTSTAMP_FILTER_PTP_V2_DELAY_REQ,
+
+ /* NTP, UDP, all versions and packet modes */
+ HWTSTAMP_FILTER_NTP_ALL,
+};
+
+/* SCM_TIMESTAMPING_PKTINFO control message */
+struct scm_ts_pktinfo {
+ __u32 if_index;
+ __u32 pkt_length;
+ __u32 reserved[2];
};
#endif /* _NET_TIMESTAMPING_H */
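A receive-side sketch: once SOF_TIMESTAMPING_OPT_PKTINFO is enabled, the new control message can be picked out of recvmsg() ancillary data like any other cmsg:

    struct cmsghdr *cmsg;
    struct scm_ts_pktinfo *pktinfo;

    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
            if (cmsg->cmsg_level == SOL_SOCKET &&
                cmsg->cmsg_type == SCM_TIMESTAMPING_PKTINFO) {
                    pktinfo = (struct scm_ts_pktinfo *)CMSG_DATA(cmsg);
                    /* pktinfo->if_index: device that took the timestamp,
                     * pktinfo->pkt_length: packet length at layer 2
                     */
            }
    }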
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index b8c44b9..828aa47 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -173,6 +173,29 @@
*/
/**
+ * DOC: WPA/WPA2 EAPOL handshake offload
+ *
+ * By setting the @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK flag, drivers
+ * can indicate they support offloading EAPOL handshakes for WPA/WPA2
+ * preshared key authentication. In %NL80211_CMD_CONNECT the preshared
+ * key should be specified using %NL80211_ATTR_PMK. Drivers supporting
+ * this offload may reject the %NL80211_CMD_CONNECT when no preshared
+ * key material is provided, for example when that driver does not
+ * support setting the temporal keys through %CMD_NEW_KEY.
+ *
+ * Similarly, the @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X flag can be
+ * set by drivers indicating offload support of the PTK/GTK EAPOL
+ * handshakes during 802.1X authentication. In order to use the offload
+ * the %NL80211_CMD_CONNECT should include the %NL80211_ATTR_WANT_1X_4WAY_HS
+ * attribute flag. Drivers supporting this offload may reject the
+ * %NL80211_CMD_CONNECT when the attribute flag is not present.
+ *
+ * For 802.1X the PMK or PMK-R0 are set by providing %NL80211_ATTR_PMK
+ * using %NL80211_CMD_SET_PMK. For offloaded FT support also
+ * %NL80211_ATTR_PMKR0_NAME must be provided.
+ */
+
+/**
* DOC: FILS shared key authentication offload
*
* FILS shared key authentication offload can be advertised by drivers by
@@ -546,8 +569,13 @@
* authentication/association or not receiving a response from the AP.
* Non-zero %NL80211_ATTR_STATUS_CODE value is indicated in that case as
* well to remain backwards compatible.
- * @NL80211_CMD_ROAM: request that the card roam (currently not implemented),
- * sent as an event when the card/driver roamed by itself.
+ * @NL80211_CMD_ROAM: notification indicating the card/driver roamed by itself.
+ * When the driver roamed in a network that requires 802.1X authentication,
+ * %NL80211_ATTR_PORT_AUTHORIZED should be set if the 802.1X authentication
+ * was done by the driver or if roaming was done using Fast Transition
+ * protocol (in which case 802.1X authentication is not needed). If
+ * %NL80211_ATTR_PORT_AUTHORIZED is not set, user space is responsible for
+ * the 802.1X authentication.
* @NL80211_CMD_DISCONNECT: drop a given connection; also used to notify
* userspace that a connection was dropped by the AP or due to other
* reasons, for this the %NL80211_ATTR_DISCONNECTED_BY_AP and
@@ -947,6 +975,14 @@
* does not result in a change for the current association. Currently,
* only the %NL80211_ATTR_IE data is used and updated with this command.
*
+ * @NL80211_CMD_SET_PMK: For offloaded 4-Way handshake, set the PMK or PMK-R0
+ * for the given authenticator address (specified with &NL80211_ATTR_MAC).
+ * When &NL80211_ATTR_PMKR0_NAME is set, &NL80211_ATTR_PMK specifies the
+ * PMK-R0, otherwise it specifies the PMK.
+ * @NL80211_CMD_DEL_PMK: For offloaded 4-Way handshake, delete the previously
+ * configured PMK for the authenticator address identified by
+ * &NL80211_ATTR_MAC.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -1146,6 +1182,9 @@
NL80211_CMD_UPDATE_CONNECT_PARAMS,
+ NL80211_CMD_SET_PMK,
+ NL80211_CMD_DEL_PMK,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -2080,14 +2119,27 @@
* identifying the scope of PMKSAs. This is used with
* @NL80211_CMD_SET_PMKSA and @NL80211_CMD_DEL_PMKSA.
*
- * @NL80211_ATTR_PMK: PMK for the PMKSA identified by %NL80211_ATTR_PMKID.
- * This is used with @NL80211_CMD_SET_PMKSA.
+ * @NL80211_ATTR_PMK: attribute for passing PMK key material. Used with
+ * %NL80211_CMD_SET_PMKSA for the PMKSA identified by %NL80211_ATTR_PMKID.
+ * For %NL80211_CMD_CONNECT it is used to provide PSK for offloading 4-way
+ * handshake for WPA/WPA2-PSK networks. For 802.1X authentication it is
+ * used with %NL80211_CMD_SET_PMK. For offloaded FT support this attribute
+ * specifies the PMK-R0 if NL80211_ATTR_PMKR0_NAME is included as well.
*
* @NL80211_ATTR_SCHED_SCAN_MULTI: flag attribute which user-space shall use to
* indicate that it supports multiple active scheduled scan requests.
* @NL80211_ATTR_SCHED_SCAN_MAX_REQS: indicates maximum number of scheduled
* scan request that may be active for the device (u32).
*
+ * @NL80211_ATTR_WANT_1X_4WAY_HS: flag attribute which user-space can include
+ * in %NL80211_CMD_CONNECT to indicate that for 802.1X authentication it
+ * wants to use the supported offload of the 4-way handshake.
+ * @NL80211_ATTR_PMKR0_NAME: PMK-R0 Name for offloaded FT.
+ * @NL80211_ATTR_PORT_AUTHORIZED: flag attribute used in %NL80211_CMD_ROAMED
+ * notification indicating that 802.1X authentication was done by
+ * the driver or is not needed (because roaming used the Fast Transition
+ * protocol).
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2510,6 +2562,10 @@
NL80211_ATTR_SCHED_SCAN_MULTI,
NL80211_ATTR_SCHED_SCAN_MAX_REQS,
+ NL80211_ATTR_WANT_1X_4WAY_HS,
+ NL80211_ATTR_PMKR0_NAME,
+ NL80211_ATTR_PORT_AUTHORIZED,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -4852,6 +4908,13 @@
* RSSI threshold values to monitor rather than exactly one threshold.
* @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD: Driver SME supports FILS shared key
* authentication with %NL80211_CMD_CONNECT.
+ * @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK: Device wants to do 4-way
+ * handshake with PSK in station mode (PSK is passed as part of the connect
+ * and associate commands); doing it in the host might not be supported.
+ * @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X: Device wants to do 4-way
+ * handshake with 802.1X in station mode (will pass EAP frames to the host
+ * and accept the set_pmk/del_pmk commands); doing it in the host might not
+ * be supported.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -4872,6 +4935,8 @@
NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI,
NL80211_EXT_FEATURE_CQM_RSSI_LIST,
NL80211_EXT_FEATURE_FILS_SK_OFFLOAD,
+ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK,
+ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index d613be3..d5e2bf6 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -37,6 +37,13 @@
#define TC_ACT_QUEUED 5
#define TC_ACT_REPEAT 6
#define TC_ACT_REDIRECT 7
+#define TC_ACT_TRAP 8 /* For hw path, this means "trap to cpu"
+ * and don't further process the frame
+ * in hardware. For sw path, this is
+ * equivalent of TC_ACT_STOLEN - drop
+ * the skb and act like everything
+ * is alright.
+ */
/* There is a special kind of actions called "extended actions",
* which need a value parameter. These have a local opcode located in
@@ -51,6 +58,7 @@
(((combined) & (~TC_ACT_EXT_VAL_MASK)) == opcode)
#define TC_ACT_JUMP __TC_ACT_EXT(1)
+#define TC_ACT_GOTO_CHAIN __TC_ACT_EXT(2)
/* Action type identifiers */
enum {
@@ -361,6 +369,7 @@
TCA_BPF_FLAGS,
TCA_BPF_FLAGS_GEN,
TCA_BPF_TAG,
+ TCA_BPF_ID,
__TCA_BPF_MAX,
};
@@ -450,6 +459,14 @@
TCA_FLOWER_KEY_MPLS_TC, /* u8 - 3 bits */
TCA_FLOWER_KEY_MPLS_LABEL, /* be32 - 20 bits */
+ TCA_FLOWER_KEY_TCP_FLAGS, /* be16 */
+ TCA_FLOWER_KEY_TCP_FLAGS_MASK, /* be16 */
+
+ TCA_FLOWER_KEY_IP_TOS, /* u8 */
+ TCA_FLOWER_KEY_IP_TOS_MASK, /* u8 */
+ TCA_FLOWER_KEY_IP_TTL, /* u8 */
+ TCA_FLOWER_KEY_IP_TTL_MASK, /* u8 */
+
__TCA_FLOWER_MAX,
};
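Because goto-chain is an extended action, the destination chain index travels in the value bits of the verdict. A decoding sketch using the masks defined above:

    if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
            __u32 chain_index = action & TC_ACT_EXT_VAL_MASK;

            /* continue classification on filter chain 'chain_index' */
    }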
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index cce0613..d148505 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -146,6 +146,9 @@
RTM_GETSTATS = 94,
#define RTM_GETSTATS RTM_GETSTATS
+ RTM_NEWCACHEREPORT = 96,
+#define RTM_NEWCACHEREPORT RTM_NEWCACHEREPORT
+
__RTM_MAX,
#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
};
@@ -278,6 +281,7 @@
#define RTM_F_EQUALIZE 0x400 /* Multipath equalizer: NI */
#define RTM_F_PREFIX 0x800 /* Prefix addresses */
#define RTM_F_LOOKUP_TABLE 0x1000 /* set rtm_table to FIB lookup result */
+#define RTM_F_FIB_MATCH 0x2000 /* return full fib lookup match */
/* Reserved table identifiers */
@@ -549,6 +553,7 @@
TCA_STAB,
TCA_PAD,
TCA_DUMP_INVISIBLE,
+ TCA_CHAIN,
__TCA_MAX
};
@@ -664,6 +669,10 @@
#define RTNLGRP_NSID RTNLGRP_NSID
RTNLGRP_MPLS_NETCONF,
#define RTNLGRP_MPLS_NETCONF RTNLGRP_MPLS_NETCONF
+ RTNLGRP_IPV4_MROUTE_R,
+#define RTNLGRP_IPV4_MROUTE_R RTNLGRP_IPV4_MROUTE_R
+ RTNLGRP_IPV6_MROUTE_R,
+#define RTNLGRP_IPV6_MROUTE_R RTNLGRP_IPV6_MROUTE_R
__RTNLGRP_MAX
};
#define RTNLGRP_MAX (__RTNLGRP_MAX - 1)
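A userspace sketch: joining the new read-only multicast routing group on an rtnetlink socket, after which RTM_NEWCACHEREPORT notifications arrive as ordinary netlink messages:

    int s = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
    unsigned int grp = RTNLGRP_IPV4_MROUTE_R;

    setsockopt(s, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &grp, sizeof(grp));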
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 95cffcb..d856932 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -228,6 +228,7 @@
LINUX_MIB_TCPABORTONLINGER, /* TCPAbortOnLinger */
LINUX_MIB_TCPABORTFAILED, /* TCPAbortFailed */
LINUX_MIB_TCPMEMORYPRESSURES, /* TCPMemoryPressures */
+ LINUX_MIB_TCPMEMORYPRESSURESCHRONO, /* TCPMemoryPressuresChrono */
LINUX_MIB_TCPSACKDISCARD, /* TCPSACKDiscard */
LINUX_MIB_TCPDSACKIGNOREDOLD, /* TCPSACKIgnoredOld */
LINUX_MIB_TCPDSACKIGNOREDNOUNDO, /* TCPSACKIgnoredNoUndo */
diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h
index 975b50d..8dc2ac0 100644
--- a/include/uapi/linux/tc_act/tc_bpf.h
+++ b/include/uapi/linux/tc_act/tc_bpf.h
@@ -28,6 +28,7 @@
TCA_ACT_BPF_NAME,
TCA_ACT_BPF_PAD,
TCA_ACT_BPF_TAG,
+ TCA_ACT_BPF_ID,
__TCA_ACT_BPF_MAX,
};
#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h
index 84ea55e..afcd4be 100644
--- a/include/uapi/linux/tc_act/tc_tunnel_key.h
+++ b/include/uapi/linux/tc_act/tc_tunnel_key.h
@@ -34,6 +34,7 @@
TCA_TUNNEL_KEY_ENC_KEY_ID, /* be64 */
TCA_TUNNEL_KEY_PAD,
TCA_TUNNEL_KEY_ENC_DST_PORT, /* be16 */
+ TCA_TUNNEL_KEY_NO_CSUM, /* u8 */
__TCA_TUNNEL_KEY_MAX,
};
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 38a2b07..a5507c9 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -117,6 +117,8 @@
#define TCP_SAVED_SYN 28 /* Get SYN headers recorded for connection */
#define TCP_REPAIR_WINDOW 29 /* Get/set window parameters */
#define TCP_FASTOPEN_CONNECT 30 /* Attempt FastOpen with connect */
+#define TCP_ULP 31 /* Attach a ULP to a TCP connection */
+#define TCP_MD5SIG_EXT 32 /* TCP MD5 Signature with extensions */
struct tcp_repair_opt {
__u32 opt_code;
@@ -234,11 +236,15 @@
/* for TCP_MD5SIG socket option */
#define TCP_MD5SIG_MAXKEYLEN 80
+/* tcp_md5sig extension flags for TCP_MD5SIG_EXT */
+#define TCP_MD5SIG_FLAG_PREFIX 1 /* address prefix length */
+
struct tcp_md5sig {
struct __kernel_sockaddr_storage tcpm_addr; /* address associated */
- __u16 __tcpm_pad1; /* zero */
+ __u8 tcpm_flags; /* extension flags */
+ __u8 tcpm_prefixlen; /* address prefix */
__u16 tcpm_keylen; /* key length */
- __u32 __tcpm_pad2; /* zero */
+ __u32 __tcpm_pad; /* zero */
__u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN]; /* key (binary) */
};
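A userspace sketch of the extended option, keying an entire prefix rather than a single peer address (the address, prefix and key are illustrative):

    struct tcp_md5sig md5 = {};
    struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;

    sin->sin_family = AF_INET;
    inet_pton(AF_INET, "10.1.0.0", &sin->sin_addr);
    md5.tcpm_flags = TCP_MD5SIG_FLAG_PREFIX;
    md5.tcpm_prefixlen = 16;                /* key covers 10.1.0.0/16 */
    md5.tcpm_keylen = 6;
    memcpy(md5.tcpm_key, "secret", 6);

    setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG_EXT, &md5, sizeof(md5));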
diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h
new file mode 100644
index 0000000..cc1d21d
--- /dev/null
+++ b/include/uapi/linux/tls.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _UAPI_LINUX_TLS_H
+#define _UAPI_LINUX_TLS_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/socket.h>
+#include <linux/tcp.h>
+#include <net/tcp.h>
+
+/* TLS socket options */
+#define TLS_TX 1 /* Set transmit parameters */
+
+/* Supported versions */
+#define TLS_VERSION_MINOR(ver) ((ver) & 0xFF)
+#define TLS_VERSION_MAJOR(ver) (((ver) >> 8) & 0xFF)
+
+#define TLS_VERSION_NUMBER(id) ((((id##_VERSION_MAJOR) & 0xFF) << 8) | \
+ ((id##_VERSION_MINOR) & 0xFF))
+
+#define TLS_1_2_VERSION_MAJOR 0x3
+#define TLS_1_2_VERSION_MINOR 0x3
+#define TLS_1_2_VERSION TLS_VERSION_NUMBER(TLS_1_2)
+
+/* Supported ciphers */
+#define TLS_CIPHER_AES_GCM_128 51
+#define TLS_CIPHER_AES_GCM_128_IV_SIZE 8
+#define TLS_CIPHER_AES_GCM_128_KEY_SIZE 16
+#define TLS_CIPHER_AES_GCM_128_SALT_SIZE 4
+#define TLS_CIPHER_AES_GCM_128_TAG_SIZE 16
+#define TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE 8
+
+#define TLS_SET_RECORD_TYPE 1
+
+struct tls_crypto_info {
+ __u16 version;
+ __u16 cipher_type;
+};
+
+struct tls12_crypto_info_aes_gcm_128 {
+ struct tls_crypto_info info;
+ unsigned char iv[TLS_CIPHER_AES_GCM_128_IV_SIZE];
+ unsigned char key[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
+ unsigned char salt[TLS_CIPHER_AES_GCM_128_SALT_SIZE];
+ unsigned char rec_seq[TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE];
+};
+
+#endif /* _UAPI_LINUX_TLS_H */
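A userspace sketch of enabling kTLS transmit on an established connection, assuming the SOL_TLS socket level added elsewhere in this series and key material taken from an already-completed TLS handshake:

    struct tls12_crypto_info_aes_gcm_128 ci = {};

    ci.info.version = TLS_1_2_VERSION;
    ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
    /* iv/key/salt/rec_seq come from the userspace handshake */
    memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
    memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
    memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
    memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

    setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
    setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));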
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 172dc8e..ecb4354 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -452,38 +452,24 @@
static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
struct file *map_file, int fd)
{
- const struct perf_event_attr *attr;
struct bpf_event_entry *ee;
struct perf_event *event;
struct file *perf_file;
+ u64 value;
perf_file = perf_event_get(fd);
if (IS_ERR(perf_file))
return perf_file;
+ ee = ERR_PTR(-EOPNOTSUPP);
event = perf_file->private_data;
- ee = ERR_PTR(-EINVAL);
-
- attr = perf_event_attrs(event);
- if (IS_ERR(attr) || attr->inherit)
+ if (perf_event_read_local(event, &value) == -EOPNOTSUPP)
goto err_out;
- switch (attr->type) {
- case PERF_TYPE_SOFTWARE:
- if (attr->config != PERF_COUNT_SW_BPF_OUTPUT)
- goto err_out;
- /* fall-through */
- case PERF_TYPE_RAW:
- case PERF_TYPE_HARDWARE:
- ee = bpf_event_entry_gen(perf_file, map_file);
- if (ee)
- return ee;
- ee = ERR_PTR(-ENOMEM);
- /* fall-through */
- default:
- break;
- }
-
+ ee = bpf_event_entry_gen(perf_file, map_file);
+ if (ee)
+ return ee;
+ ee = ERR_PTR(-ENOMEM);
err_out:
fput(perf_file);
return ee;
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index dedf367..774069c 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -763,10 +763,10 @@
*
* Decode and execute eBPF instructions.
*/
-static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
+static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
+ u64 *stack)
{
- u64 stack[MAX_BPF_STACK / sizeof(u64)];
- u64 regs[MAX_BPF_REG], tmp;
+ u64 tmp;
static const void *jumptable[256] = {
[0 ... 255] = &&default_label,
/* Now overwrite non-defaults ... */
@@ -824,7 +824,7 @@
[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
/* Call instruction */
[BPF_JMP | BPF_CALL] = &&JMP_CALL,
- [BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL,
+ [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
/* Jumps */
[BPF_JMP | BPF_JA] = &&JMP_JA,
[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
@@ -874,9 +874,6 @@
#define CONT ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })
- FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
- ARG1 = (u64) (unsigned long) ctx;
-
select_insn:
goto *jumptable[insn->code];
@@ -1219,7 +1216,39 @@
WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
return 0;
}
-STACK_FRAME_NON_STANDARD(__bpf_prog_run); /* jump table */
+STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
+
+#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
+#define DEFINE_BPF_PROG_RUN(stack_size) \
+static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
+{ \
+ u64 stack[stack_size / sizeof(u64)]; \
+ u64 regs[MAX_BPF_REG]; \
+\
+ FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
+ ARG1 = (u64) (unsigned long) ctx; \
+ return ___bpf_prog_run(regs, insn, stack); \
+}
+
+#define EVAL1(FN, X) FN(X)
+#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
+#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
+#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
+#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
+#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
+
+EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
+EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
+EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
+
+#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
+
+static unsigned int (*interpreters[])(const void *ctx,
+ const struct bpf_insn *insn) = {
+EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
+EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
+EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
+};
bool bpf_prog_array_compatible(struct bpf_array *array,
const struct bpf_prog *fp)
@@ -1268,7 +1297,7 @@
*/
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
- fp->bpf_func = (void *) __bpf_prog_run;
+ fp->bpf_func = interpreters[round_down(fp->aux->stack_depth, 32) / 32];
/* eBPF JITs can rewrite the program in case constant
* blinding is active. However, in case of error during
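Worked example of the selection line in this hunk: a verifier-measured stack_depth of 40 bytes yields round_down(40, 32) / 32 = 1, so the program runs on __bpf_prog_run64() with a 64-byte stack frame instead of the old fixed 512-byte one:

    /* stack_depth = 40  ->  round_down(40, 32) / 32 = 1
     *               ->  interpreters[1] == __bpf_prog_run64()
     */
    fp->bpf_func = interpreters[round_down(fp->aux->stack_depth, 32) / 32];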
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 265a0d8..8942c82 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -22,8 +22,13 @@
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
+#include <linux/idr.h>
DEFINE_PER_CPU(int, bpf_prog_active);
+static DEFINE_IDR(prog_idr);
+static DEFINE_SPINLOCK(prog_idr_lock);
+static DEFINE_IDR(map_idr);
+static DEFINE_SPINLOCK(map_idr_lock);
int sysctl_unprivileged_bpf_disabled __read_mostly;
@@ -114,6 +119,37 @@
free_uid(user);
}
+static int bpf_map_alloc_id(struct bpf_map *map)
+{
+ int id;
+
+ spin_lock_bh(&map_idr_lock);
+ id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
+ if (id > 0)
+ map->id = id;
+ spin_unlock_bh(&map_idr_lock);
+
+ if (WARN_ON_ONCE(!id))
+ return -ENOSPC;
+
+ return id > 0 ? 0 : id;
+}
+
+static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
+{
+ if (do_idr_lock)
+ spin_lock_bh(&map_idr_lock);
+ else
+ __acquire(&map_idr_lock);
+
+ idr_remove(&map_idr, map->id);
+
+ if (do_idr_lock)
+ spin_unlock_bh(&map_idr_lock);
+ else
+ __release(&map_idr_lock);
+}
+
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
@@ -135,14 +171,21 @@
/* decrement map refcnt and schedule it for freeing via workqueue
* (unrelying map implementation ops->map_free() might sleep)
*/
-void bpf_map_put(struct bpf_map *map)
+static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
if (atomic_dec_and_test(&map->refcnt)) {
+ /* bpf_map_free_id() must be called first */
+ bpf_map_free_id(map, do_idr_lock);
INIT_WORK(&map->work, bpf_map_free_deferred);
schedule_work(&map->work);
}
}
+void bpf_map_put(struct bpf_map *map)
+{
+ __bpf_map_put(map, true);
+}
+
void bpf_map_put_with_uref(struct bpf_map *map)
{
bpf_map_put_uref(map);
@@ -236,11 +279,22 @@
if (err)
goto free_map_nouncharge;
- err = bpf_map_new_fd(map);
- if (err < 0)
- /* failed to allocate fd */
+ err = bpf_map_alloc_id(map);
+ if (err)
goto free_map;
+ err = bpf_map_new_fd(map);
+ if (err < 0) {
+ /* failed to allocate fd.
+ * bpf_map_put() is needed because the above
+ * bpf_map_alloc_id() has published the map
+ * to the userspace and the userspace may
+ * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
+ */
+ bpf_map_put(map);
+ return err;
+ }
+
trace_bpf_map_create(map, err);
return err;
@@ -295,6 +349,28 @@
return map;
}
+/* map_idr_lock should have been held */
+static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
+ bool uref)
+{
+ int refold;
+
+ refold = __atomic_add_unless(&map->refcnt, 1, 0);
+
+ if (refold >= BPF_MAX_REFCNT) {
+ __bpf_map_put(map, false);
+ return ERR_PTR(-EBUSY);
+ }
+
+ if (!refold)
+ return ERR_PTR(-ENOENT);
+
+ if (uref)
+ atomic_inc(&map->usercnt);
+
+ return map;
+}
+
int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
return -ENOTSUPP;
@@ -650,6 +726,42 @@
free_uid(user);
}
+static int bpf_prog_alloc_id(struct bpf_prog *prog)
+{
+ int id;
+
+ spin_lock_bh(&prog_idr_lock);
+ id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
+ if (id > 0)
+ prog->aux->id = id;
+ spin_unlock_bh(&prog_idr_lock);
+
+ /* id is in [1, INT_MAX) */
+ if (WARN_ON_ONCE(!id))
+ return -ENOSPC;
+
+ return id > 0 ? 0 : id;
+}
+
+static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
+{
+ /* cBPF to eBPF migrations are currently not in the idr store. */
+ if (!prog->aux->id)
+ return;
+
+ if (do_idr_lock)
+ spin_lock_bh(&prog_idr_lock);
+ else
+ __acquire(&prog_idr_lock);
+
+ idr_remove(&prog_idr, prog->aux->id);
+
+ if (do_idr_lock)
+ spin_unlock_bh(&prog_idr_lock);
+ else
+ __release(&prog_idr_lock);
+}
+
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
@@ -659,14 +771,21 @@
bpf_prog_free(aux->prog);
}
-void bpf_prog_put(struct bpf_prog *prog)
+static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
if (atomic_dec_and_test(&prog->aux->refcnt)) {
trace_bpf_prog_put_rcu(prog);
+ /* bpf_prog_free_id() must be called first */
+ bpf_prog_free_id(prog, do_idr_lock);
bpf_prog_kallsyms_del(prog);
call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
}
}
+
+void bpf_prog_put(struct bpf_prog *prog)
+{
+ __bpf_prog_put(prog, true);
+}
EXPORT_SYMBOL_GPL(bpf_prog_put);
static int bpf_prog_release(struct inode *inode, struct file *filp)
@@ -748,6 +867,24 @@
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);
+/* prog_idr_lock should have been held */
+static struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
+{
+ int refold;
+
+ refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);
+
+ if (refold >= BPF_MAX_REFCNT) {
+ __bpf_prog_put(prog, false);
+ return ERR_PTR(-EBUSY);
+ }
+
+ if (!refold)
+ return ERR_PTR(-ENOENT);
+
+ return prog;
+}
+
static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
struct fd f = fdget(ufd);
@@ -815,7 +952,9 @@
attr->kern_version != LINUX_VERSION_CODE)
return -EINVAL;
- if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
+ if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
+ type != BPF_PROG_TYPE_CGROUP_SKB &&
+ !capable(CAP_SYS_ADMIN))
return -EPERM;
/* plain bpf_prog allocation */
@@ -855,11 +994,22 @@
if (err < 0)
goto free_used_maps;
- err = bpf_prog_new_fd(prog);
- if (err < 0)
- /* failed to allocate fd */
+ err = bpf_prog_alloc_id(prog);
+ if (err)
goto free_used_maps;
+ err = bpf_prog_new_fd(prog);
+ if (err < 0) {
+ /* failed to allocate fd.
+ * bpf_prog_put() is needed because the above
+ * bpf_prog_alloc_id() has published the prog
+ * to the userspace and the userspace may
+ * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
+ */
+ bpf_prog_put(prog);
+ return err;
+ }
+
bpf_prog_kallsyms_add(prog);
trace_bpf_prog_load(prog, err);
return err;
@@ -997,6 +1147,237 @@
return ret;
}
+#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
+
+static int bpf_obj_get_next_id(const union bpf_attr *attr,
+ union bpf_attr __user *uattr,
+ struct idr *idr,
+ spinlock_t *lock)
+{
+ u32 next_id = attr->start_id;
+ int err = 0;
+
+ if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
+ return -EINVAL;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ next_id++;
+ spin_lock_bh(lock);
+ if (!idr_get_next(idr, &next_id))
+ err = -ENOENT;
+ spin_unlock_bh(lock);
+
+ if (!err)
+ err = put_user(next_id, &uattr->next_id);
+
+ return err;
+}
+
+#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
+
+static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
+{
+ struct bpf_prog *prog;
+ u32 id = attr->prog_id;
+ int fd;
+
+ if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
+ return -EINVAL;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ spin_lock_bh(&prog_idr_lock);
+ prog = idr_find(&prog_idr, id);
+ if (prog)
+ prog = bpf_prog_inc_not_zero(prog);
+ else
+ prog = ERR_PTR(-ENOENT);
+ spin_unlock_bh(&prog_idr_lock);
+
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+
+ fd = bpf_prog_new_fd(prog);
+ if (fd < 0)
+ bpf_prog_put(prog);
+
+ return fd;
+}
+
+#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD map_id
+
+static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
+{
+ struct bpf_map *map;
+ u32 id = attr->map_id;
+ int fd;
+
+ if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID))
+ return -EINVAL;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ spin_lock_bh(&map_idr_lock);
+ map = idr_find(&map_idr, id);
+ if (map)
+ map = bpf_map_inc_not_zero(map, true);
+ else
+ map = ERR_PTR(-ENOENT);
+ spin_unlock_bh(&map_idr_lock);
+
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ fd = bpf_map_new_fd(map);
+ if (fd < 0)
+ bpf_map_put(map);
+
+ return fd;
+}
+
+static int check_uarg_tail_zero(void __user *uaddr,
+ size_t expected_size,
+ size_t actual_size)
+{
+ unsigned char __user *addr;
+ unsigned char __user *end;
+ unsigned char val;
+ int err;
+
+ if (actual_size <= expected_size)
+ return 0;
+
+ addr = uaddr + expected_size;
+ end = uaddr + actual_size;
+
+ for (; addr < end; addr++) {
+ err = get_user(val, addr);
+ if (err)
+ return err;
+ if (val)
+ return -E2BIG;
+ }
+
+ return 0;
+}
+
+static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
+ struct bpf_prog_info info = {};
+ u32 info_len = attr->info.info_len;
+ char __user *uinsns;
+ u32 ulen;
+ int err;
+
+ err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
+ if (err)
+ return err;
+ info_len = min_t(u32, sizeof(info), info_len);
+
+ if (copy_from_user(&info, uinfo, info_len))
+ return -EFAULT;
+
+ info.type = prog->type;
+ info.id = prog->aux->id;
+
+ memcpy(info.tag, prog->tag, sizeof(prog->tag));
+
+ if (!capable(CAP_SYS_ADMIN)) {
+ info.jited_prog_len = 0;
+ info.xlated_prog_len = 0;
+ goto done;
+ }
+
+ ulen = info.jited_prog_len;
+ info.jited_prog_len = prog->jited_len;
+ if (info.jited_prog_len && ulen) {
+ uinsns = u64_to_user_ptr(info.jited_prog_insns);
+ ulen = min_t(u32, info.jited_prog_len, ulen);
+ if (copy_to_user(uinsns, prog->bpf_func, ulen))
+ return -EFAULT;
+ }
+
+ ulen = info.xlated_prog_len;
+ info.xlated_prog_len = bpf_prog_size(prog->len);
+ if (info.xlated_prog_len && ulen) {
+ uinsns = u64_to_user_ptr(info.xlated_prog_insns);
+ ulen = min_t(u32, info.xlated_prog_len, ulen);
+ if (copy_to_user(uinsns, prog->insnsi, ulen))
+ return -EFAULT;
+ }
+
+done:
+ if (copy_to_user(uinfo, &info, info_len) ||
+ put_user(info_len, &uattr->info.info_len))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int bpf_map_get_info_by_fd(struct bpf_map *map,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
+ struct bpf_map_info info = {};
+ u32 info_len = attr->info.info_len;
+ int err;
+
+ err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
+ if (err)
+ return err;
+ info_len = min_t(u32, sizeof(info), info_len);
+
+ info.type = map->map_type;
+ info.id = map->id;
+ info.key_size = map->key_size;
+ info.value_size = map->value_size;
+ info.max_entries = map->max_entries;
+ info.map_flags = map->map_flags;
+
+ if (copy_to_user(uinfo, &info, info_len) ||
+ put_user(info_len, &uattr->info.info_len))
+ return -EFAULT;
+
+ return 0;
+}
+
+#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
+
+static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ int ufd = attr->info.bpf_fd;
+ struct fd f;
+ int err;
+
+ if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
+ return -EINVAL;
+
+ f = fdget(ufd);
+ if (!f.file)
+ return -EBADFD;
+
+ if (f.file->f_op == &bpf_prog_fops)
+ err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
+ uattr);
+ else if (f.file->f_op == &bpf_map_fops)
+ err = bpf_map_get_info_by_fd(f.file->private_data, attr,
+ uattr);
+ else
+ err = -EINVAL;
+
+ fdput(f);
+ return err;
+}
+
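On the userspace side, BPF_OBJ_GET_INFO_BY_FD is driven like this (sketch, reusing the hypothetical sys_bpf() wrapper from the earlier example; zeroing the info buffer first matters because check_uarg_tail_zero() rejects non-zero tail bytes when userspace passes a larger struct than the kernel knows):

static int get_prog_info(int fd, struct bpf_prog_info *info)
{
	union bpf_attr attr;

	memset(info, 0, sizeof(*info));
	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = fd;
	attr.info.info_len = sizeof(*info);
	attr.info.info = (__u64)(unsigned long)info;

	/* on success the kernel writes back the info_len it filled */
	return sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
}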
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
union bpf_attr attr = {};
@@ -1016,23 +1397,10 @@
* user-space does not rely on any kernel feature
* extensions we don't know about yet.
*/
- if (size > sizeof(attr)) {
- unsigned char __user *addr;
- unsigned char __user *end;
- unsigned char val;
-
- addr = (void __user *)uattr + sizeof(attr);
- end = (void __user *)uattr + size;
-
- for (; addr < end; addr++) {
- err = get_user(val, addr);
- if (err)
- return err;
- if (val)
- return -E2BIG;
- }
- size = sizeof(attr);
- }
+ err = check_uarg_tail_zero(uattr, sizeof(attr), size);
+ if (err)
+ return err;
+ size = min_t(u32, size, sizeof(attr));
/* copy attributes from user space, may be less than sizeof(bpf_attr) */
if (copy_from_user(&attr, uattr, size) != 0)
@@ -1074,6 +1442,23 @@
case BPF_PROG_TEST_RUN:
err = bpf_prog_test_run(&attr, uattr);
break;
+ case BPF_PROG_GET_NEXT_ID:
+ err = bpf_obj_get_next_id(&attr, uattr,
+ &prog_idr, &prog_idr_lock);
+ break;
+ case BPF_MAP_GET_NEXT_ID:
+ err = bpf_obj_get_next_id(&attr, uattr,
+ &map_idr, &map_idr_lock);
+ break;
+ case BPF_PROG_GET_FD_BY_ID:
+ err = bpf_prog_get_fd_by_id(&attr);
+ break;
+ case BPF_MAP_GET_FD_BY_ID:
+ err = bpf_map_get_fd_by_id(&attr);
+ break;
+ case BPF_OBJ_GET_INFO_BY_FD:
+ err = bpf_obj_get_info_by_fd(&attr, uattr);
+ break;
default:
err = -EINVAL;
break;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 339c8a1..44b97d9 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -758,15 +758,26 @@
}
/* check access to 'struct bpf_context' fields */
-static int check_ctx_access(struct bpf_verifier_env *env, int off, int size,
+static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
enum bpf_access_type t, enum bpf_reg_type *reg_type)
{
+ int ctx_field_size = 0;
+
/* for the analyzer, ctx accesses are already validated and converted */
if (env->analyzer_ops)
return 0;
if (env->prog->aux->ops->is_valid_access &&
- env->prog->aux->ops->is_valid_access(off, size, t, reg_type)) {
+ env->prog->aux->ops->is_valid_access(off, size, t, reg_type, &ctx_field_size)) {
+ /* a non-zero ctx_field_size indicates:
+ * . For this field, the prog type specific ctx conversion algorithm
+ * only supports whole field access.
+ * . This ctx access is a candidate for a later verifier transformation
+ * to load the whole field and then apply a mask to get the correct result.
+ */
+ if (ctx_field_size)
+ env->insn_aux_data[insn_idx].ctx_field_size = ctx_field_size;
+
/* remember the offset of last byte accessed in ctx */
if (env->prog->aux->max_ctx_offset < off + size)
env->prog->aux->max_ctx_offset = off + size;
@@ -868,7 +879,7 @@
* if t==write && value_regno==-1, some unknown value is stored into memory
* if t==read && value_regno==-1, don't care what we read from memory
*/
-static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
+static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
int bpf_size, enum bpf_access_type t,
int value_regno)
{
@@ -911,7 +922,7 @@
verbose("R%d leaks addr into ctx\n", value_regno);
return -EACCES;
}
- err = check_ctx_access(env, off, size, t, &reg_type);
+ err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
if (!err && t == BPF_READ && value_regno >= 0) {
mark_reg_unknown_value_and_range(state->regs,
value_regno);
@@ -926,6 +937,10 @@
verbose("invalid stack off=%d size=%d\n", off, size);
return -EACCES;
}
+
+ if (env->prog->aux->stack_depth < -off)
+ env->prog->aux->stack_depth = -off;
+
if (t == BPF_WRITE) {
if (!env->allow_ptr_leaks &&
state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL &&
@@ -968,7 +983,7 @@
return err;
}
-static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
+static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
{
struct bpf_reg_state *regs = env->cur_state.regs;
int err;
@@ -990,13 +1005,13 @@
return err;
/* check whether atomic_add can read the memory */
- err = check_mem_access(env, insn->dst_reg, insn->off,
+ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ, -1);
if (err)
return err;
/* check whether atomic_add can write into the same memory */
- return check_mem_access(env, insn->dst_reg, insn->off,
+ return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE, -1);
}
@@ -1032,6 +1047,9 @@
return -EACCES;
}
+ if (env->prog->aux->stack_depth < -off)
+ env->prog->aux->stack_depth = -off;
+
if (meta && meta->raw_mode) {
meta->access_size = access_size;
meta->regno = regno;
@@ -1339,8 +1357,8 @@
if (reg->type != PTR_TO_PACKET &&
reg->type != PTR_TO_PACKET_END)
continue;
- reg->type = UNKNOWN_VALUE;
- reg->imm = 0;
+ __mark_reg_unknown_value(state->spilled_regs,
+ i / BPF_REG_SIZE);
}
}
@@ -1409,7 +1427,7 @@
* is inferred from register state.
*/
for (i = 0; i < meta.access_size; i++) {
- err = check_mem_access(env, meta.regno, i, BPF_B, BPF_WRITE, -1);
+ err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
if (err)
return err;
}
@@ -1945,6 +1963,7 @@
*/
regs[insn->dst_reg].type = CONST_IMM;
regs[insn->dst_reg].imm = insn->imm;
+ regs[insn->dst_reg].id = 0;
regs[insn->dst_reg].max_value = insn->imm;
regs[insn->dst_reg].min_value = insn->imm;
regs[insn->dst_reg].min_align = calc_align(insn->imm);
@@ -2402,6 +2421,7 @@
regs[insn->dst_reg].type = CONST_IMM;
regs[insn->dst_reg].imm = imm;
+ regs[insn->dst_reg].id = 0;
return 0;
}
@@ -2821,6 +2841,8 @@
return false;
if (i % BPF_REG_SIZE)
continue;
+ if (old->stack_slot_type[i] != STACK_SPILL)
+ continue;
if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE],
&cur->spilled_regs[i / BPF_REG_SIZE],
sizeof(old->spilled_regs[0])))
@@ -2982,18 +3004,12 @@
/* check that memory (src_reg + off) is readable,
* the state of dst_reg will be updated by this func
*/
- err = check_mem_access(env, insn->src_reg, insn->off,
+ err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ,
insn->dst_reg);
if (err)
return err;
- if (BPF_SIZE(insn->code) != BPF_W &&
- BPF_SIZE(insn->code) != BPF_DW) {
- insn_idx++;
- continue;
- }
-
prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
if (*prev_src_type == NOT_INIT) {
@@ -3021,7 +3037,7 @@
enum bpf_reg_type *prev_dst_type, dst_reg_type;
if (BPF_MODE(insn->code) == BPF_XADD) {
- err = check_xadd(env, insn);
+ err = check_xadd(env, insn_idx, insn);
if (err)
return err;
insn_idx++;
@@ -3040,7 +3056,7 @@
dst_reg_type = regs[insn->dst_reg].type;
/* check that memory (dst_reg + off) is writeable */
- err = check_mem_access(env, insn->dst_reg, insn->off,
+ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE,
insn->src_reg);
if (err)
@@ -3069,7 +3085,7 @@
return err;
/* check that memory (dst_reg + off) is writeable */
- err = check_mem_access(env, insn->dst_reg, insn->off,
+ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE,
-1);
if (err)
@@ -3167,7 +3183,8 @@
insn_idx++;
}
- verbose("processed %d insns\n", insn_processed);
+ verbose("processed %d insns, stack depth %d\n",
+ insn_processed, env->prog->aux->stack_depth);
return 0;
}
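The new stack_depth bookkeeping records the deepest r10-relative access seen during verification. A hypothetical two-insn program, for illustration:

struct bpf_insn prog[] = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),	/* off == -8 */
	BPF_EXIT_INSN(),
};
/* check_mem_access() sees off == -8, so verification finishes with
 * aux->stack_depth == 8, letting a JIT reserve 8 bytes of stack
 * rather than the full MAX_BPF_STACK (512).
 */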
@@ -3371,7 +3388,7 @@
struct bpf_insn insn_buf[16], *insn;
struct bpf_prog *new_prog;
enum bpf_access_type type;
- int i, cnt, delta = 0;
+ int i, cnt, off, size, ctx_field_size, is_narrower_load, delta = 0;
if (ops->gen_prologue) {
cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
@@ -3411,11 +3428,39 @@
if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
continue;
+ off = insn->off;
+ size = bpf_size_to_bytes(BPF_SIZE(insn->code));
+ ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
+ is_narrower_load = (type == BPF_READ && size < ctx_field_size);
+
+ /* If the read access is a narrower load of the field,
+ * convert it to a 4/8-byte load, to minimize program type
+ * specific convert_ctx_access changes. If the conversion is
+ * successful, we will apply a proper mask to the result.
+ */
+ if (is_narrower_load) {
+ int size_code = BPF_H;
+
+ if (ctx_field_size == 4)
+ size_code = BPF_W;
+ else if (ctx_field_size == 8)
+ size_code = BPF_DW;
+ insn->off = off & ~(ctx_field_size - 1);
+ insn->code = BPF_LDX | BPF_MEM | size_code;
+ }
cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
verbose("bpf verifier is misconfigured\n");
return -EINVAL;
}
+ if (is_narrower_load) {
+ if (ctx_field_size <= 4)
+ insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
+ (1 << size * 8) - 1);
+ else
+ insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
+ (1 << size * 8) - 1);
+ }
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
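A concrete (hypothetical) instance of the rewrite above:

/* A field-aligned 2-byte read of an 8-byte ctx field,
 *	r0 = *(u16 *)(r1 + off)
 * is widened and masked as
 *	r0 = *(u64 *)(r1 + off)	// BPF_LDX | BPF_MEM | BPF_DW
 *	r0 &= 0xffff		// BPF_ALU64_IMM(BPF_AND, r0, (1 << 16) - 1)
 * which is exactly what the is_narrower_load path emits for
 * ctx_field_size == 8 and size == 2.
 */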
@@ -3462,6 +3507,7 @@
* the program array.
*/
prog->cb_access = 1;
+ env->prog->aux->stack_depth = MAX_BPF_STACK;
/* mark bpf_tail_call as a different opcode to avoid
* conditional branch in the interpreter for every normal
@@ -3469,7 +3515,7 @@
* that doesn't support bpf_tail_call yet
*/
insn->imm = 0;
- insn->code |= BPF_X;
+ insn->code = BPF_JMP | BPF_TAIL_CALL;
continue;
}
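Why the dedicated pseudo-opcode helps, schematically (not the real interpreter source):

/* With BPF_JMP | BPF_TAIL_CALL as its own entry in the interpreter's
 * goto-label jump table, e.g.
 *	[BPF_JMP | BPF_CALL]      = &&JMP_CALL,
 *	[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
 * tail calls dispatch directly, instead of the CALL handler testing
 * insn->imm on every normal helper call.
 */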
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6c4e523..9afab55 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3636,10 +3636,10 @@
* will not be local and we cannot read them atomically
* - must not have a pmu::count method
*/
-u64 perf_event_read_local(struct perf_event *event)
+int perf_event_read_local(struct perf_event *event, u64 *value)
{
unsigned long flags;
- u64 val;
+ int ret = 0;
/*
* Disabling interrupts avoids all counter scheduling (context
@@ -3647,25 +3647,37 @@
*/
local_irq_save(flags);
- /* If this is a per-task event, it must be for current */
- WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) &&
- event->hw.target != current);
-
- /* If this is a per-CPU event, it must be for this CPU */
- WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) &&
- event->cpu != smp_processor_id());
-
/*
* It must not be an event with inherit set; we cannot read
* all child counters from atomic context.
*/
- WARN_ON_ONCE(event->attr.inherit);
+ if (event->attr.inherit) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
/*
* It must not have a pmu::count method; those are not
* NMI safe.
*/
- WARN_ON_ONCE(event->pmu->count);
+ if (event->pmu->count) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /* If this is a per-task event, it must be for current */
+ if ((event->attach_state & PERF_ATTACH_TASK) &&
+ event->hw.target != current) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* If this is a per-CPU event, it must be for this CPU */
+ if (!(event->attach_state & PERF_ATTACH_TASK) &&
+ event->cpu != smp_processor_id()) {
+ ret = -EINVAL;
+ goto out;
+ }
/*
* If the event is currently on this CPU, its either a per-task event,
@@ -3675,10 +3687,11 @@
if (event->oncpu == smp_processor_id())
event->pmu->read(event);
- val = local64_read(&event->count);
+ *value = local64_read(&event->count);
+out:
local_irq_restore(flags);
- return val;
+ return ret;
}
static int perf_event_read(struct perf_event *event, bool group)
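With the signature change, callers get a real error code instead of a WARN splat. A caller-side sketch (hypothetical wrapper, for illustration only):

static int read_counter_or_fail(struct perf_event *event, u64 *out)
{
	int err;

	err = perf_event_read_local(event, out);
	if (err)	/* -EOPNOTSUPP or -EINVAL per the checks above */
		return err;

	/* *out now holds a valid counter value */
	return 0;
}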
@@ -8058,12 +8071,8 @@
bool is_kprobe, is_tracepoint;
struct bpf_prog *prog;
- if (event->attr.type == PERF_TYPE_HARDWARE ||
- event->attr.type == PERF_TYPE_SOFTWARE)
- return perf_event_set_bpf_handler(event, prog_fd);
-
if (event->attr.type != PERF_TYPE_TRACEPOINT)
- return -EINVAL;
+ return perf_event_set_bpf_handler(event, prog_fd);
if (event->tp_event->prog)
return -EEXIST;
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 460a031..9d3ec82 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -234,7 +234,8 @@
unsigned int cpu = smp_processor_id();
u64 index = flags & BPF_F_INDEX_MASK;
struct bpf_event_entry *ee;
- struct perf_event *event;
+ u64 value = 0;
+ int err;
if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
return -EINVAL;
@@ -247,21 +248,14 @@
if (!ee)
return -ENOENT;
- event = ee->event;
- if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
- event->attr.type != PERF_TYPE_RAW))
- return -EINVAL;
-
- /* make sure event is local and doesn't have pmu::count */
- if (unlikely(event->oncpu != cpu || event->pmu->count))
- return -EINVAL;
-
+ err = perf_event_read_local(ee->event, &value);
/*
- * we don't know if the function is run successfully by the
- * return value. It can be judged in other places, such as
- * eBPF programs.
+ * this API is ugly since we lose the [-22..-2] range of valid
+ * counter values, but that's uapi
*/
- return perf_event_read_local(event);
+ if (err)
+ return err;
+ return value;
}
static const struct bpf_func_proto bpf_perf_event_read_proto = {
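From a BPF program's point of view, the error/value overlap the comment describes looks like this (sketch; the map name and section are hypothetical, the helper and BPF_F_CURRENT_CPU are uapi):

SEC("kprobe/sys_write")
int count_cycles(struct pt_regs *ctx)
{
	__u64 v = bpf_perf_event_read(&cycles_map, BPF_F_CURRENT_CPU);

	/* values in [(u64)-22 .. (u64)-1] are indistinguishable from
	 * errno returns; treat them as errors, per the uapi caveat
	 */
	if (v >= (__u64)-22)
		return 0;
	/* ... use v ... */
	return 0;
}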
@@ -272,14 +266,16 @@
.arg2_type = ARG_ANYTHING,
};
+static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd);
+
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
u64 flags, struct perf_raw_record *raw)
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
+ struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd);
unsigned int cpu = smp_processor_id();
u64 index = flags & BPF_F_INDEX_MASK;
- struct perf_sample_data sample_data;
struct bpf_event_entry *ee;
struct perf_event *event;
@@ -300,9 +296,9 @@
if (unlikely(event->oncpu != cpu))
return -EOPNOTSUPP;
- perf_sample_data_init(&sample_data, 0, 0);
- sample_data.raw = raw;
- perf_event_output(event, &sample_data, regs);
+ perf_sample_data_init(sd, 0, 0);
+ sd->raw = raw;
+ perf_event_output(event, sd, regs);
return 0;
}
@@ -483,7 +479,7 @@
/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
- enum bpf_reg_type *reg_type)
+ enum bpf_reg_type *reg_type, int *ctx_field_size)
{
if (off < 0 || off >= sizeof(struct pt_regs))
return false;
@@ -566,7 +562,7 @@
}
static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
- enum bpf_reg_type *reg_type)
+ enum bpf_reg_type *reg_type, int *ctx_field_size)
{
if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
return false;
@@ -585,17 +581,26 @@
};
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
- enum bpf_reg_type *reg_type)
+ enum bpf_reg_type *reg_type, int *ctx_field_size)
{
+ int sample_period_off;
+
if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
return false;
if (type != BPF_READ)
return false;
if (off % size != 0)
return false;
- if (off == offsetof(struct bpf_perf_event_data, sample_period)) {
- if (size != sizeof(u64))
- return false;
+
+ /* permit 1-, 2- and 4-byte narrower reads and 8-byte normal read access to sample_period */
+ sample_period_off = offsetof(struct bpf_perf_event_data, sample_period);
+ if (off >= sample_period_off && off < sample_period_off + sizeof(__u64)) {
+ *ctx_field_size = 8;
+#ifdef __LITTLE_ENDIAN
+ return (off & 0x7) == 0 && size <= 8 && (size & (size - 1)) == 0;
+#else
+ return ((off & 0x7) + size) == 8 && size <= 8 && (size & (size - 1)) == 0;
+#endif
} else {
if (size != sizeof(long))
return false;
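What the relaxed check buys a perf_event program, sketched (little-endian layout assumed; SEC() and the access pattern are illustrative):

SEC("perf_event")
int on_sample(struct bpf_perf_event_data *ctx)
{
	/* a 4-byte narrow read of the 8-byte sample_period field,
	 * now accepted by pe_prog_is_valid_access() and rewritten
	 * by the verifier into an 8-byte load plus mask
	 */
	__u32 period_lo = *(__u32 *)&ctx->sample_period;

	return period_lo != 0;
}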
diff --git a/lib/nlattr.c b/lib/nlattr.c
index a7e0b16..fb52435 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -352,7 +352,7 @@
{
struct nlattr *nla;
- nla = (struct nlattr *) skb_put(skb, nla_total_size(attrlen));
+ nla = skb_put(skb, nla_total_size(attrlen));
nla->nla_type = attrtype;
nla->nla_len = nla_attr_size(attrlen);
@@ -398,12 +398,7 @@
*/
void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
{
- void *start;
-
- start = skb_put(skb, NLA_ALIGN(attrlen));
- memset(start, 0, NLA_ALIGN(attrlen));
-
- return start;
+ return skb_put_zero(skb, NLA_ALIGN(attrlen));
}
EXPORT_SYMBOL(__nla_reserve_nohdr);
@@ -617,7 +612,7 @@
if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
return -EMSGSIZE;
- memcpy(skb_put(skb, attrlen), data, attrlen);
+ skb_put_data(skb, data, attrlen);
return 0;
}
EXPORT_SYMBOL(nla_append);
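The skb_put_data()/skb_put_zero() conversions here and throughout the rest of this merge rely on helpers whose semantics are essentially the following (sketch; see include/linux/skbuff.h for the real definitions — skb_put() itself now returns void *, so the old casts to the header type are no longer needed):

static inline void *skb_put_data(struct sk_buff *skb, const void *data,
				 unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memcpy(tmp, data, len);
	return tmp;
}

static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);
	return tmp;
}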
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index be88cba..d9d5a41 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -84,6 +84,7 @@
} test[MAX_SUBTESTS];
int (*fill_helper)(struct bpf_test *self);
__u8 frag_data[MAX_DATA];
+ int stack_depth; /* for eBPF only, since tests don't call verifier */
};
/* Large test cases need separate allocation and fill handler. */
@@ -434,6 +435,30 @@
return 0;
}
+static int bpf_fill_jump_around_ld_abs(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct bpf_insn *insn;
+ int i = 0;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[i++] = BPF_MOV64_REG(R6, R1);
+ insn[i++] = BPF_LD_ABS(BPF_B, 0);
+ insn[i] = BPF_JMP_IMM(BPF_JEQ, R0, 10, len - i - 2);
+ i++;
+ while (i < len - 1)
+ insn[i++] = BPF_LD_ABS(BPF_B, 1);
+ insn[i] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
static int __bpf_fill_stxdw(struct bpf_test *self, int size)
{
unsigned int len = BPF_MAXINSNS;
@@ -455,6 +480,7 @@
self->u.ptr.insns = insn;
self->u.ptr.len = len;
+ self->stack_depth = 40;
return 0;
}
@@ -2317,7 +2343,8 @@
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6},
- { { 38, 256 } }
+ { { 38, 256 } },
+ .stack_depth = 64,
},
/* BPF_ALU | BPF_MOV | BPF_X */
{
@@ -4169,6 +4196,7 @@
INTERNAL,
{ },
{ { 0, 0xff } },
+ .stack_depth = 40,
},
{
"ST_MEM_B: Store/Load byte: max positive",
@@ -4181,6 +4209,7 @@
INTERNAL,
{ },
{ { 0, 0x7f } },
+ .stack_depth = 40,
},
{
"STX_MEM_B: Store/Load byte: max negative",
@@ -4194,6 +4223,7 @@
INTERNAL,
{ },
{ { 0, 0xff } },
+ .stack_depth = 40,
},
{
"ST_MEM_H: Store/Load half word: max negative",
@@ -4206,6 +4236,7 @@
INTERNAL,
{ },
{ { 0, 0xffff } },
+ .stack_depth = 40,
},
{
"ST_MEM_H: Store/Load half word: max positive",
@@ -4218,6 +4249,7 @@
INTERNAL,
{ },
{ { 0, 0x7fff } },
+ .stack_depth = 40,
},
{
"STX_MEM_H: Store/Load half word: max negative",
@@ -4231,6 +4263,7 @@
INTERNAL,
{ },
{ { 0, 0xffff } },
+ .stack_depth = 40,
},
{
"ST_MEM_W: Store/Load word: max negative",
@@ -4243,6 +4276,7 @@
INTERNAL,
{ },
{ { 0, 0xffffffff } },
+ .stack_depth = 40,
},
{
"ST_MEM_W: Store/Load word: max positive",
@@ -4255,6 +4289,7 @@
INTERNAL,
{ },
{ { 0, 0x7fffffff } },
+ .stack_depth = 40,
},
{
"STX_MEM_W: Store/Load word: max negative",
@@ -4268,6 +4303,7 @@
INTERNAL,
{ },
{ { 0, 0xffffffff } },
+ .stack_depth = 40,
},
{
"ST_MEM_DW: Store/Load double word: max negative",
@@ -4280,6 +4316,7 @@
INTERNAL,
{ },
{ { 0, 0xffffffff } },
+ .stack_depth = 40,
},
{
"ST_MEM_DW: Store/Load double word: max negative 2",
@@ -4297,6 +4334,7 @@
INTERNAL,
{ },
{ { 0, 0x1 } },
+ .stack_depth = 40,
},
{
"ST_MEM_DW: Store/Load double word: max positive",
@@ -4309,6 +4347,7 @@
INTERNAL,
{ },
{ { 0, 0x7fffffff } },
+ .stack_depth = 40,
},
{
"STX_MEM_DW: Store/Load double word: max negative",
@@ -4322,6 +4361,7 @@
INTERNAL,
{ },
{ { 0, 0xffffffff } },
+ .stack_depth = 40,
},
/* BPF_STX | BPF_XADD | BPF_W/DW */
{
@@ -4336,6 +4376,7 @@
INTERNAL,
{ },
{ { 0, 0x22 } },
+ .stack_depth = 40,
},
{
"STX_XADD_W: Test side-effects, r10: 0x12 + 0x10 = 0x22",
@@ -4351,6 +4392,7 @@
INTERNAL,
{ },
{ { 0, 0 } },
+ .stack_depth = 40,
},
{
"STX_XADD_W: Test side-effects, r0: 0x12 + 0x10 = 0x22",
@@ -4363,6 +4405,7 @@
INTERNAL,
{ },
{ { 0, 0x12 } },
+ .stack_depth = 40,
},
{
"STX_XADD_W: X + 1 + 1 + 1 + ...",
@@ -4384,6 +4427,7 @@
INTERNAL,
{ },
{ { 0, 0x22 } },
+ .stack_depth = 40,
},
{
"STX_XADD_DW: Test side-effects, r10: 0x12 + 0x10 = 0x22",
@@ -4399,6 +4443,7 @@
INTERNAL,
{ },
{ { 0, 0 } },
+ .stack_depth = 40,
},
{
"STX_XADD_DW: Test side-effects, r0: 0x12 + 0x10 = 0x22",
@@ -4411,6 +4456,7 @@
INTERNAL,
{ },
{ { 0, 0x12 } },
+ .stack_depth = 40,
},
{
"STX_XADD_DW: X + 1 + 1 + 1 + ...",
@@ -5022,6 +5068,14 @@
{ { ETH_HLEN, 0xbef } },
.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
},
+ {
+ "BPF_MAXINSNS: jump around ld_abs",
+ { },
+ INTERNAL,
+ { 10, 11 },
+ { { 2, 10 } },
+ .fill_helper = bpf_fill_jump_around_ld_abs,
+ },
/*
* LD_IND / LD_ABS on fragmented SKBs
*/
@@ -5663,7 +5717,7 @@
if (!skb)
return NULL;
- memcpy(__skb_put(skb, size), buf, size);
+ __skb_put_data(skb, buf, size);
/* Initialize a fake skb with test pattern. */
skb_reset_mac_header(skb);
@@ -5809,6 +5863,7 @@
/* Type doesn't really matter here as long as it's not unspec. */
fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
+ fp->aux->stack_depth = tests[which].stack_depth;
/* We cannot error here as we don't need type compatibility
* checks.
diff --git a/net/802/fc.c b/net/802/fc.c
index 1bb496e..058a9f7 100644
--- a/net/802/fc.c
+++ b/net/802/fc.c
@@ -49,7 +49,7 @@
struct fcllc *fcllc;
hdr_len = sizeof(struct fch_hdr) + sizeof(struct fcllc);
- fch = (struct fch_hdr *)skb_push(skb, hdr_len);
+ fch = skb_push(skb, hdr_len);
fcllc = (struct fcllc *)(fch+1);
fcllc->dsap = fcllc->ssap = EXTENDED_SAP;
fcllc->llc = UI_CMD;
@@ -59,7 +59,7 @@
else
{
hdr_len = sizeof(struct fch_hdr);
- fch = (struct fch_hdr *)skb_push(skb, hdr_len);
+ fch = skb_push(skb, hdr_len);
}
if(saddr)
diff --git a/net/802/fddi.c b/net/802/fddi.c
index 6356623..90f1416 100644
--- a/net/802/fddi.c
+++ b/net/802/fddi.c
@@ -58,7 +58,7 @@
if(type != ETH_P_IP && type != ETH_P_IPV6 && type != ETH_P_ARP)
hl=FDDI_K_8022_HLEN-3;
- fddi = (struct fddihdr *)skb_push(skb, hl);
+ fddi = skb_push(skb, hl);
fddi->fc = FDDI_FC_K_ASYNC_LLC_DEF;
if(type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP)
{
diff --git a/net/802/garp.c b/net/802/garp.c
index b38ee6d..2dac647 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -221,7 +221,7 @@
skb->protocol = htons(ETH_P_802_2);
skb_reserve(skb, LL_RESERVED_SPACE(app->dev) + LLC_RESERVE);
- gp = (struct garp_pdu_hdr *)__skb_put(skb, sizeof(*gp));
+ gp = __skb_put(skb, sizeof(*gp));
put_unaligned(htons(GARP_PROTOCOL_ID), &gp->protocol);
app->pdu = skb;
@@ -232,7 +232,7 @@
{
if (skb_tailroom(app->pdu) < sizeof(u8))
return -1;
- *(u8 *)__skb_put(app->pdu, sizeof(u8)) = GARP_END_MARK;
+ __skb_put_u8(app->pdu, GARP_END_MARK);
return 0;
}
@@ -268,7 +268,7 @@
if (skb_tailroom(app->pdu) < sizeof(*gm))
return -1;
- gm = (struct garp_msg_hdr *)__skb_put(app->pdu, sizeof(*gm));
+ gm = __skb_put(app->pdu, sizeof(*gm));
gm->attrtype = attrtype;
garp_cb(app->pdu)->cur_type = attrtype;
return 0;
@@ -299,7 +299,7 @@
len = sizeof(*ga) + attr->dlen;
if (skb_tailroom(app->pdu) < len)
goto queue;
- ga = (struct garp_attr_hdr *)__skb_put(app->pdu, len);
+ ga = __skb_put(app->pdu, len);
ga->len = len;
ga->event = event;
memcpy(ga->data, attr->data, attr->dlen);
diff --git a/net/802/hippi.c b/net/802/hippi.c
index 4460606..690308b 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -47,7 +47,7 @@
unsigned short type,
const void *daddr, const void *saddr, unsigned int len)
{
- struct hippi_hdr *hip = (struct hippi_hdr *)skb_push(skb, HIPPI_HLEN);
+ struct hippi_hdr *hip = skb_push(skb, HIPPI_HLEN);
struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
if (!len){
diff --git a/net/802/mrp.c b/net/802/mrp.c
index 72db278..be4dd31 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -311,7 +311,7 @@
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
- ph = (struct mrp_pdu_hdr *)__skb_put(skb, sizeof(*ph));
+ ph = __skb_put(skb, sizeof(*ph));
ph->version = app->app->version;
app->pdu = skb;
@@ -324,7 +324,7 @@
if (skb_tailroom(app->pdu) < sizeof(*endmark))
return -1;
- endmark = (__be16 *)__skb_put(app->pdu, sizeof(*endmark));
+ endmark = __skb_put(app->pdu, sizeof(*endmark));
put_unaligned(MRP_END_MARK, endmark);
return 0;
}
@@ -368,7 +368,7 @@
if (skb_tailroom(app->pdu) < sizeof(*mh))
return -1;
- mh = (struct mrp_msg_hdr *)__skb_put(app->pdu, sizeof(*mh));
+ mh = __skb_put(app->pdu, sizeof(*mh));
mh->attrtype = attrtype;
mh->attrlen = attrlen;
mrp_cb(app->pdu)->mh = mh;
@@ -382,8 +382,7 @@
if (skb_tailroom(app->pdu) < sizeof(*vah) + attrlen)
return -1;
- vah = (struct mrp_vecattr_hdr *)__skb_put(app->pdu,
- sizeof(*vah) + attrlen);
+ vah = __skb_put(app->pdu, sizeof(*vah) + attrlen);
put_unaligned(0, &vah->lenflags);
memcpy(vah->firstattrvalue, firstattrvalue, attrlen);
mrp_cb(app->pdu)->vah = vah;
@@ -435,7 +434,7 @@
if (!pos) {
if (skb_tailroom(app->pdu) < sizeof(u8))
goto queue;
- vaevents = (u8 *)__skb_put(app->pdu, sizeof(u8));
+ vaevents = __skb_put(app->pdu, sizeof(u8));
} else {
vaevents = (u8 *)(skb_tail_pointer(app->pdu) - sizeof(u8));
}
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index abc5f40..f7e83f6 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -58,7 +58,7 @@
int rc;
if (!(vlan->flags & VLAN_FLAG_REORDER_HDR)) {
- vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
+ vhdr = skb_push(skb, VLAN_HLEN);
vlan_tci = vlan->vlan_id;
vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
@@ -797,12 +797,6 @@
.ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup,
#endif
.ndo_fix_features = vlan_dev_fix_features,
- .ndo_fdb_add = switchdev_port_fdb_add,
- .ndo_fdb_del = switchdev_port_fdb_del,
- .ndo_fdb_dump = switchdev_port_fdb_dump,
- .ndo_bridge_setlink = switchdev_port_bridge_setlink,
- .ndo_bridge_getlink = switchdev_port_bridge_getlink,
- .ndo_bridge_dellink = switchdev_port_bridge_dellink,
.ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
.ndo_get_iflink = vlan_dev_get_iflink,
};
diff --git a/net/Kconfig b/net/Kconfig
index 102f781..7d57ef3 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -55,6 +55,7 @@
source "net/packet/Kconfig"
source "net/unix/Kconfig"
+source "net/tls/Kconfig"
source "net/xfrm/Kconfig"
source "net/iucv/Kconfig"
source "net/smc/Kconfig"
diff --git a/net/Makefile b/net/Makefile
index 9086ffb..bed80fa 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -15,6 +15,7 @@
obj-$(CONFIG_NET) += ethernet/ 802/ sched/ netlink/ bpf/
obj-$(CONFIG_NETFILTER) += netfilter/
obj-$(CONFIG_INET) += ipv4/
+obj-$(CONFIG_TLS) += tls/
obj-$(CONFIG_XFRM) += xfrm/
obj-$(CONFIG_UNIX) += unix/
obj-$(CONFIG_NET) += ipv6/
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 465cc24..5d035c1 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1529,7 +1529,7 @@
* The push leaves us with a ddpehdr not an shdr, with
* the port bytes handily preset in the right place.
*/
- ddp = (struct ddpehdr *) skb_push(skb, sizeof(*ddp) - 4);
+ ddp = skb_push(skb, sizeof(*ddp) - 4);
/* Now fill in the long header */
@@ -1647,7 +1647,7 @@
SOCK_DEBUG(sk, "SK %p: Begin build.\n", sk);
- ddp = (struct ddpehdr *)skb_put(skb, sizeof(struct ddpehdr));
+ ddp = skb_put(skb, sizeof(struct ddpehdr));
ddp->deh_len_hops = htons(len + sizeof(*ddp));
ddp->deh_dnet = usat->sat_addr.s_net;
ddp->deh_snet = at->src_net;
diff --git a/net/atm/clip.c b/net/atm/clip.c
index ec527b6..a7e4018 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -60,7 +60,7 @@
skb = alloc_skb(sizeof(struct atmarp_ctrl), GFP_ATOMIC);
if (!skb)
return -ENOMEM;
- ctrl = (struct atmarp_ctrl *)skb_put(skb, sizeof(struct atmarp_ctrl));
+ ctrl = skb_put(skb, sizeof(struct atmarp_ctrl));
ctrl->type = type;
ctrl->itf_num = itf;
ctrl->ip = ip;
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index adb6e3d..f640a99 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -150,8 +150,7 @@
pr_debug("%d (0x%p)\n", (int)type, vcc);
while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
schedule();
- msg = (struct atmsvc_msg *)skb_put(skb, sizeof(struct atmsvc_msg));
- memset(msg, 0, sizeof(*msg));
+ msg = skb_put_zero(skb, sizeof(struct atmsvc_msg));
msg->type = type;
*(struct atm_vcc **) &msg->vcc = vcc;
*(struct atm_vcc **) &msg->listen_vcc = listen_vcc;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index b7c4867..0c92ba0 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1562,7 +1562,7 @@
/* Add the PID if one is not supplied by the user in the skb */
if (!ax25->pidincl)
- *skb_push(skb, 1) = sk->sk_protocol;
+ *(u8 *)skb_push(skb, 1) = sk->sk_protocol;
if (sk->sk_type == SOCK_SEQPACKET) {
/* Connected mode sockets go via the LAPB machine */
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 495ba7c..a350117 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -732,8 +732,8 @@
unsigned char *skb_buff;
unsigned long new_direct_link_flag;
- skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
- memcpy(skb_buff, packet_buff, packet_len);
+ skb_buff = skb_put_data(forw_packet_aggr->skb, packet_buff,
+ packet_len);
forw_packet_aggr->packet_len += packet_len;
forw_packet_aggr->num_packets++;
@@ -1022,7 +1022,8 @@
u8 tq_avg;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "update_originator(): Searching and updating originator entry of received packet\n");
+ "%s(): Searching and updating originator entry of received packet\n",
+ __func__);
rcu_read_lock();
hlist_for_each_entry_rcu(tmp_neigh_node,
@@ -1944,7 +1945,7 @@
batadv_iv_ogm_orig_print_neigh(orig_node, if_outgoing,
seq);
- seq_puts(seq, "\n");
+ seq_putc(seq, '\n');
batman_count++;
next:
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index a36c8e7..4e2724c 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -400,7 +400,7 @@
neigh_node->if_incoming->net_dev->name);
batadv_v_orig_print_neigh(orig_node, if_outgoing, seq);
- seq_puts(seq, "\n");
+ seq_putc(seq, '\n');
batman_count++;
next:
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index b90c990..bd1064d 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -19,6 +19,7 @@
#include "main.h"
#include <linux/atomic.h>
+#include <linux/bitops.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
@@ -29,6 +30,7 @@
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/netdevice.h>
+#include <linux/nl80211.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
@@ -109,8 +111,12 @@
*/
return 0;
}
- if (!ret)
- return sinfo.expected_throughput / 100;
+ if (ret)
+ goto default_throughput;
+ if (!(sinfo.filled & BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT)))
+ goto default_throughput;
+
+ return sinfo.expected_throughput / 100;
}
/* if not a wifi interface, check if this device provides data via
@@ -340,9 +346,8 @@
goto out;
skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN);
- elp_buff = skb_put(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN);
+ elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN);
elp_packet = (struct batadv_elp_packet *)elp_buff;
- memset(elp_packet, 0, BATADV_ELP_HLEN);
elp_packet->packet_type = BATADV_ELP;
elp_packet->version = BATADV_COMPAT_VERSION;
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index 03a35c9..1e3dc37 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -166,8 +166,7 @@
goto reschedule;
skb_reserve(skb, ETH_HLEN);
- pkt_buff = skb_put(skb, ogm_buff_len);
- memcpy(pkt_buff, ogm_buff, ogm_buff_len);
+ pkt_buff = skb_put_data(skb, ogm_buff, ogm_buff_len);
ogm_packet = (struct batadv_ogm2_packet *)skb->data;
ogm_packet->seqno = htonl(atomic_read(&bat_priv->bat_v.ogm_seqno));
@@ -382,8 +381,7 @@
goto out;
skb_reserve(skb, ETH_HLEN);
- skb_buff = skb_put(skb, packet_len);
- memcpy(skb_buff, ogm_received, packet_len);
+ skb_buff = skb_put_data(skb, ogm_received, packet_len);
/* apply forward penalty */
ogm_forward = (struct batadv_ogm2_packet *)skb_buff;
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index d07e89e..cdd8e8e 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -394,7 +394,7 @@
*/
ether_addr_copy(ethhdr->h_source, mac);
batadv_dbg(BATADV_DBG_BLA, bat_priv,
- "bla_send_claim(): CLAIM %pM on vid %d\n", mac,
+ "%s(): CLAIM %pM on vid %d\n", __func__, mac,
batadv_print_vid(vid));
break;
case BATADV_CLAIM_TYPE_UNCLAIM:
@@ -403,7 +403,7 @@
*/
ether_addr_copy(hw_src, mac);
batadv_dbg(BATADV_DBG_BLA, bat_priv,
- "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
+ "%s(): UNCLAIM %pM on vid %d\n", __func__, mac,
batadv_print_vid(vid));
break;
case BATADV_CLAIM_TYPE_ANNOUNCE:
@@ -412,7 +412,7 @@
*/
ether_addr_copy(hw_src, mac);
batadv_dbg(BATADV_DBG_BLA, bat_priv,
- "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
+ "%s(): ANNOUNCE of %pM on vid %d\n", __func__,
ethhdr->h_source, batadv_print_vid(vid));
break;
case BATADV_CLAIM_TYPE_REQUEST:
@@ -423,15 +423,15 @@
ether_addr_copy(hw_src, mac);
ether_addr_copy(ethhdr->h_dest, mac);
batadv_dbg(BATADV_DBG_BLA, bat_priv,
- "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
+ "%s(): REQUEST of %pM to %pM on vid %d\n", __func__,
ethhdr->h_source, ethhdr->h_dest,
batadv_print_vid(vid));
break;
case BATADV_CLAIM_TYPE_LOOPDETECT:
ether_addr_copy(ethhdr->h_source, mac);
batadv_dbg(BATADV_DBG_BLA, bat_priv,
- "bla_send_claim(): LOOPDETECT of %pM to %pM on vid %d\n",
- ethhdr->h_source, ethhdr->h_dest,
+ "%s(): LOOPDETECT of %pM to %pM on vid %d\n",
+ __func__, ethhdr->h_source, ethhdr->h_dest,
batadv_print_vid(vid));
break;
@@ -509,7 +509,7 @@
return entry;
batadv_dbg(BATADV_DBG_BLA, bat_priv,
- "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
+ "%s(): not found (%pM, %d), creating new entry\n", __func__,
orig, batadv_print_vid(vid));
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
@@ -605,7 +605,8 @@
int i;
batadv_dbg(BATADV_DBG_BLA, bat_priv,
- "bla_answer_request(): received a claim request, send all of our own claims again\n");
+ "%s(): received a claim request, send all of our own claims again\n",
+ __func__);
backbone_gw = batadv_backbone_hash_find(bat_priv,
primary_if->net_dev->dev_addr,
@@ -718,8 +719,8 @@
kref_init(&claim->refcount);
batadv_dbg(BATADV_DBG_BLA, bat_priv,
- "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
- mac, batadv_print_vid(vid));
+ "%s(): adding new entry %pM, vid %d to hash ...\n",
+ __func__, mac, batadv_print_vid(vid));
kref_get(&claim->refcount);
hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
@@ -739,8 +740,9 @@
goto claim_free_ref;
batadv_dbg(BATADV_DBG_BLA, bat_priv,
- "bla_add_claim(): changing ownership for %pM, vid %d to gw %pM\n",
- mac, batadv_print_vid(vid), backbone_gw->orig);
+ "%s(): changing ownership for %pM, vid %d to gw %pM\n",
+ __func__, mac, batadv_print_vid(vid),
+ backbone_gw->orig);
remove_crc = true;
}
@@ -808,7 +810,7 @@
if (!claim)
return;
- batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
+ batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
mac, batadv_print_vid(vid));
batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
@@ -848,8 +850,8 @@
crc = ntohs(*((__be16 *)(&an_addr[4])));
batadv_dbg(BATADV_DBG_BLA, bat_priv,
- "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
- batadv_print_vid(vid), backbone_gw->orig, crc);
+ "%s(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
+ __func__, batadv_print_vid(vid), backbone_gw->orig, crc);
spin_lock_bh(&backbone_gw->crc_lock);
backbone_crc = backbone_gw->crc;
@@ -857,8 +859,8 @@
if (backbone_crc != crc) {
batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
- "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
- backbone_gw->orig,
+ "%s(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
+ __func__, backbone_gw->orig,
batadv_print_vid(backbone_gw->vid),
backbone_crc, crc);
@@ -903,8 +905,8 @@
return true;
batadv_dbg(BATADV_DBG_BLA, bat_priv,
- "handle_request(): REQUEST vid %d (sent by %pM)...\n",
- batadv_print_vid(vid), ethhdr->h_source);
+ "%s(): REQUEST vid %d (sent by %pM)...\n",
+ __func__, batadv_print_vid(vid), ethhdr->h_source);
batadv_bla_answer_request(bat_priv, primary_if, vid);
return true;
@@ -940,7 +942,7 @@
/* this must be an UNCLAIM frame */
batadv_dbg(BATADV_DBG_BLA, bat_priv,
- "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
+ "%s(): UNCLAIM %pM on vid %d (sent by %pM)...\n", __func__,
claim_addr, batadv_print_vid(vid), backbone_gw->orig);
batadv_bla_del_claim(bat_priv, claim_addr, vid);
@@ -1160,9 +1162,9 @@
ethhdr);
if (ret == 1)
batadv_dbg(BATADV_DBG_BLA, bat_priv,
- "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
- ethhdr->h_source, batadv_print_vid(vid), hw_src,
- hw_dst);
+ "%s(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
+ __func__, ethhdr->h_source, batadv_print_vid(vid),
+ hw_src, hw_dst);
if (ret < 2)
return !!ret;
@@ -1196,8 +1198,9 @@
}
batadv_dbg(BATADV_DBG_BLA, bat_priv,
- "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
- ethhdr->h_source, batadv_print_vid(vid), hw_src, hw_dst);
+ "%s(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
+ __func__, ethhdr->h_source, batadv_print_vid(vid), hw_src,
+ hw_dst);
return true;
}
@@ -1237,8 +1240,8 @@
continue;
batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
- "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
- backbone_gw->orig);
+ "%s(): backbone gw %pM timed out\n",
+ __func__, backbone_gw->orig);
purge_now:
/* don't wait for the pending request anymore */
@@ -1295,11 +1298,11 @@
goto skip;
batadv_dbg(BATADV_DBG_BLA, bat_priv,
- "bla_purge_claims(): timed out.\n");
+ "%s(): timed out.\n", __func__);
purge_now:
batadv_dbg(BATADV_DBG_BLA, bat_priv,
- "bla_purge_claims(): %pM, vid %d\n",
+ "%s(): %pM, vid %d\n", __func__,
claim->addr, claim->vid);
batadv_handle_unclaim(bat_priv, primary_if,
@@ -1851,8 +1854,8 @@
*/
batadv_dbg(BATADV_DBG_BLA, bat_priv,
- "bla_rx(): Unclaimed MAC %pM found. Claim it. Local: %s\n",
- ethhdr->h_source,
+ "%s(): Unclaimed MAC %pM found. Claim it. Local: %s\n",
+ __func__, ethhdr->h_source,
batadv_is_my_client(bat_priv,
ethhdr->h_source, vid) ?
"yes" : "no");
@@ -1978,15 +1981,15 @@
* older than 100 ms to make sure we really
* have a roaming client here.
*/
- batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_tx(): Roaming client %pM detected. Unclaim it.\n",
- ethhdr->h_source);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Roaming client %pM detected. Unclaim it.\n",
+ __func__, ethhdr->h_source);
batadv_handle_unclaim(bat_priv, primary_if,
primary_if->net_dev->dev_addr,
ethhdr->h_source, vid);
goto allow;
} else {
- batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_tx(): Race for claim %pM detected. Drop packet.\n",
- ethhdr->h_source);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Race for claim %pM detected. Drop packet.\n",
+ __func__, ethhdr->h_source);
goto handled;
}
}
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 000ca2f..6930d6b 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -601,7 +601,7 @@
BATADV_DAT_ADDR_MAX);
batadv_dbg(BATADV_DBG_DAT, bat_priv,
- "dat_select_candidates(): IP=%pI4 hash(IP)=%u\n", &ip_dst,
+ "%s(): IP=%pI4 hash(IP)=%u\n", __func__, &ip_dst,
ip_key);
for (select = 0; select < BATADV_DAT_CANDIDATES_NUM; select++)
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 8f964be..a98cf11 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -296,8 +296,7 @@
/* Copy the payload of each fragment into the last skb */
hlist_for_each_entry(entry, chain, list) {
size = entry->skb->len - hdr_size;
- memcpy(skb_put(skb_out, size), entry->skb->data + hdr_size,
- size);
+ skb_put_data(skb_out, entry->skb->data + hdr_size, size);
}
free:
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 6308c9f..8ead292 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -207,7 +207,7 @@
skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, ETH_HLEN);
- icmp_header = (struct batadv_icmp_header *)skb_put(skb, packet_len);
+ icmp_header = skb_put(skb, packet_len);
if (copy_from_user(icmp_header, buff, packet_len)) {
len = -EFAULT;
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 810f7d0..2be8f1f 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -24,7 +24,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2017.1"
+#define BATADV_SOURCE_VERSION "2017.2"
#endif
/* B.A.T.M.A.N. parameters */
@@ -168,7 +168,7 @@
/* Maximum number of fragments for one packet */
#define BATADV_FRAG_MAX_FRAGMENTS 16
/* Maximum size of each fragment */
-#define BATADV_FRAG_MAX_FRAG_SIZE 1400
+#define BATADV_FRAG_MAX_FRAG_SIZE 1280
/* Time to keep fragments while waiting for rest of the fragments */
#define BATADV_FRAG_TIMEOUT 10000
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index e1f6fc7..3604d78 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -1935,9 +1935,7 @@
list)
seq_printf(seq, "%pM ",
nc_node->addr);
- seq_puts(seq, "\n");
-
- seq_puts(seq, " Outgoing: ");
+ seq_puts(seq, "\n Outgoing: ");
/* For out_nc_node to this orig_node */
list_for_each_entry_rcu(nc_node,
&orig_node->out_coding_list,
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index ae9f4d3..f10e3ff 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -985,8 +985,8 @@
batadv_orig_node_put(orig_node_gw);
if (is_gw) {
batadv_dbg(BATADV_DBG_BLA, bat_priv,
- "recv_unicast_packet(): Dropped unicast pkt received from another backbone gw %pM.\n",
- orig_addr_gw);
+ "%s(): Dropped unicast pkt received from another backbone gw %pM.\n",
+ __func__, orig_addr_gw);
goto free_skb;
}
}
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 403df59..d239a9d 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -971,11 +971,11 @@
if (hard_iface)
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "purge_outstanding_packets(): %s\n",
- hard_iface->net_dev->name);
+ "%s(): %s\n",
+ __func__, hard_iface->net_dev->name);
else
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "purge_outstanding_packets()\n");
+ "%s()\n", __func__);
/* claim bcast list for free() */
spin_lock_bh(&bat_priv->forw_bcast_list_lock);
diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
index 556f9a8..bfe8eff 100644
--- a/net/batman-adv/tp_meter.c
+++ b/net/batman-adv/tp_meter.c
@@ -27,6 +27,7 @@
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
+#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
@@ -594,7 +595,7 @@
return BATADV_TP_REASON_MEMORY_ERROR;
skb_reserve(skb, ETH_HLEN);
- icmp = (struct batadv_icmp_tp_packet *)skb_put(skb, sizeof(*icmp));
+ icmp = skb_put(skb, sizeof(*icmp));
/* fill the icmp header */
ether_addr_copy(icmp->dst, orig_node->orig);
@@ -611,7 +612,7 @@
icmp->timestamp = htonl(timestamp);
data_len = len - sizeof(*icmp);
- data = (u8 *)skb_put(skb, data_len);
+ data = skb_put(skb, data_len);
batadv_tp_fill_prerandom(tp_vars, data, data_len);
r = batadv_send_skb_to_orig(skb, orig_node, NULL);
@@ -1189,7 +1190,7 @@
}
skb_reserve(skb, ETH_HLEN);
- icmp = (struct batadv_icmp_tp_packet *)skb_put(skb, sizeof(*icmp));
+ icmp = skb_put(skb, sizeof(*icmp));
icmp->packet_type = BATADV_ICMP;
icmp->version = BATADV_COMPAT_VERSION;
icmp->ttl = BATADV_TTL;
@@ -1497,7 +1498,7 @@
/**
* batadv_tp_meter_init - initialize global tp_meter structures
*/
-void batadv_tp_meter_init(void)
+void __init batadv_tp_meter_init(void)
{
get_random_bytes(batadv_tp_prerandom, sizeof(batadv_tp_prerandom));
}
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index e75b493..e1133bc 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -2488,18 +2488,16 @@
_batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
struct batadv_tt_global_entry *tt_global_entry)
{
- bool ret = false;
-
if (tt_local_entry->common.flags & BATADV_TT_CLIENT_WIFI &&
tt_global_entry->common.flags & BATADV_TT_CLIENT_WIFI)
- ret = true;
+ return true;
/* check if the two clients are marked as isolated */
if (tt_local_entry->common.flags & BATADV_TT_CLIENT_ISOLA &&
tt_global_entry->common.flags & BATADV_TT_CLIENT_ISOLA)
- ret = true;
+ return true;
- return ret;
+ return false;
}
/**
@@ -4010,19 +4008,22 @@
const unsigned char *addr,
unsigned short vid)
{
- bool ret = false;
+ /* ignore loop detect MACs, they are not supposed to be in the tt local
+ * data either.
+ */
+ if (batadv_bla_is_loopdetect_mac(addr))
+ return false;
if (!batadv_tt_global_add(bat_priv, orig_node, addr, vid,
BATADV_TT_CLIENT_TEMP,
atomic_read(&orig_node->last_ttvn)))
- goto out;
+ return false;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Added temporary global client (addr: %pM, vid: %d, orig: %pM)\n",
addr, batadv_print_vid(vid), orig_node->orig);
- ret = true;
-out:
- return ret;
+
+ return true;
}
/**
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index f0095fd..aad994e 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -239,7 +239,7 @@
}
len -= sizeof(*cl);
- cl = (void *) skb_pull(skb, sizeof(*cl));
+ cl = skb_pull(skb, sizeof(*cl));
}
/* Fall back to L2CAP init sequence */
@@ -279,7 +279,7 @@
while (skb->len >= sizeof(*cl)) {
BT_DBG("Controller id %d type %d status %d", cl->id, cl->type,
cl->status);
- cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
+ cl = skb_pull(skb, sizeof(*cl));
}
/* TODO send A2MP_CHANGE_RSP */
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 42d0997..8a8f77a 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -733,7 +733,7 @@
EXPORT_SYMBOL(bt_procfs_init);
EXPORT_SYMBOL(bt_procfs_cleanup);
-static struct net_proto_family bt_sock_family_ops = {
+static const struct net_proto_family bt_sock_family_ops = {
.owner = THIS_MODULE,
.family = PF_BLUETOOTH,
.create = bt_sock_create,
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index fbf251f..9a40013 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -374,25 +374,22 @@
/* Decompress header and construct ether frame */
switch (type & BNEP_TYPE_MASK) {
case BNEP_COMPRESSED:
- memcpy(__skb_put(nskb, ETH_HLEN), &s->eh, ETH_HLEN);
+ __skb_put_data(nskb, &s->eh, ETH_HLEN);
break;
case BNEP_COMPRESSED_SRC_ONLY:
- memcpy(__skb_put(nskb, ETH_ALEN), s->eh.h_dest, ETH_ALEN);
- memcpy(__skb_put(nskb, ETH_ALEN), skb_mac_header(skb), ETH_ALEN);
+ __skb_put_data(nskb, s->eh.h_dest, ETH_ALEN);
+ __skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN);
put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2));
break;
case BNEP_COMPRESSED_DST_ONLY:
- memcpy(__skb_put(nskb, ETH_ALEN), skb_mac_header(skb),
- ETH_ALEN);
- memcpy(__skb_put(nskb, ETH_ALEN + 2), s->eh.h_source,
- ETH_ALEN + 2);
+ __skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN);
+ __skb_put_data(nskb, s->eh.h_source, ETH_ALEN + 2);
break;
case BNEP_GENERAL:
- memcpy(__skb_put(nskb, ETH_ALEN * 2), skb_mac_header(skb),
- ETH_ALEN * 2);
+ __skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN * 2);
put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2));
break;
}
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 2b875ed..1d4d7d4 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -75,16 +75,16 @@
u8 start[ETH_ALEN] = { 0x01 };
/* Request all addresses */
- memcpy(__skb_put(skb, ETH_ALEN), start, ETH_ALEN);
- memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN);
+ __skb_put_data(skb, start, ETH_ALEN);
+ __skb_put_data(skb, dev->broadcast, ETH_ALEN);
r->len = htons(ETH_ALEN * 2);
} else {
struct netdev_hw_addr *ha;
int i, len = skb->len;
if (dev->flags & IFF_BROADCAST) {
- memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN);
- memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN);
+ __skb_put_data(skb, dev->broadcast, ETH_ALEN);
+ __skb_put_data(skb, dev->broadcast, ETH_ALEN);
}
/* FIXME: We should group addresses here. */
@@ -93,8 +93,8 @@
netdev_for_each_mc_addr(ha, dev) {
if (i == BNEP_MAX_MULTICAST_FILTERS)
break;
- memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
- memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
+ __skb_put_data(skb, ha->addr, ETH_ALEN);
+ __skb_put_data(skb, ha->addr, ETH_ALEN);
i++;
}
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 9e59b66..f4c64ef 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -122,7 +122,7 @@
if (skb && (skb->len > 0))
skb_copy_from_linear_data(skb, skb_put(nskb, skb->len), skb->len);
- memcpy(skb_put(nskb, count), buf, count);
+ skb_put_data(nskb, buf, count);
session->reassembly[id] = nskb;
diff --git a/net/bluetooth/ecdh_helper.c b/net/bluetooth/ecdh_helper.c
index 24d4e60..c7b1a9a 100644
--- a/net/bluetooth/ecdh_helper.c
+++ b/net/bluetooth/ecdh_helper.c
@@ -89,11 +89,9 @@
p.curve_id = ECC_CURVE_NIST_P256;
buf_len = crypto_ecdh_key_len(&p);
buf = kmalloc(buf_len, GFP_KERNEL);
- if (!buf) {
- pr_err("alg: kpp: Failed to allocate %d bytes for buf\n",
- buf_len);
+ if (!buf)
goto free_req;
- }
+
crypto_ecdh_encode_key(buf, buf_len, &p);
/* Set A private Key */
@@ -170,11 +168,8 @@
p.key_size = 32;
buf_len = crypto_ecdh_key_len(&p);
buf = kmalloc(buf_len, GFP_KERNEL);
- if (!buf) {
- pr_err("alg: kpp: Failed to allocate %d bytes for buf\n",
- buf_len);
+ if (!buf)
goto free_req;
- }
do {
if (tries++ >= max_tries)
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 0568677..d860e3c 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -148,13 +148,13 @@
return -EINVAL;
/* When the diagnostic flags are not persistent and the transport
- * is not active, then there is no need for the vendor callback.
- *
- * Instead just store the desired value. If needed the setting
- * will be programmed when the controller gets powered on.
+ * is not active or in user channel operation, then there is no need
+ * for the vendor callback. Instead just store the desired value and
+ * the setting will be programmed when the controller gets powered on.
*/
if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
- !test_bit(HCI_RUNNING, &hdev->flags))
+ (!test_bit(HCI_RUNNING, &hdev->flags) ||
+ hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
goto done;
hci_req_sync_lock(hdev);
@@ -548,6 +548,7 @@
{
struct hci_dev *hdev = req->hdev;
u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ bool changed = false;
/* If Connectionless Slave Broadcast master role is supported
* enable all necessary events for it.
@@ -557,6 +558,7 @@
events[1] |= 0x80; /* Synchronization Train Complete */
events[2] |= 0x10; /* Slave Page Response Timeout */
events[2] |= 0x20; /* CSB Channel Map Change */
+ changed = true;
}
/* If Connectionless Slave Broadcast slave role is supported
@@ -567,13 +569,24 @@
events[2] |= 0x02; /* CSB Receive */
events[2] |= 0x04; /* CSB Timeout */
events[2] |= 0x08; /* Truncated Page Complete */
+ changed = true;
}
/* Enable Authenticated Payload Timeout Expired event if supported */
- if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
+ if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
events[2] |= 0x80;
+ changed = true;
+ }
- hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
+ /* Some Broadcom based controllers indicate support for Set Event
+ * Mask Page 2 command, but then actually do not support it. Since
+ * the default value is all bits set to zero, the command is only
+ * required if the event mask has to be changed. In case no change
+ * to the event mask is needed, skip this command.
+ */
+ if (changed)
+ hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
+ sizeof(events), events);
}
static int hci_init3_req(struct hci_request *req, unsigned long opt)
@@ -635,6 +648,14 @@
* Report
*/
+ /* If the controller supports Channel Selection Algorithm #2
+ * feature, enable the corresponding event.
+ */
+ if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
+ events[2] |= 0x08; /* LE Channel Selection
+ * Algorithm
+ */
+
/* If the controller supports the LE Set Scan Enable command,
* enable the corresponding advertising report event.
*/
@@ -677,6 +698,12 @@
if (hdev->commands[34] & 0x04)
events[1] |= 0x01; /* LE Generate DHKey Complete */
+ /* If the controller supports the LE Set Default PHY or
+ * LE Set PHY commands, enable the corresponding event.
+ */
+ if (hdev->commands[35] & (0x20 | 0x40))
+ events[1] |= 0x08; /* LE PHY Update Complete */
+
hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
events);
@@ -771,6 +798,27 @@
sizeof(support), &support);
}
+ /* Set Suggested Default Data Length to maximum if supported */
+ if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
+ struct hci_cp_le_write_def_data_len cp;
+
+ cp.tx_len = hdev->le_max_tx_len;
+ cp.tx_time = hdev->le_max_tx_time;
+ hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
+ }
+
+ /* Set Default PHY parameters if command is supported */
+ if (hdev->commands[35] & 0x20) {
+ struct hci_cp_le_set_default_phy cp;
+
+ /* No transmitter PHY or receiver PHY preferences */
+ cp.all_phys = 0x03;
+ cp.tx_phys = 0;
+ cp.rx_phys = 0;
+
+ hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
+ }
+
return 0;
}
@@ -1384,6 +1432,7 @@
* completed.
*/
if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
+ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
ret = hdev->set_diag(hdev, true);
@@ -3217,7 +3266,7 @@
return -ENOMEM;
hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
- memcpy(skb_put(skb, 3), hw_err, 3);
+ skb_put_data(skb, hw_err, 3);
/* Send Hardware Error to upper stack */
return hci_recv_frame(hdev, skb);
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index b5faff4..b73ac14 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -299,12 +299,12 @@
if (!skb)
return NULL;
- hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
+ hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
hdr->opcode = cpu_to_le16(opcode);
hdr->plen = plen;
if (plen)
- memcpy(skb_put(skb, plen), param, plen);
+ skb_put_data(skb, param, plen);
BT_DBG("skb len %d", skb->len);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 638bf0e..65d734c 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -332,7 +332,7 @@
return;
/* Put header before the data */
- hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
+ hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
hdr->opcode = opcode;
hdr->index = cpu_to_le16(hdev->id);
hdr->len = cpu_to_le16(skb->len);
@@ -379,11 +379,11 @@
put_unaligned_le16(event, skb_put(skb, 2));
if (data)
- memcpy(skb_put(skb, data_len), data, data_len);
+ skb_put_data(skb, data, data_len);
skb->tstamp = tstamp;
- hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
+ hdr = skb_push(skb, HCI_MON_HDR_SIZE);
hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
hdr->index = index;
hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
@@ -410,7 +410,7 @@
if (!skb)
return NULL;
- ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
+ ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
ni->type = hdev->dev_type;
ni->bus = hdev->bus;
bacpy(&ni->bdaddr, &hdev->bdaddr);
@@ -438,7 +438,7 @@
if (!skb)
return NULL;
- ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
+ ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
bacpy(&ii->bdaddr, &hdev->bdaddr);
ii->manufacturer = cpu_to_le16(hdev->manufacturer);
@@ -467,7 +467,7 @@
__net_timestamp(skb);
- hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
+ hdr = skb_push(skb, HCI_MON_HDR_SIZE);
hdr->opcode = opcode;
hdr->index = cpu_to_le16(hdev->id);
hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
@@ -515,14 +515,14 @@
put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
put_unaligned_le16(format, skb_put(skb, 2));
- memcpy(skb_put(skb, sizeof(ver)), ver, sizeof(ver));
+ skb_put_data(skb, ver, sizeof(ver));
put_unaligned_le32(flags, skb_put(skb, 4));
- *skb_put(skb, 1) = TASK_COMM_LEN;
- memcpy(skb_put(skb, TASK_COMM_LEN), hci_pi(sk)->comm, TASK_COMM_LEN);
+ skb_put_u8(skb, TASK_COMM_LEN);
+ skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);
__net_timestamp(skb);
- hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
+ hdr = skb_push(skb, HCI_MON_HDR_SIZE);
hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
if (hci_pi(sk)->hdev)
hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
@@ -560,7 +560,7 @@
__net_timestamp(skb);
- hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
+ hdr = skb_push(skb, HCI_MON_HDR_SIZE);
hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
if (hci_pi(sk)->hdev)
hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
@@ -586,11 +586,11 @@
put_unaligned_le16(opcode, skb_put(skb, 2));
if (buf)
- memcpy(skb_put(skb, len), buf, len);
+ skb_put_data(skb, buf, len);
__net_timestamp(skb);
- hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
+ hdr = skb_push(skb, HCI_MON_HDR_SIZE);
hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
hdr->index = cpu_to_le16(index);
hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
@@ -616,7 +616,7 @@
va_start(args, fmt);
vsprintf(skb_put(skb, len), fmt, args);
- *skb_put(skb, 1) = 0;
+ *(u8 *)skb_put(skb, 1) = 0;
va_end(args);
__net_timestamp(skb);
@@ -703,11 +703,11 @@
if (!skb)
return;
- hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
+ hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
hdr->evt = HCI_EV_STACK_INTERNAL;
hdr->plen = sizeof(*ev) + dlen;
- ev = (void *)skb_put(skb, sizeof(*ev) + dlen);
+ ev = skb_put(skb, sizeof(*ev) + dlen);
ev->type = type;
memcpy(ev->data, data, dlen);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 0bec458..961f7f5 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -112,9 +112,9 @@
return -ENOMEM;
}
- *skb_put(skb, 1) = hdr;
+ skb_put_u8(skb, hdr);
if (data && size > 0)
- memcpy(skb_put(skb, size), data, size);
+ skb_put_data(skb, data, size);
skb_queue_tail(transmit, skb);
wake_up_interruptible(sk_sleep(sk));
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index f88ac995..303c779 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1048,7 +1048,7 @@
if (!skb)
return ERR_PTR(-ENOMEM);
- lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh = skb_put(skb, L2CAP_HDR_SIZE);
lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
@@ -2182,7 +2182,7 @@
return skb;
/* Create L2CAP header */
- lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh = skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
@@ -2213,7 +2213,7 @@
return skb;
/* Create L2CAP header */
- lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh = skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
lh->len = cpu_to_le16(len);
@@ -2255,7 +2255,7 @@
return skb;
/* Create L2CAP header */
- lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh = skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
@@ -2373,7 +2373,7 @@
return skb;
/* Create L2CAP header */
- lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh = skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
@@ -2908,7 +2908,7 @@
if (!skb)
return NULL;
- lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh = skb_put(skb, L2CAP_HDR_SIZE);
lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
if (conn->hcon->type == LE_LINK)
@@ -2916,14 +2916,14 @@
else
lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
- cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
+ cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
cmd->code = code;
cmd->ident = ident;
cmd->len = cpu_to_le16(dlen);
if (dlen) {
count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
- memcpy(skb_put(skb, count), data, count);
+ skb_put_data(skb, data, count);
data += count;
}
@@ -2938,7 +2938,7 @@
if (!*frag)
goto fail;
- memcpy(skb_put(*frag, count), data, count);
+ skb_put_data(*frag, data, count);
len -= count;
data += count;
diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c
index c933bd0..0d0a6d7 100644
--- a/net/bluetooth/mgmt_util.c
+++ b/net/bluetooth/mgmt_util.c
@@ -44,11 +44,11 @@
put_unaligned_le16(opcode, skb_put(skb, 2));
if (buf)
- memcpy(skb_put(skb, len), buf, len);
+ skb_put_data(skb, buf, len);
__net_timestamp(skb);
- hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
+ hdr = skb_push(skb, HCI_MON_HDR_SIZE);
hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
hdr->index = index;
hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
@@ -66,7 +66,7 @@
if (!skb)
return -ENOMEM;
- hdr = (void *) skb_put(skb, sizeof(*hdr));
+ hdr = skb_put(skb, sizeof(*hdr));
hdr->opcode = cpu_to_le16(event);
if (hdev)
hdr->index = cpu_to_le16(hdev->id);
@@ -75,7 +75,7 @@
hdr->len = cpu_to_le16(data_len);
if (data)
- memcpy(skb_put(skb, data_len), data, data_len);
+ skb_put_data(skb, data, data_len);
/* Time stamp */
__net_timestamp(skb);
@@ -103,13 +103,13 @@
if (!skb)
return -ENOMEM;
- hdr = (void *) skb_put(skb, sizeof(*hdr));
+ hdr = skb_put(skb, sizeof(*hdr));
hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
hdr->index = cpu_to_le16(index);
hdr->len = cpu_to_le16(sizeof(*ev));
- ev = (void *) skb_put(skb, sizeof(*ev));
+ ev = skb_put(skb, sizeof(*ev));
ev->status = status;
ev->opcode = cpu_to_le16(cmd);
@@ -147,13 +147,13 @@
if (!skb)
return -ENOMEM;
- hdr = (void *) skb_put(skb, sizeof(*hdr));
+ hdr = skb_put(skb, sizeof(*hdr));
hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
hdr->index = cpu_to_le16(index);
hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
- ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
+ ev = skb_put(skb, sizeof(*ev) + rp_len);
ev->opcode = cpu_to_le16(cmd);
ev->status = status;
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 8ebca90..4a0b41d 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -863,7 +863,7 @@
if (!skb)
return -ENOMEM;
- cmd = (void *) __skb_put(skb, sizeof(*cmd));
+ cmd = __skb_put(skb, sizeof(*cmd));
cmd->addr = d->addr;
cmd->ctrl = __ctrl(RFCOMM_DISC, 1);
cmd->len = __len8(0);
@@ -1149,10 +1149,10 @@
u8 *crc;
if (len > 127) {
- hdr = (void *) skb_push(skb, 4);
+ hdr = skb_push(skb, 4);
put_unaligned(cpu_to_le16(__len16(len)), (__le16 *) &hdr->len);
} else {
- hdr = (void *) skb_push(skb, 3);
+ hdr = skb_push(skb, 3);
hdr->len = __len8(len);
}
hdr->addr = addr;
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 2f2cb5e..5f3074c 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -798,7 +798,7 @@
skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE);
- memcpy(skb_put(skb, size), buf + sent, size);
+ skb_put_data(skb, buf + sent, size);
rfcomm_dlc_send_noerror(dlc, skb);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 14585ed..a0ef897 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -23,6 +23,7 @@
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
+#include <crypto/algapi.h>
#include <crypto/b128ops.h>
#include <crypto/hash.h>
@@ -523,7 +524,7 @@
if (err)
return false;
- return !memcmp(bdaddr->b, hash, 3);
+ return !crypto_memneq(bdaddr->b, hash, 3);
}
int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa)
@@ -579,7 +580,7 @@
/* This is unlikely, but we need to check that
* we didn't accidentally generate a debug key.
*/
- if (memcmp(smp->local_sk, debug_sk, 32))
+ if (crypto_memneq(smp->local_sk, debug_sk, 32))
break;
}
smp->debug_key = false;
@@ -993,7 +994,7 @@
if (ret)
return SMP_UNSPECIFIED;
- if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) {
+ if (crypto_memneq(smp->pcnf, confirm, sizeof(smp->pcnf))) {
BT_ERR("Pairing failed (confirmation values mismatch)");
return SMP_CONFIRM_FAILED;
}
@@ -1512,7 +1513,7 @@
smp->rrnd, r, cfm))
return SMP_UNSPECIFIED;
- if (memcmp(smp->pcnf, cfm, 16))
+ if (crypto_memneq(smp->pcnf, cfm, 16))
return SMP_CONFIRM_FAILED;
smp->passkey_round++;
@@ -1908,7 +1909,7 @@
/* This is unlikely, but we need to check that
* we didn't accidentally generate a debug key.
*/
- if (memcmp(smp->local_sk, debug_sk, 32))
+ if (crypto_memneq(smp->local_sk, debug_sk, 32))
break;
}
}
@@ -2176,7 +2177,7 @@
if (err)
return SMP_UNSPECIFIED;
- if (memcmp(smp->pcnf, cfm, 16))
+ if (crypto_memneq(smp->pcnf, cfm, 16))
return SMP_CONFIRM_FAILED;
} else {
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
@@ -2660,7 +2661,7 @@
if (err)
return SMP_UNSPECIFIED;
- if (memcmp(cfm.confirm_val, smp->pcnf, 16))
+ if (crypto_memneq(cfm.confirm_val, smp->pcnf, 16))
return SMP_CONFIRM_FAILED;
}
@@ -2693,7 +2694,7 @@
else
hcon->pending_sec_level = BT_SECURITY_FIPS;
- if (!memcmp(debug_pk, smp->remote_pk, 64))
+ if (!crypto_memneq(debug_pk, smp->remote_pk, 64))
set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags);
if (smp->method == DSP_PASSKEY) {
@@ -2792,7 +2793,7 @@
if (err)
return SMP_UNSPECIFIED;
- if (memcmp(check->e, e, 16))
+ if (crypto_memneq(check->e, e, 16))
return SMP_DHKEY_CHECK_FAILED;
if (!hcon->out) {
@@ -3506,10 +3507,10 @@
if (!generate_ecdh_keys(pk, sk))
return -EINVAL;
- if (memcmp(sk, debug_sk, 32))
+ if (crypto_memneq(sk, debug_sk, 32))
return -EINVAL;
- if (memcmp(pk, debug_pk, 64))
+ if (crypto_memneq(pk, debug_pk, 64))
return -EINVAL;
return 0;
@@ -3529,7 +3530,7 @@
if (err)
return err;
- if (memcmp(res, exp, 3))
+ if (crypto_memneq(res, exp, 3))
return -EINVAL;
return 0;
@@ -3559,7 +3560,7 @@
if (err)
return err;
- if (memcmp(res, exp, 16))
+ if (crypto_memneq(res, exp, 16))
return -EINVAL;
return 0;
@@ -3584,7 +3585,7 @@
if (err)
return err;
- if (memcmp(res, exp, 16))
+ if (crypto_memneq(res, exp, 16))
return -EINVAL;
return 0;
@@ -3616,7 +3617,7 @@
if (err)
return err;
- if (memcmp(res, exp, 16))
+ if (crypto_memneq(res, exp, 16))
return -EINVAL;
return 0;
@@ -3650,10 +3651,10 @@
if (err)
return err;
- if (memcmp(mackey, exp_mackey, 16))
+ if (crypto_memneq(mackey, exp_mackey, 16))
return -EINVAL;
- if (memcmp(ltk, exp_ltk, 16))
+ if (crypto_memneq(ltk, exp_ltk, 16))
return -EINVAL;
return 0;
@@ -3686,7 +3687,7 @@
if (err)
return err;
- if (memcmp(res, exp, 16))
+ if (crypto_memneq(res, exp, 16))
return -EINVAL;
return 0;
@@ -3740,7 +3741,7 @@
if (err)
return err;
- if (memcmp(res, exp, 16))
+ if (crypto_memneq(res, exp, 16))
return -EINVAL;
return 0;
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 889e564..1407d1b 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -121,7 +121,7 @@
.notifier_call = br_device_event
};
-/* called with RTNL */
+/* called with RTNL or RCU */
static int br_switchdev_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
@@ -131,27 +131,36 @@
struct switchdev_notifier_fdb_info *fdb_info;
int err = NOTIFY_DONE;
- p = br_port_get_rtnl(dev);
+ p = br_port_get_rtnl_rcu(dev);
if (!p)
goto out;
br = p->br;
switch (event) {
- case SWITCHDEV_FDB_ADD:
+ case SWITCHDEV_FDB_ADD_TO_BRIDGE:
fdb_info = ptr;
err = br_fdb_external_learn_add(br, p, fdb_info->addr,
fdb_info->vid);
- if (err)
+ if (err) {
err = notifier_from_errno(err);
+ break;
+ }
+ br_fdb_offloaded_set(br, p, fdb_info->addr,
+ fdb_info->vid);
break;
- case SWITCHDEV_FDB_DEL:
+ case SWITCHDEV_FDB_DEL_TO_BRIDGE:
fdb_info = ptr;
err = br_fdb_external_learn_del(br, p, fdb_info->addr,
fdb_info->vid);
if (err)
err = notifier_from_errno(err);
break;
+ case SWITCHDEV_FDB_OFFLOADED:
+ fdb_info = ptr;
+ br_fdb_offloaded_set(br, p, fdb_info->addr,
+ fdb_info->vid);
+ break;
}
out:
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index ab0c7cc..fef7872 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -511,6 +511,7 @@
fdb->is_static = is_static;
fdb->added_by_user = 0;
fdb->added_by_external_learn = 0;
+ fdb->offloaded = 0;
fdb->updated = fdb->used = jiffies;
hlist_add_head_rcu(&fdb->hlist, head);
}
@@ -647,11 +648,16 @@
ndm->ndm_family = AF_BRIDGE;
ndm->ndm_pad1 = 0;
ndm->ndm_pad2 = 0;
- ndm->ndm_flags = fdb->added_by_external_learn ? NTF_EXT_LEARNED : 0;
+ ndm->ndm_flags = 0;
ndm->ndm_type = 0;
ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
ndm->ndm_state = fdb_to_nud(br, fdb);
+ if (fdb->offloaded)
+ ndm->ndm_flags |= NTF_OFFLOADED;
+ if (fdb->added_by_external_learn)
+ ndm->ndm_flags |= NTF_EXT_LEARNED;
+
if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr))
goto nla_put_failure;
if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
@@ -690,6 +696,8 @@
struct sk_buff *skb;
int err = -ENOBUFS;
+ br_switchdev_fdb_notify(fdb, type);
+
skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
if (skb == NULL)
goto errout;
@@ -1075,7 +1083,6 @@
struct net_bridge_fdb_entry *fdb;
int err = 0;
- ASSERT_RTNL();
spin_lock_bh(&br->hash_lock);
head = &br->hash[br_mac_hash(addr, vid)];
@@ -1110,7 +1117,6 @@
struct net_bridge_fdb_entry *fdb;
int err = 0;
- ASSERT_RTNL();
spin_lock_bh(&br->hash_lock);
fdb = br_fdb_find(br, addr, vid);
@@ -1123,3 +1129,17 @@
return err;
}
+
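+/* Mark an existing FDB entry as offloaded to hardware so that netlink
+ * dumps report it with NTF_OFFLOADED.
+ */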
+void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
+ const unsigned char *addr, u16 vid)
+{
+ struct net_bridge_fdb_entry *fdb;
+
+ spin_lock_bh(&br->hash_lock);
+
+ fdb = br_fdb_find(br, addr, vid);
+ if (fdb)
+ fdb->offloaded = 1;
+
+ spin_unlock_bh(&br->hash_lock);
+}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 7f8d05c..f3aef22 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -138,7 +138,7 @@
/* If vlan filtering is disabled or bridge interface is placed
* into promiscuous mode, place all ports in promiscuous mode.
*/
- if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br))
+ if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br->dev))
set_all = true;
list_for_each_entry(p, &br->port_list, list) {
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index b084548..09dcdb9 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -599,7 +599,7 @@
return -EINVAL;
vg = nbp_vlan_group(p);
- if (br_vlan_enabled(br) && vg && entry->vid == 0) {
+ if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
list_for_each_entry(v, &vg->vlan_list, vlist) {
entry->vid = v->vid;
err = __br_mdb_add(net, br, entry);
@@ -694,7 +694,7 @@
return -EINVAL;
vg = nbp_vlan_group(p);
- if (br_vlan_enabled(br) && vg && entry->vid == 0) {
+ if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
list_for_each_entry(v, &vg->vlan_list, vlist) {
entry->vid = v->vid;
err = __br_mdb_del(br, entry);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index faa7261..8dc5c8d 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -2176,6 +2176,14 @@
return err;
}
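+/* Exported helper: report whether multicast snooping is enabled on a
+ * bridge net_device.
+ */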
+bool br_multicast_enabled(const struct net_device *dev)
+{
+ struct net_bridge *br = netdev_priv(dev);
+
+ return !br->multicast_disabled;
+}
+EXPORT_SYMBOL_GPL(br_multicast_enabled);
+
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
unsigned long max_delay;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 32bd3ea..63dca34 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -662,16 +662,26 @@
}
/* Set/clear or port flags based on attribute */
-static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
- int attrtype, unsigned long mask)
+static int br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
+ int attrtype, unsigned long mask)
{
- if (tb[attrtype]) {
- u8 flag = nla_get_u8(tb[attrtype]);
- if (flag)
- p->flags |= mask;
- else
- p->flags &= ~mask;
- }
+ unsigned long flags;
+ int err;
+
+ if (!tb[attrtype])
+ return 0;
+
+ if (nla_get_u8(tb[attrtype]))
+ flags = p->flags | mask;
+ else
+ flags = p->flags & ~mask;
+
+ err = br_switchdev_set_port_flag(p, flags, mask);
+ if (err)
+ return err;
+
+ p->flags = flags;
+ return 0;
}
/* Process bridge protocol info on port */
@@ -681,20 +691,55 @@
bool br_vlan_tunnel_old = false;
int err;
- br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
- br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
- br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
- br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
- br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
- br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
- br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
- br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
- br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
- br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
- br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
+ err = br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
+ if (err)
+ return err;
+
+ err = br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
+ if (err)
+ return err;
+
+ err = br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
+ if (err)
+ return err;
+
+ err = br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
+ if (err)
+ return err;
+
+ err = br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
+ if (err)
+ return err;
+
+ err = br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
+ if (err)
+ return err;
+
+ err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
+ if (err)
+ return err;
+
+ err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
+ if (err)
+ return err;
+
+ err = br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
+ if (err)
+ return err;
+
+ err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
+ if (err)
+ return err;
+
+ err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
+ if (err)
+ return err;
br_vlan_tunnel_old = (p->flags & BR_VLAN_TUNNEL) ? true : false;
- br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
+ err = br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
+ if (err)
+ return err;
+
if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
nbp_vlan_tunnel_info_flush(p);
@@ -1251,7 +1296,7 @@
u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
u32 stp_enabled = br->stp_enabled;
u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
- u8 vlan_enabled = br_vlan_enabled(br);
+ u8 vlan_enabled = br_vlan_enabled(br->dev);
u64 clockval;
clockval = br_timer_value(&br->hello_timer);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 0d17728..c18682f 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -169,7 +169,8 @@
unsigned char is_local:1,
is_static:1,
added_by_user:1,
- added_by_external_learn:1;
+ added_by_external_learn:1,
+ offloaded:1;
/* write-heavy members should not affect lookups */
unsigned long updated ____cacheline_aligned_in_smp;
@@ -284,6 +285,12 @@
rtnl_dereference(dev->rx_handler_data) : NULL;
}
+static inline struct net_bridge_port *br_port_get_rtnl_rcu(const struct net_device *dev)
+{
+ return br_port_exists(dev) ?
+ rcu_dereference_rtnl(dev->rx_handler_data) : NULL;
+}
+
struct net_bridge {
spinlock_t lock;
spinlock_t hash_lock;
@@ -530,6 +537,8 @@
const unsigned char *addr, u16 vid);
int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
const unsigned char *addr, u16 vid);
+void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
+ const unsigned char *addr, u16 vid);
/* br_forward.c */
enum br_pkt_type {
@@ -854,10 +863,6 @@
return vg->pvid;
}
-static inline int br_vlan_enabled(struct net_bridge *br)
-{
- return br->vlan_enabled;
-}
#else
static inline bool br_allowed_ingress(const struct net_bridge *br,
struct net_bridge_vlan_group *vg,
@@ -945,11 +950,6 @@
return 0;
}
-static inline int br_vlan_enabled(struct net_bridge *br)
-{
- return 0;
-}
-
static inline int __br_vlan_filter_toggle(struct net_bridge *br,
unsigned long val)
{
@@ -1085,6 +1085,11 @@
struct sk_buff *skb);
bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
const struct sk_buff *skb);
+int br_switchdev_set_port_flag(struct net_bridge_port *p,
+ unsigned long flags,
+ unsigned long mask);
+void br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb,
+ int type);
#else
static inline int nbp_switchdev_mark_set(struct net_bridge_port *p)
{
@@ -1101,6 +1106,18 @@
{
return true;
}
+
+static inline int br_switchdev_set_port_flag(struct net_bridge_port *p,
+ unsigned long flags,
+ unsigned long mask)
+{
+ return 0;
+}
+
+static inline void
+br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
+{
+}
#endif /* CONFIG_NET_SWITCHDEV */
#endif
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 5881fbc..1b75d6b 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -50,7 +50,7 @@
skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, LLC_RESERVE);
- memcpy(__skb_put(skb, length), data, length);
+ __skb_put_data(skb, data, length);
llc_pdu_header_init(skb, LLC_PDU_TYPE_U, LLC_SAP_BSPAN,
LLC_SAP_BSPAN, LLC_PDU_CMD);
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 6f12a52..8911031 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -150,7 +150,6 @@
static void br_stp_start(struct net_bridge *br)
{
- struct net_bridge_port *p;
int err = -ENOENT;
if (net_eq(dev_net(br->dev), &init_net))
@@ -169,11 +168,6 @@
if (!err) {
br->stp_enabled = BR_USER_STP;
br_debug(br, "userspace STP started\n");
-
- /* Stop hello and hold timers */
- del_timer(&br->hello_timer);
- list_for_each_entry(p, &br->port_list, list)
- del_timer(&p->hold_timer);
} else {
br->stp_enabled = BR_KERNEL_STP;
br_debug(br, "using kernel STP\n");
@@ -189,7 +183,6 @@
static void br_stp_stop(struct net_bridge *br)
{
- struct net_bridge_port *p;
int err;
if (br->stp_enabled == BR_USER_STP) {
@@ -198,10 +191,6 @@
br_err(br, "failed to stop userspace STP (%d)\n", err);
/* To start timers on any ports left in blocking */
- mod_timer(&br->hello_timer, jiffies + br->hello_time);
- list_for_each_entry(p, &br->port_list, list)
- mod_timer(&p->hold_timer,
- round_jiffies(jiffies + BR_HOLD_TIME));
spin_lock_bh(&br->lock);
br_port_state_selection(br);
spin_unlock_bh(&br->lock);
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index f4097b9..181a44d 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -55,3 +55,79 @@
return !skb->offload_fwd_mark ||
BR_INPUT_SKB_CB(skb)->offload_fwd_mark != p->offload_fwd_mark;
}
+
+/* Flags that can be offloaded to hardware */
+#define BR_PORT_FLAGS_HW_OFFLOAD (BR_LEARNING | BR_FLOOD | \
+ BR_MCAST_FLOOD | BR_BCAST_FLOOD)
+
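+/* Push a bridge port flag change down to the switchdev driver. Masks
+ * containing bits outside BR_PORT_FLAGS_HW_OFFLOAD, or drivers without
+ * the attribute, are accepted silently; a driver that has the attribute
+ * but cannot offload this particular flag makes the change fail.
+ */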
+int br_switchdev_set_port_flag(struct net_bridge_port *p,
+ unsigned long flags,
+ unsigned long mask)
+{
+ struct switchdev_attr attr = {
+ .orig_dev = p->dev,
+ .id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT,
+ };
+ int err;
+
+ if (mask & ~BR_PORT_FLAGS_HW_OFFLOAD)
+ return 0;
+
+ err = switchdev_port_attr_get(p->dev, &attr);
+ if (err == -EOPNOTSUPP)
+ return 0;
+ if (err)
+ return err;
+
+ /* Check if specific bridge flag attribute offload is supported */
+ if (!(attr.u.brport_flags_support & mask)) {
+ br_warn(p->br, "bridge flag offload is not supported %u(%s)\n",
+ (unsigned int)p->port_no, p->dev->name);
+ return -EOPNOTSUPP;
+ }
+
+ attr.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS;
+ attr.flags = SWITCHDEV_F_DEFER;
+ attr.u.brport_flags = flags;
+ err = switchdev_port_attr_set(p->dev, &attr);
+ if (err) {
+ br_warn(p->br, "error setting offload flag on port %u(%s)\n",
+ (unsigned int)p->port_no, p->dev->name);
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac,
+ u16 vid, struct net_device *dev)
+{
+ struct switchdev_notifier_fdb_info info;
+ unsigned long notifier_type;
+
+ info.addr = mac;
+ info.vid = vid;
+ notifier_type = adding ? SWITCHDEV_FDB_ADD_TO_DEVICE : SWITCHDEV_FDB_DEL_TO_DEVICE;
+ call_switchdev_notifiers(notifier_type, dev, &info.info);
+}
+
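+/* Relay add/del of a user-configured FDB entry to the switchdev
+ * notifier chain so that offloading drivers can mirror it in hardware.
+ */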
+void
+br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
+{
+ if (!fdb->added_by_user)
+ return;
+
+ switch (type) {
+ case RTM_DELNEIGH:
+ br_switchdev_fdb_call_notifiers(false, fdb->addr.addr,
+ fdb->vlan_id,
+ fdb->dst->dev);
+ break;
+ case RTM_NEWNEIGH:
+ br_switchdev_fdb_call_notifiers(true, fdb->addr.addr,
+ fdb->vlan_id,
+ fdb->dst->dev);
+ break;
+ }
+}
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index b838213..26a1a56 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -706,6 +706,14 @@
return __br_vlan_filter_toggle(br, val);
}
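+/* Exported helper: report whether VLAN filtering is enabled on a
+ * bridge net_device, replacing the old struct net_bridge accessor.
+ */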
+bool br_vlan_enabled(const struct net_device *dev)
+{
+ struct net_bridge *br = netdev_priv(dev);
+
+ return !!br->vlan_enabled;
+}
+EXPORT_SYMBOL_GPL(br_vlan_enabled);
+
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
int err = 0;
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 346ef6b..eaf05de 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -28,7 +28,7 @@
{
struct ethhdr *eth;
- eth = (struct ethhdr *)skb_push(nskb, ETH_HLEN);
+ eth = skb_push(nskb, ETH_HLEN);
skb_reset_mac_header(nskb);
ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
@@ -107,11 +107,10 @@
struct iphdr *niph;
struct icmphdr *icmph;
unsigned int len;
- void *payload;
__wsum csum;
u8 proto;
- if (oldskb->csum_bad || !nft_bridge_iphdr_validate(oldskb))
+ if (!nft_bridge_iphdr_validate(oldskb))
return;
/* IP header checks: fragment. */
@@ -147,13 +146,11 @@
net->ipv4.sysctl_ip_default_ttl);
skb_reset_transport_header(nskb);
- icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
- memset(icmph, 0, sizeof(*icmph));
+ icmph = skb_put_zero(nskb, sizeof(struct icmphdr));
icmph->type = ICMP_DEST_UNREACH;
icmph->code = code;
- payload = skb_put(nskb, len);
- memcpy(payload, skb_network_header(oldskb), len);
+ skb_put_data(nskb, skb_network_header(oldskb), len);
csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
icmph->checksum = csum_fold(csum);
@@ -226,9 +223,6 @@
__be16 fo;
u8 proto = ip6h->nexthdr;
- if (skb->csum_bad)
- return false;
-
if (skb_csum_unnecessary(skb))
return true;
@@ -252,7 +246,6 @@
struct ipv6hdr *nip6h;
struct icmp6hdr *icmp6h;
unsigned int len;
- void *payload;
if (!nft_bridge_ip6hdr_validate(oldskb))
return;
@@ -278,13 +271,11 @@
net->ipv6.devconf_all->hop_limit);
skb_reset_transport_header(nskb);
- icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
- memset(icmp6h, 0, sizeof(*icmp6h));
+ icmp6h = skb_put_zero(nskb, sizeof(struct icmp6hdr));
icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
icmp6h->icmp6_code = code;
- payload = skb_put(nskb, len);
- memcpy(payload, skb_network_header(oldskb), len);
+ skb_put_data(nskb, skb_network_header(oldskb), len);
nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
icmp6h->icmp6_cksum =
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 21f18ea..7506b85 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -1103,7 +1103,7 @@
}
-static struct net_proto_family caif_family_ops = {
+static const struct net_proto_family caif_family_ops = {
.family = PF_CAIF,
.create = caif_create,
.owner = THIS_MODULE,
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 6543263..47a8748d 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -282,7 +282,7 @@
can_skb_prv(skb)->ifindex = dev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
- memcpy(skb_put(skb, op->cfsiz), cf, op->cfsiz);
+ skb_put_data(skb, cf, op->cfsiz);
/* send with loopback */
skb->dev = dev;
@@ -318,13 +318,13 @@
if (!skb)
return;
- memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head));
+ skb_put_data(skb, head, sizeof(*head));
if (head->nframes) {
/* CAN frames starting here */
firstframe = (struct canfd_frame *)skb_tail_pointer(skb);
- memcpy(skb_put(skb, datalen), frames, datalen);
+ skb_put_data(skb, frames, datalen);
/*
* the BCM uses the flags-element of the canfd_frame
diff --git a/net/core/datagram.c b/net/core/datagram.c
index db1866f2..e5311a7 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -161,6 +161,45 @@
return skb;
}
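+/* Walk @queue looking for the first skb at or past the peek offset.
+ * With MSG_PEEK the skb stays queued and a reference is taken;
+ * otherwise it is unlinked and @destructor, if set, is invoked.
+ * The caller must hold the queue lock.
+ */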
+struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
+ struct sk_buff_head *queue,
+ unsigned int flags,
+ void (*destructor)(struct sock *sk,
+ struct sk_buff *skb),
+ int *peeked, int *off, int *err,
+ struct sk_buff **last)
+{
+ struct sk_buff *skb;
+ int _off = *off;
+
+ *last = queue->prev;
+ skb_queue_walk(queue, skb) {
+ if (flags & MSG_PEEK) {
+ if (_off >= skb->len && (skb->len || _off ||
+ skb->peeked)) {
+ _off -= skb->len;
+ continue;
+ }
+ if (!skb->len) {
+ skb = skb_set_peeked(skb);
+ if (unlikely(IS_ERR(skb))) {
+ *err = PTR_ERR(skb);
+ return NULL;
+ }
+ }
+ *peeked = 1;
+ atomic_inc(&skb->users);
+ } else {
+ __skb_unlink(skb, queue);
+ if (destructor)
+ destructor(sk, skb);
+ }
+ *off = _off;
+ return skb;
+ }
+ return NULL;
+}
+
/**
* __skb_try_recv_datagram - Receive a datagram skbuff
* @sk: socket
@@ -222,40 +261,14 @@
* Look at current nfs client by the way...
* However, this function was correct in any case. 8)
*/
- int _off = *off;
-
- *last = (struct sk_buff *)queue;
spin_lock_irqsave(&queue->lock, cpu_flags);
- skb_queue_walk(queue, skb) {
- *last = skb;
- if (flags & MSG_PEEK) {
- if (_off >= skb->len && (skb->len || _off ||
- skb->peeked)) {
- _off -= skb->len;
- continue;
- }
- if (!skb->len) {
- skb = skb_set_peeked(skb);
- if (IS_ERR(skb)) {
- error = PTR_ERR(skb);
- spin_unlock_irqrestore(&queue->lock,
- cpu_flags);
- goto no_packet;
- }
- }
- *peeked = 1;
- atomic_inc(&skb->users);
- } else {
- __skb_unlink(skb, queue);
- if (destructor)
- destructor(sk, skb);
- }
- spin_unlock_irqrestore(&queue->lock, cpu_flags);
- *off = _off;
- return skb;
- }
-
+ skb = __skb_try_recv_from_queue(sk, queue, flags, destructor,
+ peeked, off, &error, last);
spin_unlock_irqrestore(&queue->lock, cpu_flags);
+ if (error)
+ goto no_packet;
+ if (skb)
+ return skb;
if (!sk_can_busy_loop(sk))
break;
@@ -317,9 +330,7 @@
{
bool slow;
- if (likely(atomic_read(&skb->users) == 1))
- smp_rmb();
- else if (likely(!atomic_dec_and_test(&skb->users))) {
+ if (!skb_unref(skb)) {
sk_peek_offset_bwd(sk, len);
return;
}
@@ -335,8 +346,8 @@
}
EXPORT_SYMBOL(__skb_free_datagram_locked);
-int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
- unsigned int flags,
+int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
+ struct sk_buff *skb, unsigned int flags,
void (*destructor)(struct sock *sk,
struct sk_buff *skb))
{
@@ -344,15 +355,15 @@
if (flags & MSG_PEEK) {
err = -ENOENT;
- spin_lock_bh(&sk->sk_receive_queue.lock);
- if (skb == skb_peek(&sk->sk_receive_queue)) {
- __skb_unlink(skb, &sk->sk_receive_queue);
+ spin_lock_bh(&sk_queue->lock);
+ if (skb == skb_peek(sk_queue)) {
+ __skb_unlink(skb, sk_queue);
atomic_dec(&skb->users);
if (destructor)
destructor(sk, skb);
err = 0;
}
- spin_unlock_bh(&sk->sk_receive_queue.lock);
+ spin_unlock_bh(&sk_queue->lock);
}
atomic_inc(&sk->sk_drops);
@@ -383,7 +394,8 @@
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
- int err = __sk_queue_drop_skb(sk, skb, flags, NULL);
+ int err = __sk_queue_drop_skb(sk, &sk->sk_receive_queue, skb, flags,
+ NULL);
kfree_skb(skb);
sk_mem_reclaim_partial(sk);
diff --git a/net/core/dev.c b/net/core/dev.c
index 7243421..df76377 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -105,6 +105,7 @@
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
@@ -142,6 +143,7 @@
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
+#include <linux/sctp.h>
#include "net-sysfs.h"
@@ -161,6 +163,7 @@
static int call_netdevice_notifiers_info(unsigned long val,
struct net_device *dev,
struct netdev_notifier_info *info);
+static struct napi_struct *napi_by_id(unsigned int napi_id);
/*
* The @dev_base_head list is protected by @dev_base_lock and the rtnl
@@ -865,6 +868,31 @@
EXPORT_SYMBOL(dev_get_by_index);
/**
+ * dev_get_by_napi_id - find a device by napi_id
+ * @napi_id: ID of the NAPI struct
+ *
+ * Search for an interface by NAPI ID. Returns a pointer to the
+ * device, or %NULL if it is not found. The device's reference
+ * counter is not incremented, so the caller must be careful about
+ * locking and must hold the RCU read lock.
+ */
+
+struct net_device *dev_get_by_napi_id(unsigned int napi_id)
+{
+ struct napi_struct *napi;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (napi_id < MIN_NAPI_ID)
+ return NULL;
+
+ napi = napi_by_id(napi_id);
+
+ return napi ? napi->dev : NULL;
+}
+EXPORT_SYMBOL(dev_get_by_napi_id);
+
+/**
* netdev_get_name - get a netdevice name, knowing its ifindex.
* @net: network namespace
* @name: a pointer to the buffer where the name will be stored.
@@ -2612,6 +2640,47 @@
}
EXPORT_SYMBOL(skb_checksum_help);
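+/* Software fallback for the CRC32c checksum used by SCTP: compute it
+ * over the area starting at the checksum start, store it in the SCTP
+ * header and mark the skb CHECKSUM_NONE.
+ */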
+int skb_crc32c_csum_help(struct sk_buff *skb)
+{
+ __le32 crc32c_csum;
+ int ret = 0, offset, start;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ goto out;
+
+ if (unlikely(skb_is_gso(skb)))
+ goto out;
+
+ /* Before computing a checksum, we should make sure no frag could
+ * be modified by an external entity: otherwise the checksum could
+ * be wrong.
+ */
+ if (unlikely(skb_has_shared_frag(skb))) {
+ ret = __skb_linearize(skb);
+ if (ret)
+ goto out;
+ }
+ start = skb_checksum_start_offset(skb);
+ offset = start + offsetof(struct sctphdr, checksum);
+ if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (skb_cloned(skb) &&
+ !skb_clone_writable(skb, offset + sizeof(__le32))) {
+ ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (ret)
+ goto out;
+ }
+ crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
+ skb->len - start, ~(__u32)0,
+ crc32c_csum_stub));
+ *(__le32 *)(skb->data + offset) = crc32c_csum;
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->csum_not_inet = 0;
+out:
+ return ret;
+}
+
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
__be16 type = skb->protocol;
@@ -2954,6 +3023,17 @@
return skb;
}
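+/* Resolve the checksum before handing the skb to the device: fall back
+ * to a software checksum (CRC32c for SCTP, the Internet checksum
+ * otherwise) whenever the device features cannot cover it.
+ */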
+int skb_csum_hwoffload_help(struct sk_buff *skb,
+ const netdev_features_t features)
+{
+ if (unlikely(skb->csum_not_inet))
+ return !!(features & NETIF_F_SCTP_CRC) ? 0 :
+ skb_crc32c_csum_help(skb);
+
+ return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
+}
+EXPORT_SYMBOL(skb_csum_hwoffload_help);
+
static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
{
netdev_features_t features;
@@ -2992,8 +3072,7 @@
else
skb_set_transport_header(skb,
skb_checksum_start_offset(skb));
- if (!(features & NETIF_F_CSUM_MASK) &&
- skb_checksum_help(skb))
+ if (skb_csum_hwoffload_help(skb, features))
goto out_kfree_skb;
}
}
@@ -3179,7 +3258,7 @@
/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
qdisc_bstats_cpu_update(cl->q, skb);
- switch (tc_classify(skb, cl, &cl_res, false)) {
+ switch (tcf_classify(skb, cl, &cl_res, false)) {
case TC_ACT_OK:
case TC_ACT_RECLASSIFY:
skb->tc_index = TC_H_MIN(cl_res.classid);
@@ -3191,6 +3270,7 @@
return NULL;
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
+ case TC_ACT_TRAP:
*ret = NET_XMIT_SUCCESS;
consume_skb(skb);
return NULL;
@@ -3949,7 +4029,7 @@
skb->tc_at_ingress = 1;
qdisc_bstats_cpu_update(cl->q, skb);
- switch (tc_classify(skb, cl, &cl_res, false)) {
+ switch (tcf_classify(skb, cl, &cl_res, false)) {
case TC_ACT_OK:
case TC_ACT_RECLASSIFY:
skb->tc_index = TC_H_MIN(cl_res.classid);
@@ -3960,6 +4040,7 @@
return NULL;
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
+ case TC_ACT_TRAP:
consume_skb(skb);
return NULL;
case TC_ACT_REDIRECT:
@@ -4261,13 +4342,12 @@
static int generic_xdp_install(struct net_device *dev, struct netdev_xdp *xdp)
{
+ struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
struct bpf_prog *new = xdp->prog;
int ret = 0;
switch (xdp->command) {
- case XDP_SETUP_PROG: {
- struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
-
+ case XDP_SETUP_PROG:
rcu_assign_pointer(dev->xdp_prog, new);
if (old)
bpf_prog_put(old);
@@ -4279,10 +4359,10 @@
dev_disable_lro(dev);
}
break;
- }
case XDP_QUERY_PROG:
- xdp->prog_attached = !!rcu_access_pointer(dev->xdp_prog);
+ xdp->prog_attached = !!old;
+ xdp->prog_id = old ? old->aux->id : 0;
break;
default:
@@ -4637,9 +4717,6 @@
if (netif_elide_gro(skb->dev))
goto normal;
- if (skb->csum_bad)
- goto normal;
-
gro_list_prepare(napi, skb);
rcu_read_lock();
@@ -6857,7 +6934,8 @@
}
EXPORT_SYMBOL(dev_change_proto_down);
-bool __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op)
+bool __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op,
+ u32 *prog_id)
{
struct netdev_xdp xdp;
@@ -6866,6 +6944,9 @@
/* Query must always succeed. */
WARN_ON(xdp_op(dev, &xdp) < 0);
+ if (prog_id)
+ *prog_id = xdp.prog_id;
+
return xdp.prog_attached;
}
@@ -6911,10 +6992,10 @@
xdp_chk = generic_xdp_install;
if (fd >= 0) {
- if (xdp_chk && __dev_xdp_attached(dev, xdp_chk))
+ if (xdp_chk && __dev_xdp_attached(dev, xdp_chk, NULL))
return -EEXIST;
if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
- __dev_xdp_attached(dev, xdp_op))
+ __dev_xdp_attached(dev, xdp_op, NULL))
return -EBUSY;
prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
@@ -7013,7 +7094,7 @@
if (!dev->rtnl_link_ops ||
dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
- skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
+ skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
GFP_KERNEL);
/*
@@ -8598,7 +8679,6 @@
rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
NULL, dev_cpu_dead);
WARN_ON(rc < 0);
- dst_subsys_init();
rc = 0;
out:
return rc;
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 27fad31..82fd4c9 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -225,6 +225,7 @@
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
rx_filter_valid = 1;
break;
}
diff --git a/net/core/dst.c b/net/core/dst.c
index 13ba4a0..f851adb 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -42,108 +42,6 @@
* to dirty as few cache lines as possible in __dst_free().
* As this is not a very strong hint, we don't force an alignment on SMP.
*/
-static struct {
- spinlock_t lock;
- struct dst_entry *list;
- unsigned long timer_inc;
- unsigned long timer_expires;
-} dst_garbage = {
- .lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
- .timer_inc = DST_GC_MAX,
-};
-static void dst_gc_task(struct work_struct *work);
-static void ___dst_free(struct dst_entry *dst);
-
-static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);
-
-static DEFINE_MUTEX(dst_gc_mutex);
-/*
- * long lived entries are maintained in this list, guarded by dst_gc_mutex
- */
-static struct dst_entry *dst_busy_list;
-
-static void dst_gc_task(struct work_struct *work)
-{
- int delayed = 0;
- int work_performed = 0;
- unsigned long expires = ~0L;
- struct dst_entry *dst, *next, head;
- struct dst_entry *last = &head;
-
- mutex_lock(&dst_gc_mutex);
- next = dst_busy_list;
-
-loop:
- while ((dst = next) != NULL) {
- next = dst->next;
- prefetch(&next->next);
- cond_resched();
- if (likely(atomic_read(&dst->__refcnt))) {
- last->next = dst;
- last = dst;
- delayed++;
- continue;
- }
- work_performed++;
-
- dst = dst_destroy(dst);
- if (dst) {
- /* NOHASH and still referenced. Unless it is already
- * on gc list, invalidate it and add to gc list.
- *
- * Note: this is temporary. Actually, NOHASH dst's
- * must be obsoleted when parent is obsoleted.
- * But we do not have state "obsoleted, but
- * referenced by parent", so it is right.
- */
- if (dst->obsolete > 0)
- continue;
-
- ___dst_free(dst);
- dst->next = next;
- next = dst;
- }
- }
-
- spin_lock_bh(&dst_garbage.lock);
- next = dst_garbage.list;
- if (next) {
- dst_garbage.list = NULL;
- spin_unlock_bh(&dst_garbage.lock);
- goto loop;
- }
- last->next = NULL;
- dst_busy_list = head.next;
- if (!dst_busy_list)
- dst_garbage.timer_inc = DST_GC_MAX;
- else {
- /*
- * if we freed less than 1/10 of delayed entries,
- * we can sleep longer.
- */
- if (work_performed <= delayed/10) {
- dst_garbage.timer_expires += dst_garbage.timer_inc;
- if (dst_garbage.timer_expires > DST_GC_MAX)
- dst_garbage.timer_expires = DST_GC_MAX;
- dst_garbage.timer_inc += DST_GC_INC;
- } else {
- dst_garbage.timer_inc = DST_GC_INC;
- dst_garbage.timer_expires = DST_GC_MIN;
- }
- expires = dst_garbage.timer_expires;
- /*
- * if the next desired timer is more than 4 seconds in the
- * future then round the timer to whole seconds
- */
- if (expires > 4*HZ)
- expires = round_jiffies_relative(expires);
- schedule_delayed_work(&dst_gc_work, expires);
- }
-
- spin_unlock_bh(&dst_garbage.lock);
- mutex_unlock(&dst_gc_mutex);
-}
-
int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
kfree_skb(skb);
@@ -216,41 +114,12 @@
}
EXPORT_SYMBOL(dst_alloc);
-static void ___dst_free(struct dst_entry *dst)
-{
- /* The first case (dev==NULL) is required, when
- protocol module is unloaded.
- */
- if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
- dst->input = dst_discard;
- dst->output = dst_discard_out;
- }
- dst->obsolete = DST_OBSOLETE_DEAD;
-}
-
-void __dst_free(struct dst_entry *dst)
-{
- spin_lock_bh(&dst_garbage.lock);
- ___dst_free(dst);
- dst->next = dst_garbage.list;
- dst_garbage.list = dst;
- if (dst_garbage.timer_inc > DST_GC_INC) {
- dst_garbage.timer_inc = DST_GC_INC;
- dst_garbage.timer_expires = DST_GC_MIN;
- mod_delayed_work(system_wq, &dst_gc_work,
- dst_garbage.timer_expires);
- }
- spin_unlock_bh(&dst_garbage.lock);
-}
-EXPORT_SYMBOL(__dst_free);
-
struct dst_entry *dst_destroy(struct dst_entry * dst)
{
struct dst_entry *child;
smp_rmb();
-again:
child = dst->child;
if (!(dst->flags & DST_NOCOUNT))
@@ -269,20 +138,8 @@
kmem_cache_free(dst->ops->kmem_cachep, dst);
dst = child;
- if (dst) {
- int nohash = dst->flags & DST_NOHASH;
-
- if (atomic_dec_and_test(&dst->__refcnt)) {
- /* We were real parent of this dst, so kill child. */
- if (nohash)
- goto again;
- } else {
- /* Child is still referenced, return it for freeing. */
- if (nohash)
- return dst;
- /* Child is still in his hash table */
- }
- }
+ if (dst)
+ dst_release_immediate(dst);
return NULL;
}
EXPORT_SYMBOL(dst_destroy);
@@ -292,26 +149,62 @@
struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
dst = dst_destroy(dst);
- if (dst)
- __dst_free(dst);
}
+/* Operations to mark dst as DEAD and clean up the net device referenced
+ * by dst:
+ * 1. put the dst under loopback interface and discard all tx/rx packets
+ * on this route.
+ * 2. release the net_device
+ * This function should be called when removing routes from the fib tree
+ * in preparation for a NETDEV_DOWN/NETDEV_UNREGISTER event and also to
+ * make the next dst_ops->check() fail.
+ */
+void dst_dev_put(struct dst_entry *dst)
+{
+ struct net_device *dev = dst->dev;
+
+ dst->obsolete = DST_OBSOLETE_DEAD;
+ if (dst->ops->ifdown)
+ dst->ops->ifdown(dst, dev, true);
+ dst->input = dst_discard;
+ dst->output = dst_discard_out;
+ dst->dev = dev_net(dst->dev)->loopback_dev;
+ dev_hold(dst->dev);
+ dev_put(dev);
+}
+EXPORT_SYMBOL(dst_dev_put);
+
void dst_release(struct dst_entry *dst)
{
if (dst) {
int newrefcnt;
- unsigned short nocache = dst->flags & DST_NOCACHE;
newrefcnt = atomic_dec_return(&dst->__refcnt);
if (unlikely(newrefcnt < 0))
net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
__func__, dst, newrefcnt);
- if (!newrefcnt && unlikely(nocache))
+ if (!newrefcnt)
call_rcu(&dst->rcu_head, dst_destroy_rcu);
}
}
EXPORT_SYMBOL(dst_release);
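+/* Like dst_release(), but destroys the dst synchronously instead of
+ * deferring to an RCU callback; only safe when no other CPU can still
+ * hold an RCU-protected reference to this entry.
+ */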
+void dst_release_immediate(struct dst_entry *dst)
+{
+ if (dst) {
+ int newrefcnt;
+
+ newrefcnt = atomic_dec_return(&dst->__refcnt);
+ if (unlikely(newrefcnt < 0))
+ net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
+ __func__, dst, newrefcnt);
+ if (!newrefcnt)
+ dst_destroy(dst);
+ }
+}
+EXPORT_SYMBOL(dst_release_immediate);
+
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);
@@ -377,7 +270,7 @@
dst = &md_dst->dst;
dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
- DST_METADATA | DST_NOCACHE | DST_NOCOUNT);
+ DST_METADATA | DST_NOCOUNT);
dst->input = dst_md_discard;
dst->output = dst_md_discard_out;
@@ -423,86 +316,3 @@
return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);
-
-/* Dirty hack. We did it in 2.2 (in __dst_free),
- * we have _very_ good reasons not to repeat
- * this mistake in 2.3, but we have no choice
- * now. _It_ _is_ _explicit_ _deliberate_
- * _race_ _condition_.
- *
- * Commented and originally written by Alexey.
- */
-static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
- int unregister)
-{
- if (dst->ops->ifdown)
- dst->ops->ifdown(dst, dev, unregister);
-
- if (dev != dst->dev)
- return;
-
- if (!unregister) {
- dst->input = dst_discard;
- dst->output = dst_discard_out;
- } else {
- dst->dev = dev_net(dst->dev)->loopback_dev;
- dev_hold(dst->dev);
- dev_put(dev);
- }
-}
-
-static int dst_dev_event(struct notifier_block *this, unsigned long event,
- void *ptr)
-{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct dst_entry *dst, *last = NULL;
-
- switch (event) {
- case NETDEV_UNREGISTER_FINAL:
- case NETDEV_DOWN:
- mutex_lock(&dst_gc_mutex);
- for (dst = dst_busy_list; dst; dst = dst->next) {
- last = dst;
- dst_ifdown(dst, dev, event != NETDEV_DOWN);
- }
-
- spin_lock_bh(&dst_garbage.lock);
- dst = dst_garbage.list;
- dst_garbage.list = NULL;
- /* The code in dst_ifdown places a hold on the loopback device.
- * If the gc entry processing is set to expire after a lengthy
- * interval, this hold can cause netdev_wait_allrefs() to hang
- * out and wait for a long time -- until the loopback
- * interface is released. If we're really unlucky, it'll emit
- * pr_emerg messages to console too. Reset the interval here,
- * so dst cleanups occur in a more timely fashion.
- */
- if (dst_garbage.timer_inc > DST_GC_INC) {
- dst_garbage.timer_inc = DST_GC_INC;
- dst_garbage.timer_expires = DST_GC_MIN;
- mod_delayed_work(system_wq, &dst_gc_work,
- dst_garbage.timer_expires);
- }
- spin_unlock_bh(&dst_garbage.lock);
-
- if (last)
- last->next = dst;
- else
- dst_busy_list = dst;
- for (; dst; dst = dst->next)
- dst_ifdown(dst, dev, event != NETDEV_DOWN);
- mutex_unlock(&dst_gc_mutex);
- break;
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block dst_dev_notifier = {
- .notifier_call = dst_dev_event,
- .priority = -10, /* must be called after other network notifiers */
-};
-
-void __init dst_subsys_init(void)
-{
- register_netdevice_notifier(&dst_dev_notifier);
-}
diff --git a/net/core/filter.c b/net/core/filter.c
index a6bb95f..60ed6f3 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -352,7 +352,7 @@
* bpf_convert_filter - convert filter program
* @prog: the user passed filter program
* @len: the length of the user passed filter program
- * @new_prog: buffer where converted program will be stored
+ * @new_prog: allocated 'struct bpf_prog' or NULL
* @new_len: pointer to store length of converted program
*
* Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
@@ -364,14 +364,13 @@
*
* 2) 2nd pass to remap in two passes: 1st pass finds new
* jump offsets, 2nd pass remapping:
- * new_prog = kmalloc(sizeof(struct bpf_insn) * new_len);
* bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
*/
static int bpf_convert_filter(struct sock_filter *prog, int len,
- struct bpf_insn *new_prog, int *new_len)
+ struct bpf_prog *new_prog, int *new_len)
{
- int new_flen = 0, pass = 0, target, i;
- struct bpf_insn *new_insn;
+ int new_flen = 0, pass = 0, target, i, stack_off;
+ struct bpf_insn *new_insn, *first_insn = NULL;
struct sock_filter *fp;
int *addrs = NULL;
u8 bpf_src;
@@ -383,6 +382,7 @@
return -EINVAL;
if (new_prog) {
+ first_insn = new_prog->insnsi;
addrs = kcalloc(len, sizeof(*addrs),
GFP_KERNEL | __GFP_NOWARN);
if (!addrs)
@@ -390,11 +390,11 @@
}
do_pass:
- new_insn = new_prog;
+ new_insn = first_insn;
fp = prog;
/* Classic BPF related prologue emission. */
- if (new_insn) {
+ if (new_prog) {
/* Classic BPF expects A and X to be reset first. These need
* to be guaranteed to be the first two instructions.
*/
@@ -415,7 +415,7 @@
struct bpf_insn *insn = tmp_insns;
if (addrs)
- addrs[i] = new_insn - new_prog;
+ addrs[i] = new_insn - first_insn;
switch (fp->code) {
/* All arithmetic insns and skb loads map as-is. */
@@ -561,17 +561,25 @@
/* Store to stack. */
case BPF_ST:
case BPF_STX:
+ stack_off = fp->k * 4 + 4;
*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
BPF_ST ? BPF_REG_A : BPF_REG_X,
- -(BPF_MEMWORDS - fp->k) * 4);
+ -stack_off);
+ /* check_load_and_stores() verifies that classic BPF can
+ * load from stack only after write, so tracking
+ * stack_depth for ST|STX insns is enough
+ */
+ if (new_prog && new_prog->aux->stack_depth < stack_off)
+ new_prog->aux->stack_depth = stack_off;
break;
/* Load from stack. */
case BPF_LD | BPF_MEM:
case BPF_LDX | BPF_MEM:
+ stack_off = fp->k * 4 + 4;
*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
BPF_REG_A : BPF_REG_X, BPF_REG_FP,
- -(BPF_MEMWORDS - fp->k) * 4);
+ -stack_off);
break;
/* A = K or X = K */
@@ -619,13 +627,13 @@
if (!new_prog) {
/* Only calculating new length. */
- *new_len = new_insn - new_prog;
+ *new_len = new_insn - first_insn;
return 0;
}
pass++;
- if (new_flen != new_insn - new_prog) {
- new_flen = new_insn - new_prog;
+ if (new_flen != new_insn - first_insn) {
+ new_flen = new_insn - first_insn;
if (pass > 2)
goto err;
goto do_pass;
@@ -1017,7 +1025,7 @@
fp->len = new_len;
/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
- err = bpf_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
+ err = bpf_convert_filter(old_prog, old_len, fp, &new_len);
if (err)
/* 2nd bpf_convert_filter() can fail only if it fails
* to allocate memory, remapping must succeed. Note,
@@ -1866,6 +1874,24 @@
.arg1_type = ARG_PTR_TO_CTX,
};
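+/* Helper backing bpf_set_hash(): override skb->hash with a value
+ * chosen by the BPF program.
+ */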
+BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
+{
+ /* Set user specified hash as L4(+), so that it gets returned
+ * on skb_get_hash() call unless BPF prog later on triggers a
+ * skb_clear_hash().
+ */
+ __skb_set_sw_hash(skb, hash, true);
+ return 0;
+}
+
+static const struct bpf_func_proto bpf_set_hash_proto = {
+ .func = bpf_set_hash,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+};
+
BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
u16, vlan_tci)
{
@@ -2736,6 +2762,8 @@
return &bpf_get_hash_recalc_proto;
case BPF_FUNC_set_hash_invalid:
return &bpf_set_hash_invalid_proto;
+ case BPF_FUNC_set_hash:
+ return &bpf_set_hash_proto;
case BPF_FUNC_perf_event_output:
return &bpf_skb_event_output_proto;
case BPF_FUNC_get_smp_processor_id:
@@ -2767,12 +2795,6 @@
}
static const struct bpf_func_proto *
-cg_skb_func_proto(enum bpf_func_id func_id)
-{
- return sk_filter_func_proto(func_id);
-}
-
-static const struct bpf_func_proto *
lwt_inout_func_proto(enum bpf_func_id func_id)
{
switch (func_id) {
@@ -2834,7 +2856,8 @@
}
}
-static bool __is_valid_access(int off, int size)
+static bool __is_valid_access(int off, int size, enum bpf_access_type type,
+ int *ctx_field_size)
{
if (off < 0 || off >= sizeof(struct __sk_buff))
return false;
@@ -2850,9 +2873,27 @@
offsetof(struct __sk_buff, cb[4]) + sizeof(__u32))
return false;
break;
- default:
+ case offsetof(struct __sk_buff, data) ...
+ offsetof(struct __sk_buff, data) + sizeof(__u32) - 1:
+ case offsetof(struct __sk_buff, data_end) ...
+ offsetof(struct __sk_buff, data_end) + sizeof(__u32) - 1:
if (size != sizeof(__u32))
return false;
+ break;
+ default:
+ /* permit narrower loads for fields other than cb/data/data_end */
+ *ctx_field_size = 4;
+ if (type == BPF_WRITE) {
+ if (size != sizeof(__u32))
+ return false;
+ } else {
+ if (size != sizeof(__u32))
+#ifdef __LITTLE_ENDIAN
+ return (off & 0x3) == 0 && (size == 1 || size == 2);
+#else
+ return (off & 0x3) + size == 4 && (size == 1 || size == 2);
+#endif
+ }
}
return true;
@@ -2860,12 +2901,16 @@
static bool sk_filter_is_valid_access(int off, int size,
enum bpf_access_type type,
- enum bpf_reg_type *reg_type)
+ enum bpf_reg_type *reg_type,
+ int *ctx_field_size)
{
switch (off) {
- case offsetof(struct __sk_buff, tc_classid):
- case offsetof(struct __sk_buff, data):
- case offsetof(struct __sk_buff, data_end):
+ case offsetof(struct __sk_buff, tc_classid) ...
+ offsetof(struct __sk_buff, tc_classid) + sizeof(__u32) - 1:
+ case offsetof(struct __sk_buff, data) ...
+ offsetof(struct __sk_buff, data) + sizeof(__u32) - 1:
+ case offsetof(struct __sk_buff, data_end) ...
+ offsetof(struct __sk_buff, data_end) + sizeof(__u32) - 1:
return false;
}
@@ -2879,15 +2924,17 @@
}
}
- return __is_valid_access(off, size);
+ return __is_valid_access(off, size, type, ctx_field_size);
}
static bool lwt_is_valid_access(int off, int size,
enum bpf_access_type type,
- enum bpf_reg_type *reg_type)
+ enum bpf_reg_type *reg_type,
+ int *ctx_field_size)
{
switch (off) {
- case offsetof(struct __sk_buff, tc_classid):
+ case offsetof(struct __sk_buff, tc_classid) ...
+ offsetof(struct __sk_buff, tc_classid) + sizeof(__u32) - 1:
return false;
}
@@ -2912,12 +2959,13 @@
break;
}
- return __is_valid_access(off, size);
+ return __is_valid_access(off, size, type, ctx_field_size);
}
static bool sock_filter_is_valid_access(int off, int size,
enum bpf_access_type type,
- enum bpf_reg_type *reg_type)
+ enum bpf_reg_type *reg_type,
+ int *ctx_field_size)
{
if (type == BPF_WRITE) {
switch (off) {
@@ -2980,7 +3028,8 @@
static bool tc_cls_act_is_valid_access(int off, int size,
enum bpf_access_type type,
- enum bpf_reg_type *reg_type)
+ enum bpf_reg_type *reg_type,
+ int *ctx_field_size)
{
if (type == BPF_WRITE) {
switch (off) {
@@ -3005,7 +3054,7 @@
break;
}
- return __is_valid_access(off, size);
+ return __is_valid_access(off, size, type, ctx_field_size);
}
static bool __is_valid_xdp_access(int off, int size)
@@ -3022,7 +3071,8 @@
static bool xdp_is_valid_access(int off, int size,
enum bpf_access_type type,
- enum bpf_reg_type *reg_type)
+ enum bpf_reg_type *reg_type,
+ int *ctx_field_size)
{
if (type == BPF_WRITE)
return false;
@@ -3336,7 +3386,7 @@
};
const struct bpf_verifier_ops cg_skb_prog_ops = {
- .get_func_proto = cg_skb_func_proto,
+ .get_func_proto = sk_filter_func_proto,
.is_valid_access = sk_filter_is_valid_access,
.convert_ctx_access = bpf_convert_ctx_access,
.test_run = bpf_prog_test_run_skb,
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 28d94bc..fc5fc45 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -18,6 +18,7 @@
#include <linux/stddef.h>
#include <linux/if_ether.h>
#include <linux/mpls.h>
+#include <linux/tcp.h>
#include <net/flow_dissector.h>
#include <scsi/fc/fc_fcoe.h>
@@ -342,6 +343,64 @@
return FLOW_DISSECT_RET_OUT_PROTO_AGAIN;
}
+static void
+__skb_flow_dissect_tcp(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container, void *data, int thoff, int hlen)
+{
+ struct flow_dissector_key_tcp *key_tcp;
+ struct tcphdr *th, _th;
+
+ if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP))
+ return;
+
+ th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
+ if (!th)
+ return;
+
+ if (unlikely(__tcp_hdrlen(th) < sizeof(_th)))
+ return;
+
+ key_tcp = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_TCP,
+ target_container);
+ key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
+}
+
+static void
+__skb_flow_dissect_ipv4(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container, void *data, const struct iphdr *iph)
+{
+ struct flow_dissector_key_ip *key_ip;
+
+ if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
+ return;
+
+ key_ip = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ target_container);
+ key_ip->tos = iph->tos;
+ key_ip->ttl = iph->ttl;
+}
+
+static void
+__skb_flow_dissect_ipv6(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container, void *data, const struct ipv6hdr *iph)
+{
+ struct flow_dissector_key_ip *key_ip;
+
+ if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
+ return;
+
+ key_ip = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ target_container);
+ key_ip->tos = ipv6_get_dsfield(iph);
+ key_ip->ttl = iph->hop_limit;
+}
+
/**
* __skb_flow_dissect - extract the flow_keys struct and return it
* @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
@@ -444,6 +503,9 @@
}
}
+ __skb_flow_dissect_ipv4(skb, flow_dissector,
+ target_container, data, iph);
+
if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
goto out_good;
@@ -489,6 +551,9 @@
goto out_good;
}
+ __skb_flow_dissect_ipv6(skb, flow_dissector,
+ target_container, data, iph);
+
if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
goto out_good;
@@ -683,6 +748,10 @@
case IPPROTO_MPLS:
proto = htons(ETH_P_MPLS_UC);
goto mpls;
+ case IPPROTO_TCP:
+ __skb_flow_dissect_tcp(skb, flow_dissector, target_container,
+ data, nhoff, hlen);
+ break;
default:
break;
}
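__skb_flow_dissect_tcp() stores the low 12 bits of TCP header word 3 in network order, i.e. the NS..FIN flag bits plus the three reserved bits, in the new FLOW_DISSECTOR_KEY_TCP key. A hedged sketch of a consumer testing one flag (helper name hypothetical):

static bool flow_key_has_syn(const struct flow_dissector_key_tcp *key)
{
	/* key->flags is __be16; SYN is bit 1 of the low (second) byte */
	return key->flags & htons(0x0002);
}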
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index b3bc0a3..1307731 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -240,7 +240,8 @@
static int bpf_build_state(struct nlattr *nla,
unsigned int family, const void *cfg,
- struct lwtunnel_state **ts)
+ struct lwtunnel_state **ts,
+ struct netlink_ext_ack *extack)
{
struct nlattr *tb[LWT_BPF_MAX + 1];
struct lwtunnel_state *newts;
@@ -250,7 +251,7 @@
if (family != AF_INET && family != AF_INET6)
return -EAFNOSUPPORT;
- ret = nla_parse_nested(tb, LWT_BPF_MAX, nla, bpf_nl_policy, NULL);
+ ret = nla_parse_nested(tb, LWT_BPF_MAX, nla, bpf_nl_policy, extack);
if (ret < 0)
return ret;
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index cfae3d5..d9cb353 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -103,37 +103,53 @@
int lwtunnel_build_state(u16 encap_type,
struct nlattr *encap, unsigned int family,
- const void *cfg, struct lwtunnel_state **lws)
+ const void *cfg, struct lwtunnel_state **lws,
+ struct netlink_ext_ack *extack)
{
const struct lwtunnel_encap_ops *ops;
+ bool found = false;
int ret = -EINVAL;
if (encap_type == LWTUNNEL_ENCAP_NONE ||
- encap_type > LWTUNNEL_ENCAP_MAX)
+ encap_type > LWTUNNEL_ENCAP_MAX) {
+ NL_SET_ERR_MSG_ATTR(extack, encap,
+ "Unknown LWT encapsulation type");
return ret;
+ }
ret = -EOPNOTSUPP;
rcu_read_lock();
ops = rcu_dereference(lwtun_encaps[encap_type]);
if (likely(ops && ops->build_state && try_module_get(ops->owner))) {
- ret = ops->build_state(encap, family, cfg, lws);
+ found = true;
+ ret = ops->build_state(encap, family, cfg, lws, extack);
if (ret)
module_put(ops->owner);
}
rcu_read_unlock();
+ /* don't rely on -EOPNOTSUPP to detect match as build_state
+ * handlers could return it
+ */
+ if (!found) {
+ NL_SET_ERR_MSG_ATTR(extack, encap,
+ "LWT encapsulation type not supported");
+ }
+
return ret;
}
EXPORT_SYMBOL(lwtunnel_build_state);
-int lwtunnel_valid_encap_type(u16 encap_type)
+int lwtunnel_valid_encap_type(u16 encap_type, struct netlink_ext_ack *extack)
{
const struct lwtunnel_encap_ops *ops;
int ret = -EINVAL;
if (encap_type == LWTUNNEL_ENCAP_NONE ||
- encap_type > LWTUNNEL_ENCAP_MAX)
+ encap_type > LWTUNNEL_ENCAP_MAX) {
+ NL_SET_ERR_MSG(extack, "Unknown lwt encapsulation type");
return ret;
+ }
rcu_read_lock();
ops = rcu_dereference(lwtun_encaps[encap_type]);
@@ -153,11 +169,16 @@
}
}
#endif
- return ops ? 0 : -EOPNOTSUPP;
+ ret = ops ? 0 : -EOPNOTSUPP;
+ if (ret < 0)
+ NL_SET_ERR_MSG(extack, "lwt encapsulation type not supported");
+
+ return ret;
}
EXPORT_SYMBOL(lwtunnel_valid_encap_type);
-int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining)
+int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining,
+ struct netlink_ext_ack *extack)
{
struct rtnexthop *rtnh = (struct rtnexthop *)attr;
struct nlattr *nla_entype;
@@ -174,7 +195,8 @@
if (nla_entype) {
encap_type = nla_get_u16(nla_entype);
- if (lwtunnel_valid_encap_type(encap_type) != 0)
+ if (lwtunnel_valid_encap_type(encap_type,
+ extack) != 0)
return -EOPNOTSUPP;
}
}
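With extack threaded through build_state(), encapsulation modules can attach a human-readable reason to the netlink ack instead of returning a bare errno. A minimal sketch of the updated handler shape, all names hypothetical:

static int foo_build_state(struct nlattr *nla, unsigned int family,
			   const void *cfg, struct lwtunnel_state **ts,
			   struct netlink_ext_ack *extack)
{
	if (family != AF_INET) {
		/* surfaced to userspace via the extended ack */
		NL_SET_ERR_MSG(extack, "foo encap supports IPv4 only");
		return -EAFNOSUPPORT;
	}
	return 0;
}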
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index d274f81..dadb5ee 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -118,6 +118,50 @@
EXPORT_SYMBOL(neigh_rand_reach_time);
+static bool neigh_del(struct neighbour *n, __u8 state,
+ struct neighbour __rcu **np, struct neigh_table *tbl)
+{
+ bool retval = false;
+
+ write_lock(&n->lock);
+ if (atomic_read(&n->refcnt) == 1 && !(n->nud_state & state)) {
+ struct neighbour *neigh;
+
+ neigh = rcu_dereference_protected(n->next,
+ lockdep_is_held(&tbl->lock));
+ rcu_assign_pointer(*np, neigh);
+ n->dead = 1;
+ retval = true;
+ }
+ write_unlock(&n->lock);
+ if (retval)
+ neigh_cleanup_and_release(n);
+ return retval;
+}
+
+bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
+{
+ struct neigh_hash_table *nht;
+ void *pkey = ndel->primary_key;
+ u32 hash_val;
+ struct neighbour *n;
+ struct neighbour __rcu **np;
+
+ nht = rcu_dereference_protected(tbl->nht,
+ lockdep_is_held(&tbl->lock));
+ hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
+ hash_val = hash_val >> (32 - nht->hash_shift);
+
+ np = &nht->hash_buckets[hash_val];
+ while ((n = rcu_dereference_protected(*np,
+ lockdep_is_held(&tbl->lock)))) {
+ if (n == ndel)
+ return neigh_del(n, 0, np, tbl);
+ np = &n->next;
+ }
+ return false;
+}
+
static int neigh_forced_gc(struct neigh_table *tbl)
{
int shrunk = 0;
@@ -140,19 +184,10 @@
* - nobody refers to it.
* - it is not permanent
*/
- write_lock(&n->lock);
- if (atomic_read(&n->refcnt) == 1 &&
- !(n->nud_state & NUD_PERMANENT)) {
- rcu_assign_pointer(*np,
- rcu_dereference_protected(n->next,
- lockdep_is_held(&tbl->lock)));
- n->dead = 1;
- shrunk = 1;
- write_unlock(&n->lock);
- neigh_cleanup_and_release(n);
+ if (neigh_del(n, NUD_PERMANENT, np, tbl)) {
+ shrunk = 1;
continue;
}
- write_unlock(&n->lock);
np = &n->next;
}
}
@@ -1649,7 +1684,10 @@
NEIGH_UPDATE_F_OVERRIDE |
NEIGH_UPDATE_F_ADMIN,
NETLINK_CB(skb).portid);
+ write_lock_bh(&tbl->lock);
neigh_release(neigh);
+ neigh_remove_one(neigh, tbl);
+ write_unlock_bh(&tbl->lock);
out:
return err;
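Net effect of the neighbour.c hunks: forced GC and administrative delete now share neigh_del(), and RTM_DELNEIGH genuinely unlinks the entry instead of leaving a NUD_FAILED husk in the hash table. The calling contract, restated as a sketch (tbl->lock write-held; the entry is unlinked only when the table holds the last reference):

write_lock_bh(&tbl->lock);
neigh_release(neigh);		/* drop the lookup reference first */
neigh_remove_one(neigh, tbl);	/* unlink iff refcnt is now 1 */
write_unlock_bh(&tbl->lock);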
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 14d0934..4847964 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -363,15 +363,10 @@
netif_addr_lock_bh(dev);
netdev_for_each_mc_addr(ha, dev) {
- int i;
-
- seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex,
- dev->name, ha->refcount, ha->global_use);
-
- for (i = 0; i < dev->addr_len; i++)
- seq_printf(seq, "%02x", ha->addr[i]);
-
- seq_putc(seq, '\n');
+ seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n",
+ dev->ifindex, dev->name,
+ ha->refcount, ha->global_use,
+ (int)dev->addr_len, ha->addr);
}
netif_addr_unlock_bh(dev);
return 0;
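%*phN is the kernel vsnprintf extension for small hex dumps: it prints the given number of bytes (at most 64) back to back with no separator, which is exactly what the removed per-byte loop produced. For example:

u8 addr[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

pr_info("%*phN\n", (int)sizeof(addr), addr);	/* prints 001122334455 */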
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 65ea0ff..58e6cc7 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -323,7 +323,11 @@
static int change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
- int res, orig_len = dev->tx_queue_len;
+ unsigned int orig_len = dev->tx_queue_len;
+ int res;
+
+ if (new_len != (unsigned int)new_len)
+ return -ERANGE;
if (new_len != orig_len) {
dev->tx_queue_len = new_len;
@@ -349,7 +353,7 @@
return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
-NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong);
+NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);
static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
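The new check in change_tx_queue_len() is a plain truncation guard: sysfs hands over an unsigned long, but dev->tx_queue_len is now 32 bits wide, so any value that does not survive the narrowing round-trip is rejected up front. Illustration:

unsigned long new_len = 0x100000001UL;	/* 2^32 + 1 on a 64-bit arch */

if (new_len != (unsigned int)new_len)
	return -ERANGE;		/* would otherwise silently wrap to 1 */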
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 26bbfab..2178db8 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -596,6 +596,7 @@
{
struct net *net = sock_net(skb->sk);
struct nlattr *tb[NETNSA_MAX + 1];
+ struct nlattr *nla;
struct net *peer;
int nsid, err;
@@ -603,23 +604,35 @@
rtnl_net_policy, extack);
if (err < 0)
return err;
- if (!tb[NETNSA_NSID])
+ if (!tb[NETNSA_NSID]) {
+ NL_SET_ERR_MSG(extack, "nsid is missing");
return -EINVAL;
+ }
nsid = nla_get_s32(tb[NETNSA_NSID]);
- if (tb[NETNSA_PID])
+ if (tb[NETNSA_PID]) {
peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
- else if (tb[NETNSA_FD])
+ nla = tb[NETNSA_PID];
+ } else if (tb[NETNSA_FD]) {
peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
- else
+ nla = tb[NETNSA_FD];
+ } else {
+ NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
return -EINVAL;
- if (IS_ERR(peer))
+ }
+ if (IS_ERR(peer)) {
+ NL_SET_BAD_ATTR(extack, nla);
+ NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
return PTR_ERR(peer);
+ }
spin_lock_bh(&net->nsid_lock);
if (__peernet2id(net, peer) >= 0) {
spin_unlock_bh(&net->nsid_lock);
err = -EEXIST;
+ NL_SET_BAD_ATTR(extack, nla);
+ NL_SET_ERR_MSG(extack,
+ "Peer netns already has a nsid assigned");
goto out;
}
@@ -628,6 +641,10 @@
if (err >= 0) {
rtnl_net_notifyid(net, RTM_NEWNSID, err);
err = 0;
+ } else if (err == -ENOSPC && nsid >= 0) {
+ err = -EEXIST;
+ NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
+ NL_SET_ERR_MSG(extack, "The specified nsid is already used");
}
out:
put_net(peer);
@@ -670,6 +687,7 @@
{
struct net *net = sock_net(skb->sk);
struct nlattr *tb[NETNSA_MAX + 1];
+ struct nlattr *nla;
struct sk_buff *msg;
struct net *peer;
int err, id;
@@ -678,15 +696,22 @@
rtnl_net_policy, extack);
if (err < 0)
return err;
- if (tb[NETNSA_PID])
+ if (tb[NETNSA_PID]) {
peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
- else if (tb[NETNSA_FD])
+ nla = tb[NETNSA_PID];
+ } else if (tb[NETNSA_FD]) {
peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
- else
+ nla = tb[NETNSA_FD];
+ } else {
+ NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
return -EINVAL;
+ }
- if (IS_ERR(peer))
+ if (IS_ERR(peer)) {
+ NL_SET_BAD_ATTR(extack, nla);
+ NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
return PTR_ERR(peer);
+ }
msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
if (!msg) {
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 29be246..37c1e34 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -441,7 +441,7 @@
ip6h->saddr = np->local_ip.in6;
ip6h->daddr = np->remote_ip.in6;
- eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
+ eth = skb_push(skb, ETH_HLEN);
skb_reset_mac_header(skb);
skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
} else {
@@ -470,7 +470,7 @@
put_unaligned(np->remote_ip.ip, &(iph->daddr));
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
- eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
+ eth = skb_push(skb, ETH_HLEN);
skb_reset_mac_header(skb);
skb->protocol = eth->h_proto = htons(ETH_P_IP);
}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 96947f5..2dd42c5 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2675,7 +2675,7 @@
goto err;
}
/* restore ll */
- eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
+ eth = skb_push(skb, ETH_HLEN);
memcpy(eth, pkt_dev->hh, 2 * ETH_ALEN);
eth->h_proto = protocol;
@@ -2714,11 +2714,11 @@
struct timeval timestamp;
struct pktgen_hdr *pgh;
- pgh = (struct pktgen_hdr *)skb_put(skb, sizeof(*pgh));
+ pgh = skb_put(skb, sizeof(*pgh));
datalen -= sizeof(*pgh);
if (pkt_dev->nfrags <= 0) {
- memset(skb_put(skb, datalen), 0, datalen);
+ skb_put_zero(skb, datalen);
} else {
int frags = pkt_dev->nfrags;
int i, len;
@@ -2729,7 +2729,7 @@
frags = MAX_SKB_FRAGS;
len = datalen - frags * PAGE_SIZE;
if (len > 0) {
- memset(skb_put(skb, len), 0, len);
+ skb_put_zero(skb, len);
datalen = frags * PAGE_SIZE;
}
@@ -2844,34 +2844,35 @@
skb_reserve(skb, 16);
/* Reserve for ethernet and IP header */
- eth = (__u8 *) skb_push(skb, 14);
- mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32));
+ eth = skb_push(skb, 14);
+ mpls = skb_put(skb, pkt_dev->nr_labels * sizeof(__u32));
if (pkt_dev->nr_labels)
mpls_push(mpls, pkt_dev);
if (pkt_dev->vlan_id != 0xffff) {
if (pkt_dev->svlan_id != 0xffff) {
- svlan_tci = (__be16 *)skb_put(skb, sizeof(__be16));
+ svlan_tci = skb_put(skb, sizeof(__be16));
*svlan_tci = build_tci(pkt_dev->svlan_id,
pkt_dev->svlan_cfi,
pkt_dev->svlan_p);
- svlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
+ svlan_encapsulated_proto = skb_put(skb,
+ sizeof(__be16));
*svlan_encapsulated_proto = htons(ETH_P_8021Q);
}
- vlan_tci = (__be16 *)skb_put(skb, sizeof(__be16));
+ vlan_tci = skb_put(skb, sizeof(__be16));
*vlan_tci = build_tci(pkt_dev->vlan_id,
pkt_dev->vlan_cfi,
pkt_dev->vlan_p);
- vlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
+ vlan_encapsulated_proto = skb_put(skb, sizeof(__be16));
*vlan_encapsulated_proto = htons(ETH_P_IP);
}
skb_reset_mac_header(skb);
skb_set_network_header(skb, skb->len);
- iph = (struct iphdr *) skb_put(skb, sizeof(struct iphdr));
+ iph = skb_put(skb, sizeof(struct iphdr));
skb_set_transport_header(skb, skb->len);
- udph = (struct udphdr *) skb_put(skb, sizeof(struct udphdr));
+ udph = skb_put(skb, sizeof(struct udphdr));
skb_set_queue_mapping(skb, queue_map);
skb->priority = pkt_dev->skb_priority;
@@ -2971,34 +2972,35 @@
skb_reserve(skb, 16);
/* Reserve for ethernet and IP header */
- eth = (__u8 *) skb_push(skb, 14);
- mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32));
+ eth = skb_push(skb, 14);
+ mpls = skb_put(skb, pkt_dev->nr_labels * sizeof(__u32));
if (pkt_dev->nr_labels)
mpls_push(mpls, pkt_dev);
if (pkt_dev->vlan_id != 0xffff) {
if (pkt_dev->svlan_id != 0xffff) {
- svlan_tci = (__be16 *)skb_put(skb, sizeof(__be16));
+ svlan_tci = skb_put(skb, sizeof(__be16));
*svlan_tci = build_tci(pkt_dev->svlan_id,
pkt_dev->svlan_cfi,
pkt_dev->svlan_p);
- svlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
+ svlan_encapsulated_proto = skb_put(skb,
+ sizeof(__be16));
*svlan_encapsulated_proto = htons(ETH_P_8021Q);
}
- vlan_tci = (__be16 *)skb_put(skb, sizeof(__be16));
+ vlan_tci = skb_put(skb, sizeof(__be16));
*vlan_tci = build_tci(pkt_dev->vlan_id,
pkt_dev->vlan_cfi,
pkt_dev->vlan_p);
- vlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
+ vlan_encapsulated_proto = skb_put(skb, sizeof(__be16));
*vlan_encapsulated_proto = htons(ETH_P_IPV6);
}
skb_reset_mac_header(skb);
skb_set_network_header(skb, skb->len);
- iph = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
+ iph = skb_put(skb, sizeof(struct ipv6hdr));
skb_set_transport_header(skb, skb->len);
- udph = (struct udphdr *) skb_put(skb, sizeof(struct udphdr));
+ udph = skb_put(skb, sizeof(struct udphdr));
skb_set_queue_mapping(skb, queue_map);
skb->priority = pkt_dev->skb_priority;
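The pktgen and netpoll conversions rely on two pieces of this series: skb_put()/skb_push() now return void *, so C's implicit pointer conversion removes the casts, and skb_put_zero() folds the common put-then-memset pair. The helper is roughly:

static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);
	return tmp;
}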
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 467a2f4..8da89c1 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -39,6 +39,7 @@
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
+#include <linux/bpf.h>
#include <linux/uaccess.h>
@@ -899,7 +900,8 @@
static size_t rtnl_xdp_size(void)
{
size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
- nla_total_size(1); /* XDP_ATTACHED */
+ nla_total_size(1) + /* XDP_ATTACHED */
+ nla_total_size(4); /* XDP_PROG_ID */
return xdp_size;
}
@@ -942,6 +944,7 @@
+ nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
+ nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
+ rtnl_xdp_size() /* IFLA_XDP */
+ + nla_total_size(4) /* IFLA_EVENT */
+ nla_total_size(1); /* IFLA_PROTO_DOWN */
}
@@ -1248,15 +1251,20 @@
return 0;
}
-static u8 rtnl_xdp_attached_mode(struct net_device *dev)
+static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id)
{
const struct net_device_ops *ops = dev->netdev_ops;
+ const struct bpf_prog *generic_xdp_prog;
ASSERT_RTNL();
- if (rcu_access_pointer(dev->xdp_prog))
+ *prog_id = 0;
+ generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
+ if (generic_xdp_prog) {
+ *prog_id = generic_xdp_prog->aux->id;
return XDP_ATTACHED_SKB;
- if (ops->ndo_xdp && __dev_xdp_attached(dev, ops->ndo_xdp))
+ }
+ if (ops->ndo_xdp && __dev_xdp_attached(dev, ops->ndo_xdp, prog_id))
return XDP_ATTACHED_DRV;
return XDP_ATTACHED_NONE;
@@ -1265,6 +1273,7 @@
static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
{
struct nlattr *xdp;
+ u32 prog_id;
int err;
xdp = nla_nest_start(skb, IFLA_XDP);
@@ -1272,10 +1281,16 @@
return -EMSGSIZE;
err = nla_put_u8(skb, IFLA_XDP_ATTACHED,
- rtnl_xdp_attached_mode(dev));
+ rtnl_xdp_attached_mode(dev, &prog_id));
if (err)
goto err_cancel;
+ if (prog_id) {
+ err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
+ if (err)
+ goto err_cancel;
+ }
+
nla_nest_end(skb, xdp);
return 0;
@@ -1284,9 +1299,40 @@
return err;
}
+static u32 rtnl_get_event(unsigned long event)
+{
+ u32 rtnl_event_type = IFLA_EVENT_NONE;
+
+ switch (event) {
+ case NETDEV_REBOOT:
+ rtnl_event_type = IFLA_EVENT_REBOOT;
+ break;
+ case NETDEV_FEAT_CHANGE:
+ rtnl_event_type = IFLA_EVENT_FEATURES;
+ break;
+ case NETDEV_BONDING_FAILOVER:
+ rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
+ break;
+ case NETDEV_NOTIFY_PEERS:
+ rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
+ break;
+ case NETDEV_RESEND_IGMP:
+ rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
+ break;
+ case NETDEV_CHANGEINFODATA:
+ rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
+ break;
+ default:
+ break;
+ }
+
+ return rtnl_event_type;
+}
+
static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
int type, u32 pid, u32 seq, u32 change,
- unsigned int flags, u32 ext_filter_mask)
+ unsigned int flags, u32 ext_filter_mask,
+ u32 event)
{
struct ifinfomsg *ifm;
struct nlmsghdr *nlh;
@@ -1335,6 +1381,11 @@
nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
goto nla_put_failure;
+ if (event != IFLA_EVENT_NONE) {
+ if (nla_put_u32(skb, IFLA_EVENT, event))
+ goto nla_put_failure;
+ }
+
if (rtnl_fill_link_ifmap(skb, dev))
goto nla_put_failure;
@@ -1469,6 +1520,7 @@
[IFLA_LINK_NETNSID] = { .type = NLA_S32 },
[IFLA_PROTO_DOWN] = { .type = NLA_U8 },
[IFLA_XDP] = { .type = NLA_NESTED },
+ [IFLA_EVENT] = { .type = NLA_U32 },
[IFLA_GROUP] = { .type = NLA_U32 },
};
@@ -1517,6 +1569,7 @@
[IFLA_XDP_FD] = { .type = NLA_S32 },
[IFLA_XDP_ATTACHED] = { .type = NLA_U8 },
[IFLA_XDP_FLAGS] = { .type = NLA_U32 },
+ [IFLA_XDP_PROG_ID] = { .type = NLA_U32 },
};
static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
@@ -1629,7 +1682,7 @@
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, 0,
flags,
- ext_filter_mask);
+ ext_filter_mask, 0);
if (err < 0) {
if (likely(skb->len))
@@ -2051,8 +2104,8 @@
}
if (tb[IFLA_TXQLEN]) {
- unsigned long value = nla_get_u32(tb[IFLA_TXQLEN]);
- unsigned long orig_len = dev->tx_queue_len;
+ unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
+ unsigned int orig_len = dev->tx_queue_len;
if (dev->tx_queue_len ^ value) {
dev->tx_queue_len = value;
@@ -2189,7 +2242,7 @@
if (err < 0)
goto errout;
- if (xdp[IFLA_XDP_ATTACHED]) {
+ if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
err = -EINVAL;
goto errout;
}
@@ -2739,7 +2792,7 @@
return -ENOBUFS;
err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).portid,
- nlh->nlmsg_seq, 0, 0, ext_filter_mask);
+ nlh->nlmsg_seq, 0, 0, ext_filter_mask, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in if_nlmsg_size */
WARN_ON(err == -EMSGSIZE);
@@ -2811,7 +2864,8 @@
}
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
- unsigned int change, gfp_t flags)
+ unsigned int change,
+ u32 event, gfp_t flags)
{
struct net *net = dev_net(dev);
struct sk_buff *skb;
@@ -2822,7 +2876,7 @@
if (skb == NULL)
goto errout;
- err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0);
+ err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0, event);
if (err < 0) {
/* -EMSGSIZE implies BUG in if_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
@@ -2843,18 +2897,25 @@
rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
}
-void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
- gfp_t flags)
+static void rtmsg_ifinfo_event(int type, struct net_device *dev,
+ unsigned int change, u32 event,
+ gfp_t flags)
{
struct sk_buff *skb;
if (dev->reg_state != NETREG_REGISTERED)
return;
- skb = rtmsg_ifinfo_build_skb(type, dev, change, flags);
+ skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags);
if (skb)
rtmsg_ifinfo_send(skb, dev, flags);
}
+
+void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
+ gfp_t flags)
+{
+ rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags);
+}
EXPORT_SYMBOL(rtmsg_ifinfo);
static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
@@ -4159,6 +4220,18 @@
rtnl_unlock();
}
+static int rtnetlink_bind(struct net *net, int group)
+{
+ switch (group) {
+ case RTNLGRP_IPV4_MROUTE_R:
+ case RTNLGRP_IPV6_MROUTE_R:
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
+ break;
+ }
+ return 0;
+}
+
static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
@@ -4171,7 +4244,8 @@
case NETDEV_NOTIFY_PEERS:
case NETDEV_RESEND_IGMP:
case NETDEV_CHANGEINFODATA:
- rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
+ rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
+ GFP_KERNEL);
break;
default:
break;
@@ -4192,6 +4266,7 @@
.input = rtnetlink_rcv,
.cb_mutex = &rtnl_mutex,
.flags = NL_CFG_F_NONROOT_RECV,
+ .bind = rtnetlink_bind,
};
sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
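With IFLA_XDP_PROG_ID reported in RTM_*LINK messages, a link dump can identify the attached BPF program without a bpf() syscall. A hedged sketch of walking the nested attribute (kernel-style nla helpers, tb[] assumed to come from a parsed IFLA_* table, error handling elided):

struct nlattr *attr;
int rem;

nla_for_each_nested(attr, tb[IFLA_XDP], rem) {
	if (nla_type(attr) == IFLA_XDP_PROG_ID)
		pr_info("attached XDP prog id: %u\n", nla_get_u32(attr));
}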
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index ae35cce..7232274 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -51,7 +51,8 @@
#endif
#if IS_ENABLED(CONFIG_IPV6)
-u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
+u32 secure_tcpv6_ts_off(const struct net *net,
+ const __be32 *saddr, const __be32 *daddr)
{
const struct {
struct in6_addr saddr;
@@ -61,7 +62,7 @@
.daddr = *(struct in6_addr *)daddr,
};
- if (sysctl_tcp_timestamps != 1)
+ if (net->ipv4.sysctl_tcp_timestamps != 1)
return 0;
ts_secret_init();
@@ -113,9 +114,9 @@
#endif
#ifdef CONFIG_INET
-u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
+u32 secure_tcp_ts_off(const struct net *net, __be32 saddr, __be32 daddr)
{
- if (sysctl_tcp_timestamps != 1)
+ if (net->ipv4.sysctl_tcp_timestamps != 1)
return 0;
ts_secret_init();
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b1be7c0..f75897a 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -643,12 +643,10 @@
kmem_cache_free(skbuff_fclone_cache, fclones);
}
-static void skb_release_head_state(struct sk_buff *skb)
+void skb_release_head_state(struct sk_buff *skb)
{
skb_dst_drop(skb);
-#ifdef CONFIG_XFRM
- secpath_put(skb->sp);
-#endif
+ secpath_reset(skb);
if (skb->destructor) {
WARN_ON(in_irq());
skb->destructor(skb);
@@ -694,12 +692,9 @@
*/
void kfree_skb(struct sk_buff *skb)
{
- if (unlikely(!skb))
+ if (!skb_unref(skb))
return;
- if (likely(atomic_read(&skb->users) == 1))
- smp_rmb();
- else if (likely(!atomic_dec_and_test(&skb->users)))
- return;
+
trace_kfree_skb(skb, __builtin_return_address(0));
__kfree_skb(skb);
}
@@ -746,17 +741,32 @@
*/
void consume_skb(struct sk_buff *skb)
{
- if (unlikely(!skb))
+ if (!skb_unref(skb))
return;
- if (likely(atomic_read(&skb->users) == 1))
- smp_rmb();
- else if (likely(!atomic_dec_and_test(&skb->users)))
- return;
+
trace_consume_skb(skb);
__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
+/**
+ * consume_stateless_skb - free an skbuff, assuming it is stateless
+ * @skb: buffer to free
+ *
+ * Works like consume_skb(), but this variant assumes that all the head
+ * states have been already dropped.
+ */
+void consume_stateless_skb(struct sk_buff *skb)
+{
+ if (!skb_unref(skb))
+ return;
+
+ trace_consume_skb(skb);
+ if (likely(skb->head))
+ skb_release_data(skb);
+ kfree_skbmem(skb);
+}
+
void __kfree_skb_flush(void)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
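kfree_skb(), consume_skb() and napi_consume_skb() previously open-coded the same refcount dance; it now lives in skb_unref(), which in include/linux/skbuff.h reads roughly:

static inline bool skb_unref(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return false;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return false;
	return true;	/* caller owns the skb and must free it */
}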
@@ -807,10 +817,9 @@
return;
}
- if (likely(atomic_read(&skb->users) == 1))
- smp_rmb();
- else if (likely(!atomic_dec_and_test(&skb->users)))
+ if (!skb_unref(skb))
return;
+
/* if reaching here SKB is ready to free */
trace_consume_skb(skb);
@@ -1412,7 +1421,7 @@
* returned.
*/
-unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
+void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
if (tail != skb) {
skb->data_len += len;
@@ -1431,9 +1440,9 @@
* exceed the total buffer size the kernel will panic. A pointer to the
* first byte of the extra data is returned.
*/
-unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
+void *skb_put(struct sk_buff *skb, unsigned int len)
{
- unsigned char *tmp = skb_tail_pointer(skb);
+ void *tmp = skb_tail_pointer(skb);
SKB_LINEAR_ASSERT(skb);
skb->tail += len;
skb->len += len;
@@ -1452,7 +1461,7 @@
* start. If this would exceed the total buffer headroom the kernel will
* panic. A pointer to the first byte of the extra data is returned.
*/
-unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
+void *skb_push(struct sk_buff *skb, unsigned int len)
{
skb->data -= len;
skb->len += len;
@@ -1472,7 +1481,7 @@
* is returned. Once the data has been pulled future pushes will overwrite
* the old data.
*/
-unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
+void *skb_pull(struct sk_buff *skb, unsigned int len)
{
return skb_pull_inline(skb, len);
}
@@ -1607,7 +1616,7 @@
*
* It is pretty complicated. Luckily, it is called only in exceptional cases.
*/
-unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
+void *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
/* If skb has not enough free space at tail, get new one
* plus 128 bytes for future expansions. If we have enough
@@ -2243,6 +2252,32 @@
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);
+static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
+{
+ net_warn_ratelimited(
+ "%s: attempt to compute crc32c without libcrc32c.ko\n",
+ __func__);
+ return 0;
+}
+
+static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
+ int offset, int len)
+{
+ net_warn_ratelimited(
+ "%s: attempt to compute crc32c without libcrc32c.ko\n",
+ __func__);
+ return 0;
+}
+
+static const struct skb_checksum_ops default_crc32c_ops = {
+ .update = warn_crc32c_csum_update,
+ .combine = warn_crc32c_csum_combine,
+};
+
+const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
+ &default_crc32c_ops;
+EXPORT_SYMBOL(crc32c_csum_stub);
+
/**
* skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
* @from: source buffer
@@ -2620,7 +2655,8 @@
{
int pos = skb_headlen(skb);
- skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
+ skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
+ SKBTX_SHARED_FRAG;
if (len < pos) /* Split line is inside header. */
skb_split_inside_header(skb, skb1, len, pos);
else /* Second chunk has no header, nothing to copy. */
@@ -3029,7 +3065,7 @@
* that the checksum difference is zero (e.g., a valid IP header)
* or you are setting ip_summed to CHECKSUM_NONE.
*/
-unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
+void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
unsigned char *data = skb->data;
@@ -3235,8 +3271,8 @@
skb_copy_from_linear_data_offset(head_skb, offset,
skb_put(nskb, hsize), hsize);
- skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
- SKBTX_SHARED_FRAG;
+ skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
+ SKBTX_SHARED_FRAG;
while (pos < offset + len) {
if (i >= nfrags) {
@@ -3482,24 +3518,18 @@
NULL);
}
-/**
- * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
- * @skb: Socket buffer containing the buffers to be mapped
- * @sg: The scatter-gather list to map into
- * @offset: The offset into the buffer's contents to start mapping
- * @len: Length of buffer space to be mapped
- *
- * Fill the specified scatter-gather list with mappings/pointers into a
- * region of the buffer space attached to a socket buffer.
- */
static int
-__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
+ unsigned int recursion_level)
{
int start = skb_headlen(skb);
int i, copy = start - offset;
struct sk_buff *frag_iter;
int elt = 0;
+ if (unlikely(recursion_level >= 24))
+ return -EMSGSIZE;
+
if (copy > 0) {
if (copy > len)
copy = len;
@@ -3518,6 +3548,8 @@
end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
if ((copy = end - offset) > 0) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ if (unlikely(elt && sg_is_last(&sg[elt - 1])))
+ return -EMSGSIZE;
if (copy > len)
copy = len;
@@ -3532,16 +3564,22 @@
}
skb_walk_frags(skb, frag_iter) {
- int end;
+ int end, ret;
WARN_ON(start > offset + len);
end = start + frag_iter->len;
if ((copy = end - offset) > 0) {
+ if (unlikely(elt && sg_is_last(&sg[elt - 1])))
+ return -EMSGSIZE;
+
if (copy > len)
copy = len;
- elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
- copy);
+ ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
+ copy, recursion_level + 1);
+ if (unlikely(ret < 0))
+ return ret;
+ elt += ret;
if ((len -= copy) == 0)
return elt;
offset += copy;
@@ -3552,6 +3590,31 @@
return elt;
}
+/**
+ * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
+ * @skb: Socket buffer containing the buffers to be mapped
+ * @sg: The scatter-gather list to map into
+ * @offset: The offset into the buffer's contents to start mapping
+ * @len: Length of buffer space to be mapped
+ *
+ * Fill the specified scatter-gather list with mappings/pointers into a
+ * region of the buffer space attached to a socket buffer. Returns either
+ * the number of scatterlist items used, or -EMSGSIZE if the contents
+ * could not fit.
+ */
+int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+{
+ int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
+
+ if (nsg <= 0)
+ return nsg;
+
+ sg_mark_end(&sg[nsg - 1]);
+
+ return nsg;
+}
+EXPORT_SYMBOL_GPL(skb_to_sgvec);
+
/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps skb to the given
* sglist without marking the sg which contains the last skb data as the end.
* So the caller can manipulate the sg list at will when padding new data after
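Since __skb_to_sgvec() can now fail (recursion depth, or running past a marked scatterlist end), callers must treat the return value as an error code rather than a plain count. A hedged sketch of the updated calling pattern, given an skb:

struct scatterlist sg[MAX_SKB_FRAGS + 1];
int nsg;

sg_init_table(sg, ARRAY_SIZE(sg));
nsg = skb_to_sgvec(skb, sg, 0, skb->len);
if (nsg < 0)
	return nsg;	/* -EMSGSIZE: the skb did not fit the sglist */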
@@ -3574,19 +3637,11 @@
int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
int offset, int len)
{
- return __skb_to_sgvec(skb, sg, offset, len);
+ return __skb_to_sgvec(skb, sg, offset, len, 0);
}
EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
-int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
-{
- int nsg = __skb_to_sgvec(skb, sg, offset, len);
- sg_mark_end(&sg[nsg - 1]);
-
- return nsg;
-}
-EXPORT_SYMBOL_GPL(skb_to_sgvec);
/**
* skb_cow_data - Check that a socket buffer's data buffers are writable
@@ -3878,6 +3933,10 @@
if (!sk)
return;
+ if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
+ skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
+ return;
+
tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
if (!skb_may_tx_timestamp(sk, tsonly))
return;
@@ -3899,7 +3958,8 @@
return;
if (tsonly) {
- skb_shinfo(skb)->tx_flags = skb_shinfo(orig_skb)->tx_flags;
+ skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
+ SKBTX_ANY_TSTAMP;
skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
}
diff --git a/net/core/sock.c b/net/core/sock.c
index 727f924..6f4b090 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1038,6 +1038,10 @@
#endif
case SO_MAX_PACING_RATE:
+ if (val != ~0U)
+ cmpxchg(&sk->sk_pacing_status,
+ SK_PACING_NONE,
+ SK_PACING_NEEDED);
sk->sk_max_pacing_rate = val;
sk->sk_pacing_rate = min(sk->sk_pacing_rate,
sk->sk_max_pacing_rate);
@@ -1074,6 +1078,18 @@
}
}
+static int groups_to_user(gid_t __user *dst, const struct group_info *src)
+{
+ struct user_namespace *user_ns = current_user_ns();
+ int i;
+
+ for (i = 0; i < src->ngroups; i++)
+ if (put_user(from_kgid_munged(user_ns, src->gid[i]), dst + i))
+ return -EFAULT;
+
+ return 0;
+}
+
int sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
@@ -1227,6 +1243,27 @@
goto lenout;
}
+ case SO_PEERGROUPS:
+ {
+ int ret, n;
+
+ if (!sk->sk_peer_cred)
+ return -ENODATA;
+
+ n = sk->sk_peer_cred->group_info->ngroups;
+ if (len < n * sizeof(gid_t)) {
+ len = n * sizeof(gid_t);
+ return put_user(len, optlen) ? -EFAULT : -ERANGE;
+ }
+ len = n * sizeof(gid_t);
+
+ ret = groups_to_user((gid_t __user *)optval,
+ sk->sk_peer_cred->group_info);
+ if (ret)
+ return ret;
+ goto lenout;
+ }
+
case SO_PEERNAME:
{
char address[128];
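SO_PEERGROUPS returns the peer's supplementary groups, captured at the same instant as SO_PEERCRED. The -ERANGE path doubles as a sizing probe, so a userspace sketch (illustrative only, error handling trimmed) looks like:

socklen_t len = 0;
gid_t *groups = NULL;

if (getsockopt(fd, SOL_SOCKET, SO_PEERGROUPS, NULL, &len) < 0 &&
    errno == ERANGE) {
	groups = malloc(len);	/* len now holds the required size */
	if (groups && !getsockopt(fd, SOL_SOCKET, SO_PEERGROUPS,
				  groups, &len))
		;	/* len / sizeof(gid_t) entries are valid */
}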
@@ -2072,6 +2109,26 @@
}
EXPORT_SYMBOL(sock_cmsg_send);
+static void sk_enter_memory_pressure(struct sock *sk)
+{
+ if (!sk->sk_prot->enter_memory_pressure)
+ return;
+
+ sk->sk_prot->enter_memory_pressure(sk);
+}
+
+static void sk_leave_memory_pressure(struct sock *sk)
+{
+ if (sk->sk_prot->leave_memory_pressure) {
+ sk->sk_prot->leave_memory_pressure(sk);
+ } else {
+ unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
+
+ if (memory_pressure && *memory_pressure)
+ *memory_pressure = 0;
+ }
+}
+
/* On 32bit arches, an skb frag is limited to 2^15 */
#define SKB_FRAG_PAGE_ORDER get_order(32768)
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 9310612..733f523 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -178,10 +178,6 @@
[DCB_ATTR_IEEE_QCN_STATS] = {.len = sizeof(struct ieee_qcn_stats)},
};
-static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
- [DCB_ATTR_IEEE_APP] = {.len = sizeof(struct dcb_app)},
-};
-
/* DCB number of traffic classes nested attributes. */
static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
[DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
@@ -1463,8 +1459,15 @@
nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
struct dcb_app *app_data;
+
if (nla_type(attr) != DCB_ATTR_IEEE_APP)
continue;
+
+ if (nla_len(attr) < sizeof(struct dcb_app)) {
+ err = -ERANGE;
+ goto err;
+ }
+
app_data = nla_data(attr);
if (ops->ieee_setapp)
err = ops->ieee_setapp(netdev, app_data);
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 5e3a730..e1295d5 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -233,7 +233,7 @@
{
struct dccp_sock *dp = dccp_sk(sk);
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
- const u32 now = ccid2_time_stamp;
+ const u32 now = ccid2_jiffies32;
struct ccid2_seq *next;
/* slow-start after idle periods (RFC 2581, RFC 2861) */
@@ -466,7 +466,7 @@
* The cleanest solution is to not use the ccid2s_sent field at all
* and instead use DCCP timestamps: requires changes in other places.
*/
- ccid2_rtt_estimator(sk, ccid2_time_stamp - seqp->ccid2s_sent);
+ ccid2_rtt_estimator(sk, ccid2_jiffies32 - seqp->ccid2s_sent);
}
static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
@@ -478,7 +478,7 @@
return;
}
- hc->tx_last_cong = ccid2_time_stamp;
+ hc->tx_last_cong = ccid2_jiffies32;
hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U;
hc->tx_ssthresh = max(hc->tx_cwnd, 2U);
@@ -731,7 +731,7 @@
hc->tx_rto = DCCP_TIMEOUT_INIT;
hc->tx_rpdupack = -1;
- hc->tx_last_cong = hc->tx_lsndtime = hc->tx_cwnd_stamp = ccid2_time_stamp;
+ hc->tx_last_cong = hc->tx_lsndtime = hc->tx_cwnd_stamp = ccid2_jiffies32;
hc->tx_cwnd_used = 0;
setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire,
(unsigned long)sk);
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index 18c9754..6e50ef2 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -27,7 +27,7 @@
* CCID-2 timestamping faces the same issues as TCP timestamping.
* Hence we reuse/share as much of the code as possible.
*/
-#define ccid2_time_stamp tcp_time_stamp
+#define ccid2_jiffies32 ((u32)jiffies)
/* NUMDUPACK parameter from RFC 4341, p. 6 */
#define NUMDUPACK 3
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index f75482b..f85d901 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -1033,33 +1033,34 @@
{
int err = proto_register(&dccp_v4_prot, 1);
- if (err != 0)
+ if (err)
goto out;
- err = inet_add_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
- if (err != 0)
- goto out_proto_unregister;
-
inet_register_protosw(&dccp_v4_protosw);
err = register_pernet_subsys(&dccp_v4_ops);
if (err)
goto out_destroy_ctl_sock;
+
+ err = inet_add_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
+ if (err)
+ goto out_proto_unregister;
+
out:
return err;
+out_proto_unregister:
+ unregister_pernet_subsys(&dccp_v4_ops);
out_destroy_ctl_sock:
inet_unregister_protosw(&dccp_v4_protosw);
- inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
-out_proto_unregister:
proto_unregister(&dccp_v4_prot);
goto out;
}
static void __exit dccp_v4_exit(void)
{
+ inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
unregister_pernet_subsys(&dccp_v4_ops);
inet_unregister_protosw(&dccp_v4_protosw);
- inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
proto_unregister(&dccp_v4_prot);
}
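The dccp init/exit reordering enforces the usual rule: make the protocol handler globally reachable only as the last init step, and remove it first on exit, so no packet can reach a half-constructed (or half-demolished) protocol. The shape, with hypothetical names:

static int __init foo_init(void)
{
	int err = foo_setup_resources();	/* everything the handler needs */

	if (err)
		return err;
	err = inet_add_protocol(&foo_protocol, IPPROTO_FOO);	/* go live last */
	if (err)
		foo_teardown_resources();
	return err;
}

static void __exit foo_exit(void)
{
	inet_del_protocol(&foo_protocol, IPPROTO_FOO);	/* go dark first */
	foo_teardown_resources();
}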
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 9926211..4fccc0c 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -1098,33 +1098,33 @@
{
int err = proto_register(&dccp_v6_prot, 1);
- if (err != 0)
+ if (err)
goto out;
- err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
- if (err != 0)
- goto out_unregister_proto;
-
inet6_register_protosw(&dccp_v6_protosw);
err = register_pernet_subsys(&dccp_v6_ops);
- if (err != 0)
+ if (err)
goto out_destroy_ctl_sock;
+
+ err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
+ if (err)
+ goto out_unregister_proto;
+
out:
return err;
-
-out_destroy_ctl_sock:
- inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
- inet6_unregister_protosw(&dccp_v6_protosw);
out_unregister_proto:
+ unregister_pernet_subsys(&dccp_v6_ops);
+out_destroy_ctl_sock:
+ inet6_unregister_protosw(&dccp_v6_protosw);
proto_unregister(&dccp_v6_prot);
goto out;
}
static void __exit dccp_v6_exit(void)
{
- unregister_pernet_subsys(&dccp_v6_ops);
inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
+ unregister_pernet_subsys(&dccp_v6_ops);
inet6_unregister_protosw(&dccp_v6_protosw);
proto_unregister(&dccp_v6_prot);
}
diff --git a/net/dccp/options.c b/net/dccp/options.c
index 74d29c5..51cdfc3 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -484,7 +484,7 @@
return -1;
DCCP_SKB_CB(skb)->dccpd_opt_len++;
- *skb_push(skb, 1) = DCCPO_MANDATORY;
+ *(u8 *)skb_push(skb, 1) = DCCPO_MANDATORY;
return 0;
}
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 405483a..73a0399 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -447,7 +447,7 @@
dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
}
-static int dn_memory_pressure;
+static unsigned long dn_memory_pressure;
static void dn_enter_memory_pressure(struct sock *sk)
{
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 9017a9a..fa0110b 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -846,7 +846,7 @@
skb->dev = dev;
- msg = (struct endnode_hello_message *)skb_put(skb,sizeof(*msg));
+ msg = skb_put(skb, sizeof(*msg));
msg->msgflg = 0x0D;
memcpy(msg->tiver, dn_eco_version, 3);
@@ -867,7 +867,7 @@
msg->datalen = 0x02;
memset(msg->data, 0xAA, 2);
- pktlen = (__le16 *)skb_push(skb,2);
+ pktlen = skb_push(skb, 2);
*pktlen = cpu_to_le16(skb->len - 2);
skb_reset_network_header(skb);
@@ -959,7 +959,7 @@
skb_trim(skb, (27 + *i2));
- pktlen = (__le16 *)skb_push(skb, 2);
+ pktlen = skb_push(skb, 2);
*pktlen = cpu_to_le16(skb->len - 2);
skb_reset_network_header(skb);
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 849805e..66f035e 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -484,7 +484,7 @@
if ((skb = dn_alloc_skb(sk, 3, sk->sk_allocation)) == NULL)
return;
- msg = (struct nsp_conn_ack_msg *)skb_put(skb, 3);
+ msg = skb_put(skb, 3);
msg->msgflg = 0x24;
msg->dstaddr = scp->addrrem;
@@ -522,7 +522,7 @@
if ((skb = dn_alloc_skb(sk, 50 + len, gfp)) == NULL)
return;
- msg = (struct nsp_conn_init_msg *)skb_put(skb, sizeof(*msg));
+ msg = skb_put(skb, sizeof(*msg));
msg->msgflg = 0x28;
msg->dstaddr = scp->addrrem;
msg->srcaddr = scp->addrloc;
@@ -530,10 +530,10 @@
msg->info = scp->info_loc;
msg->segsize = cpu_to_le16(scp->segsize_loc);
- *skb_put(skb,1) = len;
+ skb_put_u8(skb, len);
if (len > 0)
- memcpy(skb_put(skb, len), scp->conndata_out.opt_data, len);
+ skb_put_data(skb, scp->conndata_out.opt_data, len);
dn_nsp_send(skb);
@@ -662,7 +662,7 @@
return;
cb = DN_SKB_CB(skb);
- msg = (struct nsp_conn_init_msg *)skb_put(skb,sizeof(*msg));
+ msg = skb_put(skb, sizeof(*msg));
msg->msgflg = msgflg;
msg->dstaddr = 0x0000; /* Remote Node will assign it*/
@@ -686,27 +686,27 @@
if (scp->peer.sdn_flags & SDF_UICPROXY)
menuver |= DN_MENUVER_UIC;
- *skb_put(skb, 1) = menuver; /* Menu Version */
+ skb_put_u8(skb, menuver); /* Menu Version */
aux = scp->accessdata.acc_userl;
- *skb_put(skb, 1) = aux;
+ skb_put_u8(skb, aux);
if (aux > 0)
- memcpy(skb_put(skb, aux), scp->accessdata.acc_user, aux);
+ skb_put_data(skb, scp->accessdata.acc_user, aux);
aux = scp->accessdata.acc_passl;
- *skb_put(skb, 1) = aux;
+ skb_put_u8(skb, aux);
if (aux > 0)
- memcpy(skb_put(skb, aux), scp->accessdata.acc_pass, aux);
+ skb_put_data(skb, scp->accessdata.acc_pass, aux);
aux = scp->accessdata.acc_accl;
- *skb_put(skb, 1) = aux;
+ skb_put_u8(skb, aux);
if (aux > 0)
- memcpy(skb_put(skb, aux), scp->accessdata.acc_acc, aux);
+ skb_put_data(skb, scp->accessdata.acc_acc, aux);
aux = (__u8)le16_to_cpu(scp->conndata_out.opt_optl);
- *skb_put(skb, 1) = aux;
+ skb_put_u8(skb, aux);
if (aux > 0)
- memcpy(skb_put(skb, aux), scp->conndata_out.opt_data, aux);
+ skb_put_data(skb, scp->conndata_out.opt_data, aux);
scp->persist = dn_nsp_persist(sk);
scp->persist_fxn = dn_nsp_retrans_conninit;
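The decnet conversions use two more helpers from the new skb_put family; in include/linux/skbuff.h they are, roughly:

static inline void *skb_put_data(struct sk_buff *skb, const void *data,
				 unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memcpy(tmp, data, len);
	return tmp;
}

static inline void skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)skb_put(skb, 1) = val;
}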
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 6f95612..bcbe548 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -183,11 +183,6 @@
return dn_rt_hash_mask & (unsigned int)tmp;
}
-static inline void dnrt_free(struct dn_route *rt)
-{
- call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
-}
-
static void dn_dst_check_expire(unsigned long dummy)
{
int i;
@@ -202,14 +197,15 @@
spin_lock(&dn_rt_hash_table[i].lock);
while ((rt = rcu_dereference_protected(*rtp,
lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
- if (atomic_read(&rt->dst.__refcnt) ||
- (now - rt->dst.lastuse) < expire) {
+ if (atomic_read(&rt->dst.__refcnt) > 1 ||
+ (now - rt->dst.lastuse) < expire) {
rtp = &rt->dst.dn_next;
continue;
}
*rtp = rt->dst.dn_next;
rt->dst.dn_next = NULL;
- dnrt_free(rt);
+ dst_dev_put(&rt->dst);
+ dst_release(&rt->dst);
}
spin_unlock(&dn_rt_hash_table[i].lock);
@@ -235,14 +231,15 @@
while ((rt = rcu_dereference_protected(*rtp,
lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
- if (atomic_read(&rt->dst.__refcnt) ||
- (now - rt->dst.lastuse) < expire) {
+ if (atomic_read(&rt->dst.__refcnt) > 1 ||
+ (now - rt->dst.lastuse) < expire) {
rtp = &rt->dst.dn_next;
continue;
}
*rtp = rt->dst.dn_next;
rt->dst.dn_next = NULL;
- dnrt_free(rt);
+ dst_dev_put(&rt->dst);
+ dst_release(&rt->dst);
break;
}
spin_unlock_bh(&dn_rt_hash_table[i].lock);
@@ -344,7 +341,7 @@
dst_use(&rth->dst, now);
spin_unlock_bh(&dn_rt_hash_table[hash].lock);
- dst_free(&rt->dst);
+ dst_release_immediate(&rt->dst);
*rp = rth;
return 0;
}
@@ -374,7 +371,8 @@
for(; rt; rt = next) {
next = rcu_dereference_raw(rt->dst.dn_next);
RCU_INIT_POINTER(rt->dst.dn_next, NULL);
- dnrt_free(rt);
+ dst_dev_put(&rt->dst);
+ dst_release(&rt->dst);
}
nothing_to_declare:
@@ -1215,6 +1213,7 @@
goto e_neighbour;
hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
+ /* dn_insert_route() increments dst->__refcnt */
dn_insert_route(rt, hash, (struct dn_route **)pprt);
done:
@@ -1237,7 +1236,7 @@
err = -ENOBUFS;
goto done;
e_neighbour:
- dst_free(&rt->dst);
+ dst_release_immediate(&rt->dst);
goto e_nobufs;
}
@@ -1445,7 +1444,7 @@
}
make_route:
- rt = dst_alloc(&dn_dst_ops, out_dev, 0, DST_OBSOLETE_NONE, DST_HOST);
+ rt = dst_alloc(&dn_dst_ops, out_dev, 1, DST_OBSOLETE_NONE, DST_HOST);
if (rt == NULL)
goto e_nobufs;
@@ -1491,6 +1490,7 @@
goto e_neighbour;
hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
+ /* dn_insert_route() increments dst->__refcnt */
dn_insert_route(rt, hash, &rt);
skb_dst_set(skb, &rt->dst);
@@ -1514,7 +1514,7 @@
goto done;
e_neighbour:
- dst_free(&rt->dst);
+ dst_release_immediate(&rt->dst);
goto done;
}
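With the dst garbage collector gone, these routes are allocated holding an initial reference (dst_alloc(..., 1, ...)) owned by the hash table, so __refcnt > 1 now means "in use" where > 0 used to, and releasing a cached entry is a two-step instead of dnrt_free()/dst_free():

dst_dev_put(&rt->dst);	/* detach from the device and point the dst
			 * at discard input/output handlers */
dst_release(&rt->dst);	/* drop the table's reference; the dst is
			 * freed via RCU once the count reaches zero */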
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 81a0868..cc5f8f9 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -25,16 +25,19 @@
config NET_DSA_TAG_EDSA
bool
-config NET_DSA_TAG_TRAILER
+config NET_DSA_TAG_KSZ
bool
-config NET_DSA_TAG_QCA
+config NET_DSA_TAG_LAN9303
bool
config NET_DSA_TAG_MTK
bool
-config NET_DSA_TAG_LAN9303
+config NET_DSA_TAG_TRAILER
+ bool
+
+config NET_DSA_TAG_QCA
bool
endif
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 0b747d7..fcce25d 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -1,12 +1,13 @@
# the core
obj-$(CONFIG_NET_DSA) += dsa_core.o
-dsa_core-y += dsa.o slave.o dsa2.o switch.o legacy.o
+dsa_core-y += dsa.o dsa2.o legacy.o port.o slave.o switch.o
# tagging formats
dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o
dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
-dsa_core-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
-dsa_core-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o
-dsa_core-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o
+dsa_core-$(CONFIG_NET_DSA_TAG_KSZ) += tag_ksz.o
dsa_core-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o
+dsa_core-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o
+dsa_core-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o
+dsa_core-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 90038d4..416ac4e 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -24,7 +24,7 @@
#include <linux/phy_fixed.h>
#include <linux/gpio/consumer.h>
#include <linux/etherdevice.h>
-#include <net/dsa.h>
+
#include "dsa_priv.h"
static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
@@ -40,26 +40,29 @@
};
const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = {
+#ifdef CONFIG_NET_DSA_TAG_BRCM
+ [DSA_TAG_PROTO_BRCM] = &brcm_netdev_ops,
+#endif
#ifdef CONFIG_NET_DSA_TAG_DSA
[DSA_TAG_PROTO_DSA] = &dsa_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_EDSA
[DSA_TAG_PROTO_EDSA] = &edsa_netdev_ops,
#endif
-#ifdef CONFIG_NET_DSA_TAG_TRAILER
- [DSA_TAG_PROTO_TRAILER] = &trailer_netdev_ops,
+#ifdef CONFIG_NET_DSA_TAG_KSZ
+ [DSA_TAG_PROTO_KSZ] = &ksz_netdev_ops,
#endif
-#ifdef CONFIG_NET_DSA_TAG_BRCM
- [DSA_TAG_PROTO_BRCM] = &brcm_netdev_ops,
-#endif
-#ifdef CONFIG_NET_DSA_TAG_QCA
- [DSA_TAG_PROTO_QCA] = &qca_netdev_ops,
+#ifdef CONFIG_NET_DSA_TAG_LAN9303
+ [DSA_TAG_PROTO_LAN9303] = &lan9303_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_MTK
[DSA_TAG_PROTO_MTK] = &mtk_netdev_ops,
#endif
-#ifdef CONFIG_NET_DSA_TAG_LAN9303
- [DSA_TAG_PROTO_LAN9303] = &lan9303_netdev_ops,
+#ifdef CONFIG_NET_DSA_TAG_QCA
+ [DSA_TAG_PROTO_QCA] = &qca_netdev_ops,
+#endif
+#ifdef CONFIG_NET_DSA_TAG_TRAILER
+ [DSA_TAG_PROTO_TRAILER] = &trailer_netdev_ops,
#endif
[DSA_TAG_PROTO_NONE] = &none_ops,
};
@@ -109,23 +112,22 @@
return ops;
}
-int dsa_cpu_port_ethtool_setup(struct dsa_switch *ds)
+int dsa_cpu_port_ethtool_setup(struct dsa_port *cpu_dp)
{
+ struct dsa_switch *ds = cpu_dp->ds;
struct net_device *master;
struct ethtool_ops *cpu_ops;
- master = ds->dst->master_netdev;
- if (ds->master_netdev)
- master = ds->master_netdev;
+ master = cpu_dp->netdev;
cpu_ops = devm_kzalloc(ds->dev, sizeof(*cpu_ops), GFP_KERNEL);
if (!cpu_ops)
return -ENOMEM;
- memcpy(&ds->dst->master_ethtool_ops, master->ethtool_ops,
+ memcpy(&cpu_dp->ethtool_ops, master->ethtool_ops,
sizeof(struct ethtool_ops));
- ds->dst->master_orig_ethtool_ops = master->ethtool_ops;
- memcpy(cpu_ops, &ds->dst->master_ethtool_ops,
+ cpu_dp->orig_ethtool_ops = master->ethtool_ops;
+ memcpy(cpu_ops, &cpu_dp->ethtool_ops,
sizeof(struct ethtool_ops));
dsa_cpu_port_ethtool_init(cpu_ops);
master->ethtool_ops = cpu_ops;
@@ -133,15 +135,9 @@
return 0;
}
-void dsa_cpu_port_ethtool_restore(struct dsa_switch *ds)
+void dsa_cpu_port_ethtool_restore(struct dsa_port *cpu_dp)
{
- struct net_device *master;
-
- master = ds->dst->master_netdev;
- if (ds->master_netdev)
- master = ds->master_netdev;
-
- master->ethtool_ops = ds->dst->master_orig_ethtool_ops;
+ cpu_dp->netdev->ethtool_ops = cpu_dp->orig_ethtool_ops;
}
void dsa_cpu_dsa_destroy(struct dsa_port *port)
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 7796580..56e4609 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -18,7 +18,7 @@
#include <linux/rtnetlink.h>
#include <linux/of.h>
#include <linux/of_net.h>
-#include <net/dsa.h>
+
#include "dsa_priv.h"
static LIST_HEAD(dsa_switch_trees);
@@ -214,66 +214,59 @@
return 0;
}
-static int dsa_dsa_port_apply(struct dsa_port *port, u32 index,
- struct dsa_switch *ds)
+static int dsa_dsa_port_apply(struct dsa_port *port)
{
+ struct dsa_switch *ds = port->ds;
int err;
- err = dsa_cpu_dsa_setup(ds, ds->dev, port, index);
+ err = dsa_cpu_dsa_setup(ds, ds->dev, port, port->index);
if (err) {
dev_warn(ds->dev, "Failed to setup dsa port %d: %d\n",
- index, err);
+ port->index, err);
return err;
}
- memset(&ds->ports[index].devlink_port, 0,
- sizeof(ds->ports[index].devlink_port));
+ memset(&port->devlink_port, 0, sizeof(port->devlink_port));
- return devlink_port_register(ds->devlink,
- &ds->ports[index].devlink_port,
- index);
+ return devlink_port_register(ds->devlink, &port->devlink_port,
+ port->index);
}
-static void dsa_dsa_port_unapply(struct dsa_port *port, u32 index,
- struct dsa_switch *ds)
+static void dsa_dsa_port_unapply(struct dsa_port *port)
{
- devlink_port_unregister(&ds->ports[index].devlink_port);
+ devlink_port_unregister(&port->devlink_port);
dsa_cpu_dsa_destroy(port);
}
-static int dsa_cpu_port_apply(struct dsa_port *port, u32 index,
- struct dsa_switch *ds)
+static int dsa_cpu_port_apply(struct dsa_port *port)
{
+ struct dsa_switch *ds = port->ds;
int err;
- err = dsa_cpu_dsa_setup(ds, ds->dev, port, index);
+ err = dsa_cpu_dsa_setup(ds, ds->dev, port, port->index);
if (err) {
dev_warn(ds->dev, "Failed to setup cpu port %d: %d\n",
- index, err);
+ port->index, err);
return err;
}
- ds->cpu_port_mask |= BIT(index);
-
- memset(&ds->ports[index].devlink_port, 0,
- sizeof(ds->ports[index].devlink_port));
- err = devlink_port_register(ds->devlink, &ds->ports[index].devlink_port,
- index);
+ memset(&port->devlink_port, 0, sizeof(port->devlink_port));
+ err = devlink_port_register(ds->devlink, &port->devlink_port,
+ port->index);
return err;
}
-static void dsa_cpu_port_unapply(struct dsa_port *port, u32 index,
- struct dsa_switch *ds)
+static void dsa_cpu_port_unapply(struct dsa_port *port)
{
- devlink_port_unregister(&ds->ports[index].devlink_port);
+ devlink_port_unregister(&port->devlink_port);
dsa_cpu_dsa_destroy(port);
- ds->cpu_port_mask &= ~BIT(index);
+ port->ds->cpu_port_mask &= ~BIT(port->index);
}
-static int dsa_user_port_apply(struct dsa_port *port, u32 index,
- struct dsa_switch *ds)
+static int dsa_user_port_apply(struct dsa_port *port)
{
+ struct dsa_switch *ds = port->ds;
const char *name = port->name;
int err;
@@ -282,35 +275,32 @@
if (!name)
name = "eth%d";
- err = dsa_slave_create(ds, ds->dev, index, name);
+ err = dsa_slave_create(ds, ds->dev, port->index, name);
if (err) {
dev_warn(ds->dev, "Failed to create slave %d: %d\n",
- index, err);
- ds->ports[index].netdev = NULL;
+ port->index, err);
+ port->netdev = NULL;
return err;
}
- memset(&ds->ports[index].devlink_port, 0,
- sizeof(ds->ports[index].devlink_port));
- err = devlink_port_register(ds->devlink, &ds->ports[index].devlink_port,
- index);
+ memset(&port->devlink_port, 0, sizeof(port->devlink_port));
+ err = devlink_port_register(ds->devlink, &port->devlink_port,
+ port->index);
if (err)
return err;
- devlink_port_type_eth_set(&ds->ports[index].devlink_port,
- ds->ports[index].netdev);
+ devlink_port_type_eth_set(&port->devlink_port, port->netdev);
return 0;
}
-static void dsa_user_port_unapply(struct dsa_port *port, u32 index,
- struct dsa_switch *ds)
+static void dsa_user_port_unapply(struct dsa_port *port)
{
- devlink_port_unregister(&ds->ports[index].devlink_port);
- if (ds->ports[index].netdev) {
- dsa_slave_destroy(ds->ports[index].netdev);
- ds->ports[index].netdev = NULL;
- ds->enabled_port_mask &= ~(1 << index);
+ devlink_port_unregister(&port->devlink_port);
+ if (port->netdev) {
+ dsa_slave_destroy(port->netdev);
+ port->netdev = NULL;
+ port->ds->enabled_port_mask &= ~(1 << port->index);
}
}
@@ -347,7 +337,7 @@
return err;
if (ds->ops->set_addr) {
- err = ds->ops->set_addr(ds, dst->master_netdev->dev_addr);
+ err = ds->ops->set_addr(ds, dst->cpu_dp->netdev->dev_addr);
if (err < 0)
return err;
}
@@ -370,20 +360,20 @@
continue;
if (dsa_port_is_dsa(port)) {
- err = dsa_dsa_port_apply(port, index, ds);
+ err = dsa_dsa_port_apply(port);
if (err)
return err;
continue;
}
if (dsa_port_is_cpu(port)) {
- err = dsa_cpu_port_apply(port, index, ds);
+ err = dsa_cpu_port_apply(port);
if (err)
return err;
continue;
}
- err = dsa_user_port_apply(port, index, ds);
+ err = dsa_user_port_apply(port);
if (err)
continue;
}
@@ -402,16 +392,16 @@
continue;
if (dsa_port_is_dsa(port)) {
- dsa_dsa_port_unapply(port, index, ds);
+ dsa_dsa_port_unapply(port);
continue;
}
if (dsa_port_is_cpu(port)) {
- dsa_cpu_port_unapply(port, index, ds);
+ dsa_cpu_port_unapply(port);
continue;
}
- dsa_user_port_unapply(port, index, ds);
+ dsa_user_port_unapply(port);
}
if (ds->slave_mii_bus && ds->ops->phy_read)
@@ -443,8 +433,8 @@
return err;
}
- if (dst->cpu_switch) {
- err = dsa_cpu_port_ethtool_setup(dst->cpu_switch);
+ if (dst->cpu_dp) {
+ err = dsa_cpu_port_ethtool_setup(dst->cpu_dp);
if (err)
return err;
}
@@ -454,7 +444,7 @@
* sent to the tag format's receive function.
*/
wmb();
- dst->master_netdev->dsa_ptr = (void *)dst;
+ dst->cpu_dp->netdev->dsa_ptr = dst;
dst->applied = true;
return 0;
@@ -468,7 +458,7 @@
if (!dst->applied)
return;
- dst->master_netdev->dsa_ptr = NULL;
+ dst->cpu_dp->netdev->dsa_ptr = NULL;
/* If we used a tagging format that doesn't have an ethertype
* field, make sure that all packets from this point get sent
@@ -484,9 +474,9 @@
dsa_ds_unapply(dst, ds);
}
- if (dst->cpu_switch) {
- dsa_cpu_port_ethtool_restore(dst->cpu_switch);
- dst->cpu_switch = NULL;
+ if (dst->cpu_dp) {
+ dsa_cpu_port_ethtool_restore(dst->cpu_dp);
+ dst->cpu_dp = NULL;
}
pr_info("DSA: tree %d unapplied\n", dst->tree);
@@ -514,15 +504,9 @@
if (!ethernet_dev)
return -EPROBE_DEFER;
- if (!ds->master_netdev)
- ds->master_netdev = ethernet_dev;
-
- if (!dst->master_netdev)
- dst->master_netdev = ethernet_dev;
-
- if (!dst->cpu_switch) {
- dst->cpu_switch = ds;
- dst->cpu_port = index;
+ if (!dst->cpu_dp) {
+ dst->cpu_dp = port;
+ dst->cpu_dp->netdev = ethernet_dev;
}
tag_protocol = ds->ops->get_tag_protocol(ds);
@@ -534,6 +518,12 @@
dst->rcv = dst->tag_ops->rcv;
+ /* Initialize cpu_port_mask now for drv->setup()
+ * to have access to a correct value, just like what
+ * net/dsa/dsa.c::dsa_switch_setup_one does.
+ */
+ ds->cpu_port_mask |= BIT(index);
+
return 0;
}
@@ -545,14 +535,22 @@
for (index = 0; index < ds->num_ports; index++) {
port = &ds->ports[index];
- if (!dsa_port_is_valid(port))
+ if (!dsa_port_is_valid(port) ||
+ dsa_port_is_dsa(port))
continue;
if (dsa_port_is_cpu(port)) {
err = dsa_cpu_parse(port, index, dst, ds);
if (err)
return err;
+ } else {
+ /* Initialize enabled_port_mask now for drv->setup()
+ * to have access to a correct value, just like what
+ * net/dsa/dsa.c::dsa_switch_setup_one does.
+ */
+ ds->enabled_port_mask |= BIT(index);
}
+
}
pr_info("DSA: switch %d %d parsed\n", dst->tree, ds->index);
@@ -563,7 +561,9 @@
static int dsa_dst_parse(struct dsa_switch_tree *dst)
{
struct dsa_switch *ds;
+ struct dsa_port *dp;
u32 index;
+ int port;
int err;
for (index = 0; index < DSA_MAX_SWITCHES; index++) {
@@ -576,11 +576,28 @@
return err;
}
- if (!dst->master_netdev) {
+ if (!dst->cpu_dp->netdev) {
pr_warn("Tree has no master device\n");
return -EINVAL;
}
+ /* Assign the default CPU port to all ports of the fabric */
+ for (index = 0; index < DSA_MAX_SWITCHES; index++) {
+ ds = dst->ds[index];
+ if (!ds)
+ continue;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ dp = &ds->ports[port];
+ if (!dsa_port_is_valid(dp) ||
+ dsa_port_is_dsa(dp) ||
+ dsa_port_is_cpu(dp))
+ continue;
+
+ dp->cpu_dp = dst->cpu_dp;
+ }
+ }
+
pr_info("DSA: tree %d parsed\n", dst->tree);
return 0;
@@ -601,13 +618,6 @@
return -EINVAL;
ds->ports[reg].dn = port;
-
- /* Initialize enabled_port_mask now for ops->setup()
- * to have access to a correct value, just like what
- * net/dsa/dsa.c::dsa_switch_setup_one does.
- */
- if (!dsa_port_is_cpu(&ds->ports[reg]))
- ds->enabled_port_mask |= 1 << reg;
}
return 0;
@@ -623,14 +633,6 @@
continue;
ds->ports[i].name = cd->port_names[i];
-
- /* Initialize enabled_port_mask now for drv->setup()
- * to have access to a correct value, just like what
- * net/dsa/dsa.c::dsa_switch_setup_one does.
- */
- if (!dsa_port_is_cpu(&ds->ports[i]))
- ds->enabled_port_mask |= 1 << i;
-
valid_name_found = true;
}
@@ -690,10 +692,10 @@
return ports;
}
-static int _dsa_register_switch(struct dsa_switch *ds, struct device *dev)
+static int _dsa_register_switch(struct dsa_switch *ds)
{
- struct dsa_chip_data *pdata = dev->platform_data;
- struct device_node *np = dev->of_node;
+ struct dsa_chip_data *pdata = ds->dev->platform_data;
+ struct device_node *np = ds->dev->of_node;
struct dsa_switch_tree *dst;
struct device_node *ports;
u32 tree, index;
@@ -807,12 +809,12 @@
}
EXPORT_SYMBOL_GPL(dsa_switch_alloc);
-int dsa_register_switch(struct dsa_switch *ds, struct device *dev)
+int dsa_register_switch(struct dsa_switch *ds)
{
int err;
mutex_lock(&dsa2_mutex);
- err = _dsa_register_switch(ds, dev);
+ err = _dsa_register_switch(ds);
mutex_unlock(&dsa2_mutex);
return err;
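With the struct device argument dropped, dsa_register_switch() relies on the
ds->dev pointer recorded by dsa_switch_alloc(). A minimal sketch of the
resulting driver call pattern; example_sw_probe and example_sw_ops are
illustrative names, not part of this patch:

	static int example_sw_probe(struct mdio_device *mdiodev)
	{
		struct dsa_switch *ds;

		/* dsa_switch_alloc() stores the parent device in ds->dev,
		 * which _dsa_register_switch() now reads for platform_data
		 * and the of_node.
		 */
		ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
		if (!ds)
			return -ENOMEM;

		ds->ops = &example_sw_ops;	/* hypothetical dsa_switch_ops */

		return dsa_register_switch(ds);
	}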
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index f4a88e4..55982cc 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -14,6 +14,56 @@
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
+#include <net/dsa.h>
+
+enum {
+ DSA_NOTIFIER_AGEING_TIME,
+ DSA_NOTIFIER_BRIDGE_JOIN,
+ DSA_NOTIFIER_BRIDGE_LEAVE,
+ DSA_NOTIFIER_FDB_ADD,
+ DSA_NOTIFIER_FDB_DEL,
+ DSA_NOTIFIER_MDB_ADD,
+ DSA_NOTIFIER_MDB_DEL,
+ DSA_NOTIFIER_VLAN_ADD,
+ DSA_NOTIFIER_VLAN_DEL,
+};
+
+/* DSA_NOTIFIER_AGEING_TIME */
+struct dsa_notifier_ageing_time_info {
+ struct switchdev_trans *trans;
+ unsigned int ageing_time;
+};
+
+/* DSA_NOTIFIER_BRIDGE_* */
+struct dsa_notifier_bridge_info {
+ struct net_device *br;
+ int sw_index;
+ int port;
+};
+
+/* DSA_NOTIFIER_FDB_* */
+struct dsa_notifier_fdb_info {
+ const struct switchdev_obj_port_fdb *fdb;
+ struct switchdev_trans *trans;
+ int sw_index;
+ int port;
+};
+
+/* DSA_NOTIFIER_MDB_* */
+struct dsa_notifier_mdb_info {
+ const struct switchdev_obj_port_mdb *mdb;
+ struct switchdev_trans *trans;
+ int sw_index;
+ int port;
+};
+
+/* DSA_NOTIFIER_VLAN_* */
+struct dsa_notifier_vlan_info {
+ const struct switchdev_obj_port_vlan *vlan;
+ struct switchdev_trans *trans;
+ int sw_index;
+ int port;
+};
struct dsa_device_ops {
struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
@@ -23,6 +73,7 @@
};
struct dsa_slave_priv {
+ /* Copy of dp->ds->dst->tag_ops->xmit for faster access in hot path */
struct sk_buff * (*xmit)(struct sk_buff *skb,
struct net_device *dev);
@@ -52,13 +103,46 @@
struct dsa_port *dport, int port);
void dsa_cpu_dsa_destroy(struct dsa_port *dport);
const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol);
-int dsa_cpu_port_ethtool_setup(struct dsa_switch *ds);
-void dsa_cpu_port_ethtool_restore(struct dsa_switch *ds);
+int dsa_cpu_port_ethtool_setup(struct dsa_port *cpu_dp);
+void dsa_cpu_port_ethtool_restore(struct dsa_port *cpu_dp);
/* legacy.c */
int dsa_legacy_register(void);
void dsa_legacy_unregister(void);
+/* port.c */
+int dsa_port_set_state(struct dsa_port *dp, u8 state,
+ struct switchdev_trans *trans);
+void dsa_port_set_state_now(struct dsa_port *dp, u8 state);
+int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br);
+void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
+int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
+ struct switchdev_trans *trans);
+int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock,
+ struct switchdev_trans *trans);
+int dsa_port_fdb_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans);
+int dsa_port_fdb_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_fdb *fdb);
+int dsa_port_fdb_dump(struct dsa_port *dp, struct switchdev_obj_port_fdb *fdb,
+ switchdev_obj_dump_cb_t *cb);
+int dsa_port_mdb_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans);
+int dsa_port_mdb_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb);
+int dsa_port_mdb_dump(struct dsa_port *dp, struct switchdev_obj_port_mdb *mdb,
+ switchdev_obj_dump_cb_t *cb);
+int dsa_port_vlan_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans);
+int dsa_port_vlan_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan);
+int dsa_port_vlan_dump(struct dsa_port *dp,
+ struct switchdev_obj_port_vlan *vlan,
+ switchdev_obj_dump_cb_t *cb);
+
/* slave.c */
extern const struct dsa_device_ops notag_netdev_ops;
void dsa_slave_mii_bus_init(struct dsa_switch *ds);
@@ -75,25 +159,38 @@
int dsa_switch_register_notifier(struct dsa_switch *ds);
void dsa_switch_unregister_notifier(struct dsa_switch *ds);
+/* tag_brcm.c */
+extern const struct dsa_device_ops brcm_netdev_ops;
+
/* tag_dsa.c */
extern const struct dsa_device_ops dsa_netdev_ops;
/* tag_edsa.c */
extern const struct dsa_device_ops edsa_netdev_ops;
-/* tag_trailer.c */
-extern const struct dsa_device_ops trailer_netdev_ops;
+/* tag_ksz.c */
+extern const struct dsa_device_ops ksz_netdev_ops;
-/* tag_brcm.c */
-extern const struct dsa_device_ops brcm_netdev_ops;
-
-/* tag_qca.c */
-extern const struct dsa_device_ops qca_netdev_ops;
+/* tag_lan9303.c */
+extern const struct dsa_device_ops lan9303_netdev_ops;
/* tag_mtk.c */
extern const struct dsa_device_ops mtk_netdev_ops;
-/* tag_lan9303.c */
-extern const struct dsa_device_ops lan9303_netdev_ops;
+/* tag_qca.c */
+extern const struct dsa_device_ops qca_netdev_ops;
+
+/* tag_trailer.c */
+extern const struct dsa_device_ops trailer_netdev_ops;
+
+static inline struct net_device *dsa_master_netdev(struct dsa_slave_priv *p)
+{
+ return p->dp->cpu_dp->netdev;
+}
+
+static inline struct dsa_port *dsa_get_cpu_port(struct dsa_switch_tree *dst)
+{
+ return dst->cpu_dp;
+}
#endif
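The two inline helpers above replace open-coded pointer chains like
p->dp->ds->dst->master_netdev throughout slave.c. A minimal sketch of a
caller, assuming a valid DSA slave net_device (the function name is
illustrative):

	static struct net_device *example_resolve_master(struct net_device *slave)
	{
		struct dsa_slave_priv *p = netdev_priv(slave);

		/* dp->cpu_dp is assigned during tree parsing, so the master
		 * is reached via the port's cached CPU port instead of
		 * walking through the switch and the tree.
		 */
		return dsa_master_netdev(p);
	}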
diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c
index 7281098..1d7a328 100644
--- a/net/dsa/legacy.c
+++ b/net/dsa/legacy.c
@@ -22,7 +22,7 @@
#include <linux/sysfs.h>
#include <linux/phy_fixed.h>
#include <linux/etherdevice.h>
-#include <net/dsa.h>
+
#include "dsa_priv.h"
/* switch driver registration ***********************************************/
@@ -95,7 +95,8 @@
return 0;
}
-static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
+static int dsa_switch_setup_one(struct dsa_switch *ds, struct net_device *master,
+ struct device *parent)
{
const struct dsa_switch_ops *ops = ds->ops;
struct dsa_switch_tree *dst = ds->dst;
@@ -115,13 +116,13 @@
continue;
if (!strcmp(name, "cpu")) {
- if (dst->cpu_switch) {
- netdev_err(dst->master_netdev,
+ if (dst->cpu_dp) {
+ netdev_err(master,
"multiple cpu ports?!\n");
return -EINVAL;
}
- dst->cpu_switch = ds;
- dst->cpu_port = i;
+ dst->cpu_dp = &ds->ports[i];
+ dst->cpu_dp->netdev = master;
ds->cpu_port_mask |= 1 << i;
} else if (!strcmp(name, "dsa")) {
ds->dsa_port_mask |= 1 << i;
@@ -144,7 +145,7 @@
* tagging protocol to the preferred tagging format of this
* switch.
*/
- if (dst->cpu_switch == ds) {
+ if (dst->cpu_dp->ds == ds) {
enum dsa_tag_protocol tag_protocol;
tag_protocol = ops->get_tag_protocol(ds);
@@ -169,7 +170,7 @@
return ret;
if (ops->set_addr) {
- ret = ops->set_addr(ds, dst->master_netdev->dev_addr);
+ ret = ops->set_addr(ds, master->dev_addr);
if (ret < 0)
return ret;
}
@@ -190,23 +191,24 @@
*/
for (i = 0; i < ds->num_ports; i++) {
ds->ports[i].dn = cd->port_dn[i];
+ ds->ports[i].cpu_dp = dst->cpu_dp;
if (!(ds->enabled_port_mask & (1 << i)))
continue;
ret = dsa_slave_create(ds, parent, i, cd->port_names[i]);
if (ret < 0)
- netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s): %d\n",
+ netdev_err(master, "[%d]: can't create dsa slave device for port %d(%s): %d\n",
index, i, cd->port_names[i], ret);
}
/* Perform configuration of the CPU and DSA ports */
ret = dsa_cpu_dsa_setups(ds, parent);
if (ret < 0)
- netdev_err(dst->master_netdev, "[%d] : can't configure CPU and DSA ports\n",
+ netdev_err(master, "[%d] : can't configure CPU and DSA ports\n",
index);
- ret = dsa_cpu_port_ethtool_setup(ds);
+ ret = dsa_cpu_port_ethtool_setup(ds->dst->cpu_dp);
if (ret)
return ret;
@@ -214,8 +216,8 @@
}
static struct dsa_switch *
-dsa_switch_setup(struct dsa_switch_tree *dst, int index,
- struct device *parent, struct device *host_dev)
+dsa_switch_setup(struct dsa_switch_tree *dst, struct net_device *master,
+ int index, struct device *parent, struct device *host_dev)
{
struct dsa_chip_data *cd = dst->pd->chip + index;
const struct dsa_switch_ops *ops;
@@ -229,11 +231,11 @@
*/
ops = dsa_switch_probe(parent, host_dev, cd->sw_addr, &name, &priv);
if (!ops) {
- netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n",
+ netdev_err(master, "[%d]: could not detect attached switch\n",
index);
return ERR_PTR(-EINVAL);
}
- netdev_info(dst->master_netdev, "[%d]: detected a %s switch\n",
+ netdev_info(master, "[%d]: detected a %s switch\n",
index, name);
@@ -250,7 +252,7 @@
ds->ops = ops;
ds->priv = priv;
- ret = dsa_switch_setup_one(ds, parent);
+ ret = dsa_switch_setup_one(ds, master, parent);
if (ret)
return ERR_PTR(ret);
@@ -576,13 +578,11 @@
unsigned configured = 0;
dst->pd = pd;
- dst->master_netdev = dev;
- dst->cpu_port = -1;
for (i = 0; i < pd->nr_chips; i++) {
struct dsa_switch *ds;
- ds = dsa_switch_setup(dst, i, parent, pd->chip[i].host_dev);
+ ds = dsa_switch_setup(dst, dev, i, parent, pd->chip[i].host_dev);
if (IS_ERR(ds)) {
netdev_err(dev, "[%d]: couldn't create dsa switch instance (error %ld)\n",
i, PTR_ERR(ds));
@@ -606,7 +606,7 @@
* sent to the tag format's receive function.
*/
wmb();
- dev->dsa_ptr = (void *)dst;
+ dev->dsa_ptr = dst;
return 0;
}
@@ -673,7 +673,7 @@
{
int i;
- dst->master_netdev->dsa_ptr = NULL;
+ dst->cpu_dp->netdev->dsa_ptr = NULL;
/* If we used a tagging format that doesn't have an ethertype
* field, make sure that all packets from this point get sent
@@ -688,9 +688,9 @@
dsa_switch_destroy(ds);
}
- dsa_cpu_port_ethtool_restore(dst->cpu_switch);
+ dsa_cpu_port_ethtool_restore(dst->cpu_dp);
- dev_put(dst->master_netdev);
+ dev_put(dst->cpu_dp->netdev);
}
static int dsa_remove(struct platform_device *pdev)
diff --git a/net/dsa/port.c b/net/dsa/port.c
new file mode 100644
index 0000000..efc3bce
--- /dev/null
+++ b/net/dsa/port.c
@@ -0,0 +1,259 @@
+/*
+ * Handling of a single switch port
+ *
+ * Copyright (c) 2017 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/if_bridge.h>
+#include <linux/notifier.h>
+
+#include "dsa_priv.h"
+
+static int dsa_port_notify(struct dsa_port *dp, unsigned long e, void *v)
+{
+ struct raw_notifier_head *nh = &dp->ds->dst->nh;
+ int err;
+
+ err = raw_notifier_call_chain(nh, e, v);
+
+ return notifier_to_errno(err);
+}
+
+int dsa_port_set_state(struct dsa_port *dp, u8 state,
+ struct switchdev_trans *trans)
+{
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return ds->ops->port_stp_state_set ? 0 : -EOPNOTSUPP;
+
+ if (ds->ops->port_stp_state_set)
+ ds->ops->port_stp_state_set(ds, port, state);
+
+ if (ds->ops->port_fast_age) {
+ /* Fast age FDB entries or flush appropriate forwarding database
+ * for the given port, if we are moving it from Learning or
+ * Forwarding state, to Disabled or Blocking or Listening state.
+ */
+
+ if ((dp->stp_state == BR_STATE_LEARNING ||
+ dp->stp_state == BR_STATE_FORWARDING) &&
+ (state == BR_STATE_DISABLED ||
+ state == BR_STATE_BLOCKING ||
+ state == BR_STATE_LISTENING))
+ ds->ops->port_fast_age(ds, port);
+ }
+
+ dp->stp_state = state;
+
+ return 0;
+}
+
+void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
+{
+ int err;
+
+ err = dsa_port_set_state(dp, state, NULL);
+ if (err)
+ pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
+}
+
+int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br)
+{
+ struct dsa_notifier_bridge_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .br = br,
+ };
+ int err;
+
+ /* Here the port is already bridged. Reflect the current configuration
+ * so that drivers can program their chips accordingly.
+ */
+ dp->bridge_dev = br;
+
+ err = dsa_port_notify(dp, DSA_NOTIFIER_BRIDGE_JOIN, &info);
+
+ /* The bridging is rolled back on error */
+ if (err)
+ dp->bridge_dev = NULL;
+
+ return err;
+}
+
+void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
+{
+ struct dsa_notifier_bridge_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .br = br,
+ };
+ int err;
+
+ /* Here the port is already unbridged. Reflect the current configuration
+ * so that drivers can program their chips accordingly.
+ */
+ dp->bridge_dev = NULL;
+
+ err = dsa_port_notify(dp, DSA_NOTIFIER_BRIDGE_LEAVE, &info);
+ if (err)
+ pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");
+
+ /* The port has left the bridge and was put in BR_STATE_DISABLED by the
+ * bridge layer, so put it back in BR_STATE_FORWARDING to keep it
+ * functional.
+ */
+ dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
+}
+
+int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
+ struct switchdev_trans *trans)
+{
+ struct dsa_switch *ds = dp->ds;
+
+ /* bridge skips -EOPNOTSUPP, so skip the prepare phase */
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (ds->ops->port_vlan_filtering)
+ return ds->ops->port_vlan_filtering(ds, dp->index,
+ vlan_filtering);
+
+ return 0;
+}
+
+int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock,
+ struct switchdev_trans *trans)
+{
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
+ unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
+ struct dsa_notifier_ageing_time_info info = {
+ .ageing_time = ageing_time,
+ .trans = trans,
+ };
+
+ if (switchdev_trans_ph_prepare(trans))
+ return dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
+
+ dp->ageing_time = ageing_time;
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
+}
+
+int dsa_port_fdb_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ struct dsa_notifier_fdb_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .trans = trans,
+ .fdb = fdb,
+ };
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
+}
+
+int dsa_port_fdb_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_fdb *fdb)
+{
+ struct dsa_notifier_fdb_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .fdb = fdb,
+ };
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
+}
+
+int dsa_port_fdb_dump(struct dsa_port *dp, struct switchdev_obj_port_fdb *fdb,
+ switchdev_obj_dump_cb_t *cb)
+{
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->port_fdb_dump)
+ return ds->ops->port_fdb_dump(ds, dp->index, fdb, cb);
+
+ return -EOPNOTSUPP;
+}
+
+int dsa_port_mdb_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans)
+{
+ struct dsa_notifier_mdb_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .trans = trans,
+ .mdb = mdb,
+ };
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
+}
+
+int dsa_port_mdb_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct dsa_notifier_mdb_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .mdb = mdb,
+ };
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
+}
+
+int dsa_port_mdb_dump(struct dsa_port *dp, struct switchdev_obj_port_mdb *mdb,
+ switchdev_obj_dump_cb_t *cb)
+{
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->port_mdb_dump)
+ return ds->ops->port_mdb_dump(ds, dp->index, mdb, cb);
+
+ return -EOPNOTSUPP;
+}
+
+int dsa_port_vlan_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct dsa_notifier_vlan_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .trans = trans,
+ .vlan = vlan,
+ };
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
+}
+
+int dsa_port_vlan_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct dsa_notifier_vlan_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .vlan = vlan,
+ };
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
+}
+
+int dsa_port_vlan_dump(struct dsa_port *dp,
+ struct switchdev_obj_port_vlan *vlan,
+ switchdev_obj_dump_cb_t *cb)
+{
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->port_vlan_dump)
+ return ds->ops->port_vlan_dump(ds, dp->index, vlan, cb);
+
+ return -EOPNOTSUPP;
+}
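All of the dsa_port_* helpers above funnel through dsa_port_notify(), which
broadcasts the request on the tree's raw notifier chain so every chip of the
fabric can react. A hedged sketch of how a VLAN add propagates; the VID and
flags below are made-up values:

	struct switchdev_obj_port_vlan vlan = {
		.vid_begin = 100,	/* illustrative VID */
		.vid_end   = 100,
		.flags     = BRIDGE_VLAN_INFO_UNTAGGED,
	};
	int err;

	/* Raises DSA_NOTIFIER_VLAN_ADD; dsa_switch_event() in
	 * net/dsa/switch.c then runs port_vlan_prepare()/port_vlan_add()
	 * on the member ports of every chip in dst->ds[].
	 */
	err = dsa_port_vlan_add(dp, &vlan, trans);

Since dsa_port_notify() converts the chain result back with
notifier_to_errno(), a single chip failing the prepare phase vetoes the
whole transaction.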
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 7693182..9507bd3 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -17,28 +17,16 @@
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <linux/list.h>
-#include <net/dsa.h>
#include <net/rtnetlink.h>
-#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/netpoll.h>
+
#include "dsa_priv.h"
static bool dsa_slave_dev_check(struct net_device *dev);
-static int dsa_slave_notify(struct net_device *dev, unsigned long e, void *v)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct raw_notifier_head *nh = &p->dp->ds->dst->nh;
- int err;
-
- err = raw_notifier_call_chain(nh, e, v);
-
- return notifier_to_errno(err);
-}
-
/* slave mii_bus handling ***************************************************/
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
@@ -78,48 +66,16 @@
{
struct dsa_slave_priv *p = netdev_priv(dev);
- return p->dp->ds->dst->master_netdev->ifindex;
-}
-
-static inline bool dsa_port_is_bridged(struct dsa_port *dp)
-{
- return !!dp->bridge_dev;
-}
-
-static void dsa_slave_set_state(struct net_device *dev, u8 state)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_port *dp = p->dp;
- struct dsa_switch *ds = dp->ds;
- int port = dp->index;
-
- if (ds->ops->port_stp_state_set)
- ds->ops->port_stp_state_set(ds, port, state);
-
- if (ds->ops->port_fast_age) {
- /* Fast age FDB entries or flush appropriate forwarding database
- * for the given port, if we are moving it from Learning or
- * Forwarding state, to Disabled or Blocking or Listening state.
- */
-
- if ((dp->stp_state == BR_STATE_LEARNING ||
- dp->stp_state == BR_STATE_FORWARDING) &&
- (state == BR_STATE_DISABLED ||
- state == BR_STATE_BLOCKING ||
- state == BR_STATE_LISTENING))
- ds->ops->port_fast_age(ds, port);
- }
-
- dp->stp_state = state;
+ return dsa_master_netdev(p)->ifindex;
}
static int dsa_slave_open(struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct net_device *master = p->dp->ds->dst->master_netdev;
- struct dsa_switch *ds = p->dp->ds;
- u8 stp_state = dsa_port_is_bridged(p->dp) ?
- BR_STATE_BLOCKING : BR_STATE_FORWARDING;
+ struct dsa_port *dp = p->dp;
+ struct dsa_switch *ds = dp->ds;
+ struct net_device *master = dsa_master_netdev(p);
+ u8 stp_state = dp->bridge_dev ? BR_STATE_BLOCKING : BR_STATE_FORWARDING;
int err;
if (!(master->flags & IFF_UP))
@@ -148,7 +104,7 @@
goto clear_promisc;
}
- dsa_slave_set_state(dev, stp_state);
+ dsa_port_set_state_now(p->dp, stp_state);
if (p->phy)
phy_start(p->phy);
@@ -171,7 +127,7 @@
static int dsa_slave_close(struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct net_device *master = p->dp->ds->dst->master_netdev;
+ struct net_device *master = dsa_master_netdev(p);
struct dsa_switch *ds = p->dp->ds;
if (p->phy)
@@ -190,7 +146,7 @@
if (ds->ops->port_disable)
ds->ops->port_disable(ds, p->dp->index, p->phy);
- dsa_slave_set_state(dev, BR_STATE_DISABLED);
+ dsa_port_set_state_now(p->dp, BR_STATE_DISABLED);
return 0;
}
@@ -198,7 +154,7 @@
static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct net_device *master = p->dp->ds->dst->master_netdev;
+ struct net_device *master = dsa_master_netdev(p);
if (change & IFF_ALLMULTI)
dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1);
@@ -209,7 +165,7 @@
static void dsa_slave_set_rx_mode(struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct net_device *master = p->dp->ds->dst->master_netdev;
+ struct net_device *master = dsa_master_netdev(p);
dev_mc_sync(master, dev);
dev_uc_sync(master, dev);
@@ -218,7 +174,7 @@
static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct net_device *master = p->dp->ds->dst->master_netdev;
+ struct net_device *master = dsa_master_netdev(p);
struct sockaddr *addr = a;
int err;
@@ -243,140 +199,6 @@
return 0;
}
-static int dsa_slave_port_vlan_add(struct net_device *dev,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_port *dp = p->dp;
- struct dsa_switch *ds = dp->ds;
-
- if (switchdev_trans_ph_prepare(trans)) {
- if (!ds->ops->port_vlan_prepare || !ds->ops->port_vlan_add)
- return -EOPNOTSUPP;
-
- return ds->ops->port_vlan_prepare(ds, dp->index, vlan, trans);
- }
-
- ds->ops->port_vlan_add(ds, dp->index, vlan, trans);
-
- return 0;
-}
-
-static int dsa_slave_port_vlan_del(struct net_device *dev,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
-
- if (!ds->ops->port_vlan_del)
- return -EOPNOTSUPP;
-
- return ds->ops->port_vlan_del(ds, p->dp->index, vlan);
-}
-
-static int dsa_slave_port_vlan_dump(struct net_device *dev,
- struct switchdev_obj_port_vlan *vlan,
- switchdev_obj_dump_cb_t *cb)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
-
- if (ds->ops->port_vlan_dump)
- return ds->ops->port_vlan_dump(ds, p->dp->index, vlan, cb);
-
- return -EOPNOTSUPP;
-}
-
-static int dsa_slave_port_fdb_add(struct net_device *dev,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
-
- if (switchdev_trans_ph_prepare(trans)) {
- if (!ds->ops->port_fdb_prepare || !ds->ops->port_fdb_add)
- return -EOPNOTSUPP;
-
- return ds->ops->port_fdb_prepare(ds, p->dp->index, fdb, trans);
- }
-
- ds->ops->port_fdb_add(ds, p->dp->index, fdb, trans);
-
- return 0;
-}
-
-static int dsa_slave_port_fdb_del(struct net_device *dev,
- const struct switchdev_obj_port_fdb *fdb)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
- int ret = -EOPNOTSUPP;
-
- if (ds->ops->port_fdb_del)
- ret = ds->ops->port_fdb_del(ds, p->dp->index, fdb);
-
- return ret;
-}
-
-static int dsa_slave_port_fdb_dump(struct net_device *dev,
- struct switchdev_obj_port_fdb *fdb,
- switchdev_obj_dump_cb_t *cb)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
-
- if (ds->ops->port_fdb_dump)
- return ds->ops->port_fdb_dump(ds, p->dp->index, fdb, cb);
-
- return -EOPNOTSUPP;
-}
-
-static int dsa_slave_port_mdb_add(struct net_device *dev,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
-
- if (switchdev_trans_ph_prepare(trans)) {
- if (!ds->ops->port_mdb_prepare || !ds->ops->port_mdb_add)
- return -EOPNOTSUPP;
-
- return ds->ops->port_mdb_prepare(ds, p->dp->index, mdb, trans);
- }
-
- ds->ops->port_mdb_add(ds, p->dp->index, mdb, trans);
-
- return 0;
-}
-
-static int dsa_slave_port_mdb_del(struct net_device *dev,
- const struct switchdev_obj_port_mdb *mdb)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
-
- if (ds->ops->port_mdb_del)
- return ds->ops->port_mdb_del(ds, p->dp->index, mdb);
-
- return -EOPNOTSUPP;
-}
-
-static int dsa_slave_port_mdb_dump(struct net_device *dev,
- struct switchdev_obj_port_mdb *mdb,
- switchdev_obj_dump_cb_t *cb)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
-
- if (ds->ops->port_mdb_dump)
- return ds->ops->port_mdb_dump(ds, p->dp->index, mdb, cb);
-
- return -EOPNOTSUPP;
-}
-
static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct dsa_slave_priv *p = netdev_priv(dev);
@@ -387,96 +209,24 @@
return -EOPNOTSUPP;
}
-static int dsa_slave_stp_state_set(struct net_device *dev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
-
- if (switchdev_trans_ph_prepare(trans))
- return ds->ops->port_stp_state_set ? 0 : -EOPNOTSUPP;
-
- dsa_slave_set_state(dev, attr->u.stp_state);
-
- return 0;
-}
-
-static int dsa_slave_vlan_filtering(struct net_device *dev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
-
- /* bridge skips -EOPNOTSUPP, so skip the prepare phase */
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
- if (ds->ops->port_vlan_filtering)
- return ds->ops->port_vlan_filtering(ds, p->dp->index,
- attr->u.vlan_filtering);
-
- return 0;
-}
-
-static unsigned int dsa_fastest_ageing_time(struct dsa_switch *ds,
- unsigned int ageing_time)
-{
- int i;
-
- for (i = 0; i < ds->num_ports; ++i) {
- struct dsa_port *dp = &ds->ports[i];
-
- if (dp && dp->ageing_time && dp->ageing_time < ageing_time)
- ageing_time = dp->ageing_time;
- }
-
- return ageing_time;
-}
-
-static int dsa_slave_ageing_time(struct net_device *dev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
- unsigned long ageing_jiffies = clock_t_to_jiffies(attr->u.ageing_time);
- unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
-
- if (switchdev_trans_ph_prepare(trans)) {
- if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
- return -ERANGE;
- if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
- return -ERANGE;
- return 0;
- }
-
- /* Keep the fastest ageing time in case of multiple bridges */
- p->dp->ageing_time = ageing_time;
- ageing_time = dsa_fastest_ageing_time(ds, ageing_time);
-
- if (ds->ops->set_ageing_time)
- return ds->ops->set_ageing_time(ds, ageing_time);
-
- return 0;
-}
-
static int dsa_slave_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
struct switchdev_trans *trans)
{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_port *dp = p->dp;
int ret;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- ret = dsa_slave_stp_state_set(dev, attr, trans);
+ ret = dsa_port_set_state(dp, attr->u.stp_state, trans);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- ret = dsa_slave_vlan_filtering(dev, attr, trans);
+ ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
+ trans);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
- ret = dsa_slave_ageing_time(dev, attr, trans);
+ ret = dsa_port_ageing_time(dp, attr->u.ageing_time, trans);
break;
default:
ret = -EOPNOTSUPP;
@@ -490,6 +240,8 @@
const struct switchdev_obj *obj,
struct switchdev_trans *trans)
{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_port *dp = p->dp;
int err;
/* For the prepare phase, ensure the full set of changes is feasible in
@@ -499,18 +251,14 @@
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_FDB:
- err = dsa_slave_port_fdb_add(dev,
- SWITCHDEV_OBJ_PORT_FDB(obj),
- trans);
+ err = dsa_port_fdb_add(dp, SWITCHDEV_OBJ_PORT_FDB(obj), trans);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
- err = dsa_slave_port_mdb_add(dev, SWITCHDEV_OBJ_PORT_MDB(obj),
- trans);
+ err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
break;
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = dsa_slave_port_vlan_add(dev,
- SWITCHDEV_OBJ_PORT_VLAN(obj),
- trans);
+ err = dsa_port_vlan_add(dp, SWITCHDEV_OBJ_PORT_VLAN(obj),
+ trans);
break;
default:
err = -EOPNOTSUPP;
@@ -523,19 +271,19 @@
static int dsa_slave_port_obj_del(struct net_device *dev,
const struct switchdev_obj *obj)
{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_port *dp = p->dp;
int err;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_FDB:
- err = dsa_slave_port_fdb_del(dev,
- SWITCHDEV_OBJ_PORT_FDB(obj));
+ err = dsa_port_fdb_del(dp, SWITCHDEV_OBJ_PORT_FDB(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
- err = dsa_slave_port_mdb_del(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
+ err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = dsa_slave_port_vlan_del(dev,
- SWITCHDEV_OBJ_PORT_VLAN(obj));
+ err = dsa_port_vlan_del(dp, SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
default:
err = -EOPNOTSUPP;
@@ -549,22 +297,19 @@
struct switchdev_obj *obj,
switchdev_obj_dump_cb_t *cb)
{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_port *dp = p->dp;
int err;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_FDB:
- err = dsa_slave_port_fdb_dump(dev,
- SWITCHDEV_OBJ_PORT_FDB(obj),
- cb);
+ err = dsa_port_fdb_dump(dp, SWITCHDEV_OBJ_PORT_FDB(obj), cb);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
- err = dsa_slave_port_mdb_dump(dev, SWITCHDEV_OBJ_PORT_MDB(obj),
- cb);
+ err = dsa_port_mdb_dump(dp, SWITCHDEV_OBJ_PORT_MDB(obj), cb);
break;
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = dsa_slave_port_vlan_dump(dev,
- SWITCHDEV_OBJ_PORT_VLAN(obj),
- cb);
+ err = dsa_port_vlan_dump(dp, SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
break;
default:
err = -EOPNOTSUPP;
@@ -574,57 +319,6 @@
return err;
}
-static int dsa_slave_bridge_port_join(struct net_device *dev,
- struct net_device *br)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_notifier_bridge_info info = {
- .sw_index = p->dp->ds->index,
- .port = p->dp->index,
- .br = br,
- };
- int err;
-
- /* Here the port is already bridged. Reflect the current configuration
- * so that drivers can program their chips accordingly.
- */
- p->dp->bridge_dev = br;
-
- err = dsa_slave_notify(dev, DSA_NOTIFIER_BRIDGE_JOIN, &info);
-
- /* The bridging is rolled back on error */
- if (err)
- p->dp->bridge_dev = NULL;
-
- return err;
-}
-
-static void dsa_slave_bridge_port_leave(struct net_device *dev,
- struct net_device *br)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_notifier_bridge_info info = {
- .sw_index = p->dp->ds->index,
- .port = p->dp->index,
- .br = br,
- };
- int err;
-
- /* Here the port is already unbridged. Reflect the current configuration
- * so that drivers can program their chips accordingly.
- */
- p->dp->bridge_dev = NULL;
-
- err = dsa_slave_notify(dev, DSA_NOTIFIER_BRIDGE_LEAVE, &info);
- if (err)
- netdev_err(dev, "failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");
-
- /* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
- * so allow it to be in BR_STATE_FORWARDING to be kept functional
- */
- dsa_slave_set_state(dev, BR_STATE_FORWARDING);
-}
-
static int dsa_slave_port_attr_get(struct net_device *dev,
struct switchdev_attr *attr)
{
@@ -663,10 +357,14 @@
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- /* Transmit function may have to reallocate the original SKB */
+ /* Transmit function may have to reallocate the original SKB,
+ * in which case it must have freed it. Only free it here on error.
+ */
nskb = p->xmit(skb, dev);
- if (!nskb)
+ if (!nskb) {
+ kfree_skb(skb);
return NETDEV_TX_OK;
+ }
/* An SKB used by netpoll still needs to be mangled with the
* protocol-specific tag to be successfully transmitted
@@ -677,7 +375,7 @@
/* Queue the SKB for transmission on the parent interface, but
* do not modify its EtherType
*/
- nskb->dev = p->dp->ds->dst->master_netdev;
+ nskb->dev = dsa_master_netdev(p);
dev_queue_xmit(nskb);
return NETDEV_TX_OK;
@@ -689,12 +387,13 @@
struct ethtool_link_ksettings *cmd)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- int err = -EOPNOTSUPP;
- if (p->phy != NULL)
- err = phy_ethtool_ksettings_get(p->phy, cmd);
+ if (!p->phy)
+ return -EOPNOTSUPP;
- return err;
+ phy_ethtool_ksettings_get(p->phy, cmd);
+
+ return 0;
}
static int
@@ -821,14 +520,14 @@
uint64_t *data)
{
struct dsa_switch_tree *dst = dev->dsa_ptr;
- struct dsa_switch *ds = dst->cpu_switch;
- s8 cpu_port = dst->cpu_port;
+ struct dsa_port *cpu_dp = dsa_get_cpu_port(dst);
+ struct dsa_switch *ds = cpu_dp->ds;
+ s8 cpu_port = cpu_dp->index;
int count = 0;
- if (dst->master_ethtool_ops.get_sset_count) {
- count = dst->master_ethtool_ops.get_sset_count(dev,
- ETH_SS_STATS);
- dst->master_ethtool_ops.get_ethtool_stats(dev, stats, data);
+ if (cpu_dp->ethtool_ops.get_sset_count) {
+ count = cpu_dp->ethtool_ops.get_sset_count(dev, ETH_SS_STATS);
+ cpu_dp->ethtool_ops.get_ethtool_stats(dev, stats, data);
}
if (ds->ops->get_ethtool_stats)
@@ -838,11 +537,12 @@
static int dsa_cpu_port_get_sset_count(struct net_device *dev, int sset)
{
struct dsa_switch_tree *dst = dev->dsa_ptr;
- struct dsa_switch *ds = dst->cpu_switch;
+ struct dsa_port *cpu_dp = dsa_get_cpu_port(dst);
+ struct dsa_switch *ds = cpu_dp->ds;
int count = 0;
- if (dst->master_ethtool_ops.get_sset_count)
- count += dst->master_ethtool_ops.get_sset_count(dev, sset);
+ if (cpu_dp->ethtool_ops.get_sset_count)
+ count += cpu_dp->ethtool_ops.get_sset_count(dev, sset);
if (sset == ETH_SS_STATS && ds->ops->get_sset_count)
count += ds->ops->get_sset_count(ds);
@@ -854,8 +554,9 @@
uint32_t stringset, uint8_t *data)
{
struct dsa_switch_tree *dst = dev->dsa_ptr;
- struct dsa_switch *ds = dst->cpu_switch;
- s8 cpu_port = dst->cpu_port;
+ struct dsa_port *cpu_dp = dsa_get_cpu_port(dst);
+ struct dsa_switch *ds = cpu_dp->ds;
+ s8 cpu_port = cpu_dp->index;
int len = ETH_GSTRING_LEN;
int mcount = 0, count;
unsigned int i;
@@ -866,10 +567,9 @@
/* We do not want to be NULL-terminated, since this is a prefix */
pfx[sizeof(pfx) - 1] = '_';
- if (dst->master_ethtool_ops.get_sset_count) {
- mcount = dst->master_ethtool_ops.get_sset_count(dev,
- ETH_SS_STATS);
- dst->master_ethtool_ops.get_strings(dev, stringset, data);
+ if (cpu_dp->ethtool_ops.get_sset_count) {
+ mcount = cpu_dp->ethtool_ops.get_sset_count(dev, ETH_SS_STATS);
+ cpu_dp->ethtool_ops.get_strings(dev, stringset, data);
}
if (stringset == ETH_SS_STATS && ds->ops->get_strings) {
@@ -985,8 +685,7 @@
struct netpoll_info *ni)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
- struct net_device *master = ds->dst->master_netdev;
+ struct net_device *master = dsa_master_netdev(p);
struct netpoll *netpoll;
int err = 0;
@@ -1138,10 +837,13 @@
}
static int dsa_slave_setup_tc(struct net_device *dev, u32 handle,
- __be16 protocol, struct tc_to_netdev *tc)
+ u32 chain_index, __be16 protocol,
+ struct tc_to_netdev *tc)
{
bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);
- int ret = -EOPNOTSUPP;
+
+ if (chain_index)
+ return -EOPNOTSUPP;
switch (tc->type) {
case TC_SETUP_MATCHALL:
@@ -1155,10 +857,8 @@
return 0;
}
default:
- break;
+ return -EOPNOTSUPP;
}
-
- return ret;
}
void dsa_cpu_port_ethtool_init(struct ethtool_ops *ops)
@@ -1441,11 +1141,11 @@
struct net_device *master;
struct net_device *slave_dev;
struct dsa_slave_priv *p;
+ struct dsa_port *cpu_dp;
int ret;
- master = ds->dst->master_netdev;
- if (ds->master_netdev)
- master = ds->master_netdev;
+ cpu_dp = ds->dst->cpu_dp;
+ master = cpu_dp->netdev;
slave_dev = alloc_netdev(sizeof(struct dsa_slave_priv), name,
NET_NAME_UNKNOWN, ether_setup);
@@ -1528,14 +1228,16 @@
static int dsa_slave_changeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_port *dp = p->dp;
int err = NOTIFY_DONE;
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking) {
- err = dsa_slave_bridge_port_join(dev, info->upper_dev);
+ err = dsa_port_bridge_join(dp, info->upper_dev);
err = notifier_from_errno(err);
} else {
- dsa_slave_bridge_port_leave(dev, info->upper_dev);
+ dsa_port_bridge_leave(dp, info->upper_dev);
err = NOTIFY_OK;
}
}
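The xmit ownership contract is now: a tagger either consumes the skb
(typically returning a reallocated one) or returns NULL without freeing,
and dsa_slave_xmit() frees it in the NULL case; this is why the kfree_skb()
calls disappear from the out_free paths of the taggers below. A hedged
sketch of a conforming xmit with a made-up 2-byte tag layout:

	static struct sk_buff *example_tag_xmit(struct sk_buff *skb,
						struct net_device *dev)
	{
		struct dsa_slave_priv *p = netdev_priv(dev);
		u8 *tag;

		/* On failure return NULL *without* freeing; dsa_slave_xmit()
		 * now calls kfree_skb() itself when xmit() returns NULL.
		 */
		if (skb_cow_head(skb, 2) < 0)
			return NULL;

		skb_push(skb, 2);
		memmove(skb->data, skb->data + 2, 2 * ETH_ALEN);

		tag = skb->data + 2 * ETH_ALEN;
		tag[0] = 0;			/* hypothetical opcode byte */
		tag[1] = BIT(p->dp->index);	/* hypothetical port bitmap */

		return skb;
	}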
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index ca6e26e..97e2e9c 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -12,7 +12,47 @@
#include <linux/netdevice.h>
#include <linux/notifier.h>
-#include <net/dsa.h>
+#include <net/switchdev.h>
+
+#include "dsa_priv.h"
+
+static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
+ unsigned int ageing_time)
+{
+ int i;
+
+ for (i = 0; i < ds->num_ports; ++i) {
+ struct dsa_port *dp = &ds->ports[i];
+
+ if (dp->ageing_time && dp->ageing_time < ageing_time)
+ ageing_time = dp->ageing_time;
+ }
+
+ return ageing_time;
+}
+
+static int dsa_switch_ageing_time(struct dsa_switch *ds,
+ struct dsa_notifier_ageing_time_info *info)
+{
+ unsigned int ageing_time = info->ageing_time;
+ struct switchdev_trans *trans = info->trans;
+
+ if (switchdev_trans_ph_prepare(trans)) {
+ if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
+ return -ERANGE;
+ if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
+ return -ERANGE;
+ return 0;
+ }
+
+ /* Program the fastest ageing time in case of multiple bridges */
+ ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
+
+ if (ds->ops->set_ageing_time)
+ return ds->ops->set_ageing_time(ds, ageing_time);
+
+ return 0;
+}
static int dsa_switch_bridge_join(struct dsa_switch *ds,
struct dsa_notifier_bridge_info *info)
@@ -40,6 +80,137 @@
return 0;
}
+static int dsa_switch_fdb_add(struct dsa_switch *ds,
+ struct dsa_notifier_fdb_info *info)
+{
+ const struct switchdev_obj_port_fdb *fdb = info->fdb;
+ struct switchdev_trans *trans = info->trans;
+
+ /* We do not yet care about other switch chips of the fabric */
+ if (ds->index != info->sw_index)
+ return 0;
+
+ if (switchdev_trans_ph_prepare(trans)) {
+ if (!ds->ops->port_fdb_prepare || !ds->ops->port_fdb_add)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_fdb_prepare(ds, info->port, fdb, trans);
+ }
+
+ ds->ops->port_fdb_add(ds, info->port, fdb, trans);
+
+ return 0;
+}
+
+static int dsa_switch_fdb_del(struct dsa_switch *ds,
+ struct dsa_notifier_fdb_info *info)
+{
+ const struct switchdev_obj_port_fdb *fdb = info->fdb;
+
+ /* We do not yet care about other switch chips of the fabric */
+ if (ds->index != info->sw_index)
+ return 0;
+
+ if (!ds->ops->port_fdb_del)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_fdb_del(ds, info->port, fdb);
+}
+
+static int dsa_switch_mdb_add(struct dsa_switch *ds,
+ struct dsa_notifier_mdb_info *info)
+{
+ const struct switchdev_obj_port_mdb *mdb = info->mdb;
+ struct switchdev_trans *trans = info->trans;
+ DECLARE_BITMAP(group, ds->num_ports);
+ int port, err;
+
+ /* Build a mask of Multicast group members */
+ bitmap_zero(group, ds->num_ports);
+ if (ds->index == info->sw_index)
+ set_bit(info->port, group);
+ for (port = 0; port < ds->num_ports; port++)
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+ set_bit(port, group);
+
+ if (switchdev_trans_ph_prepare(trans)) {
+ if (!ds->ops->port_mdb_prepare || !ds->ops->port_mdb_add)
+ return -EOPNOTSUPP;
+
+ for_each_set_bit(port, group, ds->num_ports) {
+ err = ds->ops->port_mdb_prepare(ds, port, mdb, trans);
+ if (err)
+ return err;
+ }
+ }
+
+ for_each_set_bit(port, group, ds->num_ports)
+ ds->ops->port_mdb_add(ds, port, mdb, trans);
+
+ return 0;
+}
+
+static int dsa_switch_mdb_del(struct dsa_switch *ds,
+ struct dsa_notifier_mdb_info *info)
+{
+ const struct switchdev_obj_port_mdb *mdb = info->mdb;
+
+ if (!ds->ops->port_mdb_del)
+ return -EOPNOTSUPP;
+
+ if (ds->index == info->sw_index)
+ return ds->ops->port_mdb_del(ds, info->port, mdb);
+
+ return 0;
+}
+
+static int dsa_switch_vlan_add(struct dsa_switch *ds,
+ struct dsa_notifier_vlan_info *info)
+{
+ const struct switchdev_obj_port_vlan *vlan = info->vlan;
+ struct switchdev_trans *trans = info->trans;
+ DECLARE_BITMAP(members, ds->num_ports);
+ int port, err;
+
+ /* Build a mask of VLAN members */
+ bitmap_zero(members, ds->num_ports);
+ if (ds->index == info->sw_index)
+ set_bit(info->port, members);
+ for (port = 0; port < ds->num_ports; port++)
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+ set_bit(port, members);
+
+ if (switchdev_trans_ph_prepare(trans)) {
+ if (!ds->ops->port_vlan_prepare || !ds->ops->port_vlan_add)
+ return -EOPNOTSUPP;
+
+ for_each_set_bit(port, members, ds->num_ports) {
+ err = ds->ops->port_vlan_prepare(ds, port, vlan, trans);
+ if (err)
+ return err;
+ }
+ }
+
+ for_each_set_bit(port, members, ds->num_ports)
+ ds->ops->port_vlan_add(ds, port, vlan, trans);
+
+ return 0;
+}
+
+static int dsa_switch_vlan_del(struct dsa_switch *ds,
+ struct dsa_notifier_vlan_info *info)
+{
+ const struct switchdev_obj_port_vlan *vlan = info->vlan;
+
+ if (!ds->ops->port_vlan_del)
+ return -EOPNOTSUPP;
+
+ if (ds->index == info->sw_index)
+ return ds->ops->port_vlan_del(ds, info->port, vlan);
+
+ return 0;
+}
+
static int dsa_switch_event(struct notifier_block *nb,
unsigned long event, void *info)
{
@@ -47,12 +218,33 @@
int err;
switch (event) {
+ case DSA_NOTIFIER_AGEING_TIME:
+ err = dsa_switch_ageing_time(ds, info);
+ break;
case DSA_NOTIFIER_BRIDGE_JOIN:
err = dsa_switch_bridge_join(ds, info);
break;
case DSA_NOTIFIER_BRIDGE_LEAVE:
err = dsa_switch_bridge_leave(ds, info);
break;
+ case DSA_NOTIFIER_FDB_ADD:
+ err = dsa_switch_fdb_add(ds, info);
+ break;
+ case DSA_NOTIFIER_FDB_DEL:
+ err = dsa_switch_fdb_del(ds, info);
+ break;
+ case DSA_NOTIFIER_MDB_ADD:
+ err = dsa_switch_mdb_add(ds, info);
+ break;
+ case DSA_NOTIFIER_MDB_DEL:
+ err = dsa_switch_mdb_del(ds, info);
+ break;
+ case DSA_NOTIFIER_VLAN_ADD:
+ err = dsa_switch_vlan_add(ds, info);
+ break;
+ case DSA_NOTIFIER_VLAN_DEL:
+ err = dsa_switch_vlan_del(ds, info);
+ break;
default:
err = -EOPNOTSUPP;
break;
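dsa_switch_mdb_add() and dsa_switch_vlan_add() above compute the affected
ports per chip with a bitmap: the requested port when it lives on this chip,
plus every CPU and DSA port so the entry spans the fabric. A small sketch of
that bitmap pattern, assuming a hypothetical 7-port chip where port 5 is the
CPU port and port 2 is the target:

	DECLARE_BITMAP(members, 7);
	int port;

	bitmap_zero(members, 7);
	set_bit(2, members);		/* requested user port */
	set_bit(5, members);		/* CPU port joins every group */

	for_each_set_bit(port, members, 7)
		pr_debug("programming port %d\n", port);	/* 2, then 5 */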
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index 2a9b52c..c697d98 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -12,7 +12,7 @@
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <net/dsa.h>
+
#include "dsa_priv.h"
/* This tag length is 4 bytes, older ones were 6 bytes, we do not
@@ -65,7 +65,7 @@
u8 *brcm_tag;
if (skb_cow_head(skb, BRCM_TAG_LEN) < 0)
- goto out_free;
+ return NULL;
skb_push(skb, BRCM_TAG_LEN);
@@ -86,10 +86,6 @@
brcm_tag[3] = (1 << p->dp->index) & BRCM_IG_DSTMAP1_MASK;
return skb;
-
-out_free:
- kfree_skb(skb);
- return NULL;
}
static struct sk_buff *brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -97,34 +93,33 @@
struct net_device *orig_dev)
{
struct dsa_switch_tree *dst = dev->dsa_ptr;
- struct dsa_switch *ds;
+ struct dsa_port *cpu_dp = dsa_get_cpu_port(dst);
+ struct dsa_switch *ds = cpu_dp->ds;
int source_port;
u8 *brcm_tag;
- ds = dst->cpu_switch;
-
if (unlikely(!pskb_may_pull(skb, BRCM_TAG_LEN)))
- goto out_drop;
+ return NULL;
/* skb->data points to the EtherType, the tag is right before it */
brcm_tag = skb->data - 2;
/* The opcode should never be different than 0b000 */
if (unlikely((brcm_tag[0] >> BRCM_OPCODE_SHIFT) & BRCM_OPCODE_MASK))
- goto out_drop;
+ return NULL;
/* We should never see a reserved reason code without knowing how to
* handle it
*/
if (unlikely(brcm_tag[2] & BRCM_EG_RC_RSVD))
- goto out_drop;
+ return NULL;
/* Locate which port this is coming from */
source_port = brcm_tag[3] & BRCM_EG_PID_MASK;
/* Validate port against switch setup: it must be in range and have a netdev */
if (source_port >= ds->num_ports || !ds->ports[source_port].netdev)
- goto out_drop;
+ return NULL;
/* Remove Broadcom tag and update checksum */
skb_pull_rcsum(skb, BRCM_TAG_LEN);
@@ -137,9 +132,6 @@
skb->dev = ds->ports[source_port].netdev;
return skb;
-
-out_drop:
- return NULL;
}
const struct dsa_device_ops brcm_netdev_ops = {
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index 1c6633f..12867a4 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -11,7 +11,7 @@
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <net/dsa.h>
+
#include "dsa_priv.h"
#define DSA_HLEN 4
@@ -28,7 +28,7 @@
*/
if (skb->protocol == htons(ETH_P_8021Q)) {
if (skb_cow_head(skb, 0) < 0)
- goto out_free;
+ return NULL;
/*
* Construct tagged FROM_CPU DSA tag from 802.1q tag.
@@ -46,7 +46,7 @@
}
} else {
if (skb_cow_head(skb, DSA_HLEN) < 0)
- goto out_free;
+ return NULL;
skb_push(skb, DSA_HLEN);
memmove(skb->data, skb->data + DSA_HLEN, 2 * ETH_ALEN);
@@ -62,10 +62,6 @@
}
return skb;
-
-out_free:
- kfree_skb(skb);
- return NULL;
}
static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -79,7 +75,7 @@
int source_port;
if (unlikely(!pskb_may_pull(skb, DSA_HLEN)))
- goto out_drop;
+ return NULL;
/*
* The ethertype field is part of the DSA header.
@@ -90,7 +86,7 @@
* Check that frame type is either TO_CPU or FORWARD.
*/
if ((dsa_header[0] & 0xc0) != 0x00 && (dsa_header[0] & 0xc0) != 0xc0)
- goto out_drop;
+ return NULL;
/*
* Determine source device and port.
@@ -103,14 +99,14 @@
* port is a registered DSA port.
*/
if (source_device >= DSA_MAX_SWITCHES)
- goto out_drop;
+ return NULL;
ds = dst->ds[source_device];
if (!ds)
- goto out_drop;
+ return NULL;
if (source_port >= ds->num_ports || !ds->ports[source_port].netdev)
- goto out_drop;
+ return NULL;
/*
* Convert the DSA header to an 802.1q header if the 'tagged'
@@ -161,9 +157,6 @@
skb->dev = ds->ports[source_port].netdev;
return skb;
-
-out_drop:
- return NULL;
}
const struct dsa_device_ops dsa_netdev_ops = {
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index d9c668a..67a9d26 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -11,7 +11,7 @@
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <net/dsa.h>
+
#include "dsa_priv.h"
#define DSA_HLEN 4
@@ -30,7 +30,7 @@
*/
if (skb->protocol == htons(ETH_P_8021Q)) {
if (skb_cow_head(skb, DSA_HLEN) < 0)
- goto out_free;
+ return NULL;
skb_push(skb, DSA_HLEN);
memmove(skb->data, skb->data + DSA_HLEN, 2 * ETH_ALEN);
@@ -55,7 +55,7 @@
}
} else {
if (skb_cow_head(skb, EDSA_HLEN) < 0)
- goto out_free;
+ return NULL;
skb_push(skb, EDSA_HLEN);
memmove(skb->data, skb->data + EDSA_HLEN, 2 * ETH_ALEN);
@@ -75,10 +75,6 @@
}
return skb;
-
-out_free:
- kfree_skb(skb);
- return NULL;
}
static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -92,7 +88,7 @@
int source_port;
if (unlikely(!pskb_may_pull(skb, EDSA_HLEN)))
- goto out_drop;
+ return NULL;
/*
* Skip the two null bytes after the ethertype.
@@ -103,7 +99,7 @@
* Check that frame type is either TO_CPU or FORWARD.
*/
if ((edsa_header[0] & 0xc0) != 0x00 && (edsa_header[0] & 0xc0) != 0xc0)
- goto out_drop;
+ return NULL;
/*
* Determine source device and port.
@@ -116,14 +112,14 @@
* port is a registered DSA port.
*/
if (source_device >= DSA_MAX_SWITCHES)
- goto out_drop;
+ return NULL;
ds = dst->ds[source_device];
if (!ds)
- goto out_drop;
+ return NULL;
if (source_port >= ds->num_ports || !ds->ports[source_port].netdev)
- goto out_drop;
+ return NULL;
/*
* If the 'tagged' bit is set, convert the DSA tag to a 802.1q
@@ -180,9 +176,6 @@
skb->dev = ds->ports[source_port].netdev;
return skb;
-
-out_drop:
- return NULL;
}
const struct dsa_device_ops edsa_netdev_ops = {
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
new file mode 100644
index 0000000..fab41de
--- /dev/null
+++ b/net/dsa/tag_ksz.c
@@ -0,0 +1,99 @@
+/*
+ * net/dsa/tag_ksz.c - Microchip KSZ Switch tag format handling
+ * Copyright (c) 2017 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <net/dsa.h>
+#include "dsa_priv.h"
+
+/* For Ingress (Host -> KSZ), 2 bytes are added before FCS.
+ * ---------------------------------------------------------------------------
+ * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|tag1(1byte)|FCS(4bytes)
+ * ---------------------------------------------------------------------------
+ * tag0 : Prioritization (not used now)
+ * tag1 : each bit represents port (eg, 0x01=port1, 0x02=port2, 0x10=port5)
+ *
+ * For Egress (KSZ -> Host), 1 byte is added before FCS.
+ * ---------------------------------------------------------------------------
+ * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|FCS(4bytes)
+ * ---------------------------------------------------------------------------
+ * tag0 : zero-based value represents port
+ * (eg, 0x00=port1, 0x02=port3, 0x06=port7)
+ */
+
+#define KSZ_INGRESS_TAG_LEN 2
+#define KSZ_EGRESS_TAG_LEN 1
+
+static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct sk_buff *nskb;
+ int padlen;
+ u8 *tag;
+
+ padlen = (skb->len >= ETH_ZLEN) ? 0 : ETH_ZLEN - skb->len;
+
+ if (skb_tailroom(skb) >= padlen + KSZ_INGRESS_TAG_LEN) {
+ nskb = skb;
+ } else {
+ nskb = alloc_skb(NET_IP_ALIGN + skb->len +
+ padlen + KSZ_INGRESS_TAG_LEN, GFP_ATOMIC);
+ if (!nskb)
+ return NULL;
+ skb_reserve(nskb, NET_IP_ALIGN);
+
+ skb_reset_mac_header(nskb);
+ skb_set_network_header(nskb,
+ skb_network_header(skb) - skb->head);
+ skb_set_transport_header(nskb,
+ skb_transport_header(skb) - skb->head);
+ skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
+ kfree_skb(skb);
+ }
+
+ /* skb_put_padto() frees the skb on failure */
+ if (skb_put_padto(nskb, nskb->len + padlen))
+ return NULL;
+
+ tag = skb_put(nskb, KSZ_INGRESS_TAG_LEN);
+ tag[0] = 0;
+ tag[1] = 1 << p->dp->index; /* destination port */
+
+ return nskb;
+}
+
+static struct sk_buff *ksz_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt,
+ struct net_device *orig_dev)
+{
+ struct dsa_switch_tree *dst = dev->dsa_ptr;
+ struct dsa_port *cpu_dp = dsa_get_cpu_port(dst);
+ struct dsa_switch *ds = cpu_dp->ds;
+ u8 *tag;
+ int source_port;
+
+ tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
+
+ source_port = tag[0] & 7;
+ if (source_port >= ds->num_ports || !ds->ports[source_port].netdev)
+ return NULL;
+
+ pskb_trim_rcsum(skb, skb->len - KSZ_EGRESS_TAG_LEN);
+
+ skb->dev = ds->ports[source_port].netdev;
+
+ return skb;
+}
+
+const struct dsa_device_ops ksz_netdev_ops = {
+ .xmit = ksz_xmit,
+ .rcv = ksz_rcv,
+};
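Working through the KSZ tag arithmetic in the format comment above: on
transmit, tag[1] is a one-hot destination mask, so a frame for zero-based
port index 2 ("port3") carries 1 << 2 = 0x04; on receive, masking tag[0]
with 7 yields the zero-based source index directly. A tiny worked sketch
with made-up bytes:

	u8 egress_tag0 = 0x02;			/* example received byte */
	int source_port = egress_tag0 & 7;	/* -> 2, i.e. "port3" */

	u8 ingress_tag1 = 1 << 2;		/* -> 0x04 for "port3" */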
diff --git a/net/dsa/tag_lan9303.c b/net/dsa/tag_lan9303.c
index 70130ed..247774d 100644
--- a/net/dsa/tag_lan9303.c
+++ b/net/dsa/tag_lan9303.c
@@ -14,7 +14,7 @@
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <net/dsa.h>
+
#include "dsa_priv.h"
/* To define the outgoing port and to discover the incoming port a regular
@@ -52,7 +52,7 @@
if (skb_cow_head(skb, LAN9303_TAG_LEN) < 0) {
dev_dbg(&dev->dev,
"Cannot make room for the special tag. Dropping packet\n");
- goto out_free;
+ return NULL;
}
/* provide 'LAN9303_TAG_LEN' bytes additional space */
@@ -66,9 +66,6 @@
lan9303_tag[1] = htons(p->dp->index | BIT(4));
return skb;
-out_free:
- kfree_skb(skb);
- return NULL;
}
static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c
index 837cddd..2f32b7e 100644
--- a/net/dsa/tag_mtk.c
+++ b/net/dsa/tag_mtk.c
@@ -13,7 +13,7 @@
*/
#include <linux/etherdevice.h>
-#include <net/dsa.h>
+
#include "dsa_priv.h"
#define MTK_HDR_LEN 4
@@ -27,7 +27,7 @@
u8 *mtk_tag;
if (skb_cow_head(skb, MTK_HDR_LEN) < 0)
- goto out_free;
+ return NULL;
skb_push(skb, MTK_HDR_LEN);
@@ -41,10 +41,6 @@
mtk_tag[3] = 0;
return skb;
-
-out_free:
- kfree_skb(skb);
- return NULL;
}
static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -57,7 +53,7 @@
__be16 *phdr, hdr;
if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN)))
- goto out_drop;
+ return NULL;
/* The MTK header is added by the switch between src addr
* and ethertype. At this point, skb->data points to 2 bytes
@@ -79,19 +75,16 @@
*/
ds = dst->ds[0];
if (!ds)
- goto out_drop;
+ return NULL;
/* Get source port information */
port = (hdr & MTK_HDR_RECV_SOURCE_PORT_MASK);
if (!ds->ports[port].netdev)
- goto out_drop;
+ return NULL;
skb->dev = ds->ports[port].netdev;
return skb;
-
-out_drop:
- return NULL;
}
const struct dsa_device_ops mtk_netdev_ops = {
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
index 3ba3f59..1867a3d 100644
--- a/net/dsa/tag_qca.c
+++ b/net/dsa/tag_qca.c
@@ -12,7 +12,7 @@
*/
#include <linux/etherdevice.h>
-#include <net/dsa.h>
+
#include "dsa_priv.h"
#define QCA_HDR_LEN 2
@@ -45,7 +45,7 @@
dev->stats.tx_bytes += skb->len;
if (skb_cow_head(skb, 0) < 0)
- goto out_free;
+ return NULL;
skb_push(skb, QCA_HDR_LEN);
@@ -60,10 +60,6 @@
*phdr = htons(hdr);
return skb;
-
-out_free:
- kfree_skb(skb);
- return NULL;
}
static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -71,13 +67,14 @@
struct net_device *orig_dev)
{
struct dsa_switch_tree *dst = dev->dsa_ptr;
+ struct dsa_port *cpu_dp = dsa_get_cpu_port(dst);
struct dsa_switch *ds;
u8 ver;
int port;
__be16 *phdr, hdr;
if (unlikely(!pskb_may_pull(skb, QCA_HDR_LEN)))
- goto out_drop;
+ return NULL;
/* The QCA header is added by the switch between src addr and Ethertype.
* At this point, skb->data points to ethertype so header should be
@@ -89,7 +86,7 @@
/* Make sure the version is correct */
ver = (hdr & QCA_HDR_RECV_VERSION_MASK) >> QCA_HDR_RECV_VERSION_S;
if (unlikely(ver != QCA_HDR_VERSION))
- goto out_drop;
+ return NULL;
/* Remove QCA tag and recalculate checksum */
skb_pull_rcsum(skb, QCA_HDR_LEN);
@@ -99,22 +96,19 @@
/* This protocol doesn't support cascading multiple switches so it's
* safe to assume the switch is first in the tree
*/
- ds = dst->cpu_switch;
+ ds = cpu_dp->ds;
if (!ds)
- goto out_drop;
+ return NULL;
/* Get source port information */
port = (hdr & QCA_HDR_RECV_SOURCE_PORT_MASK);
if (!ds->ports[port].netdev)
- goto out_drop;
+ return NULL;
/* Update skb & forward the frame accordingly */
skb->dev = ds->ports[port].netdev;
return skb;
-
-out_drop:
- return NULL;
}
const struct dsa_device_ops qca_netdev_ops = {
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index aafc2fc..b09e562 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -11,7 +11,7 @@
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <net/dsa.h>
+
#include "dsa_priv.h"
static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -32,10 +32,8 @@
padlen = 60 - skb->len;
nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC);
- if (nskb == NULL) {
- kfree_skb(skb);
+ if (!nskb)
return NULL;
- }
skb_reserve(nskb, NET_IP_ALIGN);
skb_reset_mac_header(nskb);
@@ -45,8 +43,7 @@
kfree_skb(skb);
if (padlen) {
- u8 *pad = skb_put(nskb, padlen);
- memset(pad, 0, padlen);
+ skb_put_zero(nskb, padlen);
}
trailer = skb_put(nskb, 4);
@@ -63,32 +60,28 @@
struct net_device *orig_dev)
{
struct dsa_switch_tree *dst = dev->dsa_ptr;
- struct dsa_switch *ds;
+ struct dsa_port *cpu_dp = dsa_get_cpu_port(dst);
+ struct dsa_switch *ds = cpu_dp->ds;
u8 *trailer;
int source_port;
- ds = dst->cpu_switch;
-
if (skb_linearize(skb))
- goto out_drop;
+ return NULL;
trailer = skb_tail_pointer(skb) - 4;
if (trailer[0] != 0x80 || (trailer[1] & 0xf8) != 0x00 ||
(trailer[2] & 0xef) != 0x00 || trailer[3] != 0x00)
- goto out_drop;
+ return NULL;
source_port = trailer[1] & 7;
if (source_port >= ds->num_ports || !ds->ports[source_port].netdev)
- goto out_drop;
+ return NULL;
pskb_trim_rcsum(skb, skb->len - 4);
skb->dev = ds->ports[source_port].netdev;
return skb;
-
-out_drop:
- return NULL;
}
const struct dsa_device_ops trailer_netdev_ops = {
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 1446810..eaeba9b 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -83,7 +83,7 @@
unsigned short type,
const void *daddr, const void *saddr, unsigned int len)
{
- struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
+ struct ethhdr *eth = skb_push(skb, ETH_HLEN);
if (type != ETH_P_802_3 && type != ETH_P_802_2)
eth->h_proto = htons(type);
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 0a0a392..4e7bdb2 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -284,12 +284,12 @@
skb_reset_mac_header(skb);
if (hsrVer > 0) {
- hsr_tag = (typeof(hsr_tag)) skb_put(skb, sizeof(struct hsr_tag));
+ hsr_tag = skb_put(skb, sizeof(struct hsr_tag));
hsr_tag->encap_proto = htons(ETH_P_PRP);
set_hsr_tag_LSDU_size(hsr_tag, HSR_V1_SUP_LSDUSIZE);
}
- hsr_stag = (typeof(hsr_stag)) skb_put(skb, sizeof(struct hsr_sup_tag));
+ hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag));
set_hsr_stag_path(hsr_stag, (hsrVer ? 0x0 : 0xf));
set_hsr_stag_HSR_Ver(hsr_stag, hsrVer);
@@ -311,7 +311,7 @@
hsr_stag->HSR_TLV_Length = hsrVer ? sizeof(struct hsr_sup_payload) : 12;
/* Payload: MacAddressA */
- hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(struct hsr_sup_payload));
+ hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
ether_addr_copy(hsr_sp->MacAddressA, master->dev->dev_addr);
skb_put_padto(skb, ETH_ZLEN + HSR_HLEN);
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
index dbb476d..e6ff512 100644
--- a/net/ieee802154/6lowpan/tx.c
+++ b/net/ieee802154/6lowpan/tx.c
@@ -121,8 +121,7 @@
*mac_cb(frag) = *mac_cb(skb);
if (frag1) {
- memcpy(skb_put(frag, skb->mac_len),
- skb_mac_header(skb), skb->mac_len);
+ skb_put_data(frag, skb_mac_header(skb), skb->mac_len);
} else {
rc = wpan_dev_hard_header(frag, wdev,
&master_hdr->dest,
@@ -152,8 +151,8 @@
if (IS_ERR(frag))
return PTR_ERR(frag);
- memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
- memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);
+ skb_put_data(frag, frag_hdr, frag_hdrlen);
+ skb_put_data(frag, skb_network_header(skb) + offset, len);
raw_dump_table(__func__, " fragment dump", frag->data, frag->len);
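
A recurring theme in the hunks above (tag_trailer, eth.c, hsr, 6lowpan) is that explicit casts on skb_put()/skb_push()/skb_pull() disappear and open-coded memcpy()/memset() pairs collapse into single helpers. A minimal sketch of the interfaces this merge assumes: the void * return is what makes the dropped casts legal, and skb_put_data()/skb_put_zero() behave as shown (a sketch, not the kernel's exact code):

    void *skb_put(struct sk_buff *skb, unsigned int len);  /* was unsigned char * */
    void *skb_push(struct sk_buff *skb, unsigned int len);
    void *skb_pull(struct sk_buff *skb, unsigned int len);

    /* skb_put() + memcpy() in one step */
    static inline void *skb_put_data(struct sk_buff *skb, const void *data,
                                     unsigned int len)
    {
            void *tmp = skb_put(skb, len);

            memcpy(tmp, data, len);
            return tmp;
    }

    /* skb_put() + memset(0) in one step */
    static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
    {
            void *tmp = skb_put(skb, len);

            memset(tmp, 0, len);
            return tmp;
    }
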
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index eedba76..a60658c 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -301,15 +301,14 @@
goto out_skb;
skb->dev = dev;
- skb->sk = sk;
skb->protocol = htons(ETH_P_IEEE802154);
- dev_put(dev);
-
err = dev_queue_xmit(skb);
if (err > 0)
err = net_xmit_errno(err);
+ dev_put(dev);
+
return err ?: size;
out_skb:
@@ -690,15 +689,14 @@
goto out_skb;
skb->dev = dev;
- skb->sk = sk;
skb->protocol = htons(ETH_P_IEEE802154);
- dev_put(dev);
-
err = dev_queue_xmit(skb);
if (err > 0)
err = net_xmit_errno(err);
+ dev_put(dev);
+
return err ?: size;
out_skb:
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index f83de23..afcb435 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -8,7 +8,7 @@
inet_timewait_sock.o inet_connection_sock.o \
tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \
tcp_minisocks.o tcp_cong.o tcp_metrics.o tcp_fastopen.o \
- tcp_rate.o tcp_recovery.o \
+ tcp_rate.o tcp_recovery.o tcp_ulp.o \
tcp_offload.o datagram.o raw.o udp.o udplite.o \
udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \
fib_frontend.o fib_semantics.o fib_trie.o fib_notifier.o \
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 22377c8..e8f8623 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -220,7 +220,9 @@
ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
sg_init_table(sg, nfrags + sglists);
- skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ if (unlikely(err < 0))
+ goto out_free;
if (x->props.flags & XFRM_STATE_ESN) {
/* Attach seqhi sg right after packet payload */
@@ -393,7 +395,9 @@
skb_push(skb, ihl);
sg_init_table(sg, nfrags + sglists);
- skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ if (unlikely(err < 0))
+ goto out_free;
if (x->props.flags & XFRM_STATE_ESN) {
/* Attach seqhi sg right after packet payload */
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index e9f3386..8b52179 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -539,7 +539,7 @@
skb_reserve(skb, hlen);
skb_reset_network_header(skb);
- arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev));
+ arp = skb_put(skb, arp_hdr_len(dev));
skb->dev = dev;
skb->protocol = htons(ETH_P_ARP);
if (!src_hw)
@@ -1113,13 +1113,17 @@
{
struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev);
int err = -ENXIO;
+ struct neigh_table *tbl = &arp_tbl;
if (neigh) {
if (neigh->nud_state & ~NUD_NOARP)
err = neigh_update(neigh, NULL, NUD_FAILED,
NEIGH_UPDATE_F_OVERRIDE|
NEIGH_UPDATE_F_ADMIN, 0);
+ write_lock_bh(&tbl->lock);
neigh_release(neigh);
+ neigh_remove_one(neigh, tbl);
+ write_unlock_bh(&tbl->lock);
}
return err;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index df14815..a7dd088 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -176,6 +176,7 @@
static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
+static BLOCKING_NOTIFIER_HEAD(inetaddr_validator_chain);
static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
int destroy);
#ifdef CONFIG_SYSCTL
@@ -441,6 +442,8 @@
{
struct in_device *in_dev = ifa->ifa_dev;
struct in_ifaddr *ifa1, **ifap, **last_primary;
+ struct in_validator_info ivi;
+ int ret;
ASSERT_RTNL();
@@ -471,6 +474,23 @@
}
}
+ /* Allow any devices that wish to register ifaddr validators to weigh
+ * in now, before changes are committed. The rtnl lock is serializing
+ * access here, so the state should not change between a validator call
+ * and a final notify on commit. This isn't invoked on promotion under
+ * the assumption that validators are checking the address itself, and
+ * not the flags.
+ */
+ ivi.ivi_addr = ifa->ifa_address;
+ ivi.ivi_dev = ifa->ifa_dev;
+ ret = blocking_notifier_call_chain(&inetaddr_validator_chain,
+ NETDEV_UP, &ivi);
+ ret = notifier_to_errno(ret);
+ if (ret) {
+ inet_free_ifa(ifa);
+ return ret;
+ }
+
if (!(ifa->ifa_flags & IFA_F_SECONDARY)) {
prandom_seed((__force u32) ifa->ifa_local);
ifap = last_primary;
@@ -1356,6 +1376,19 @@
}
EXPORT_SYMBOL(unregister_inetaddr_notifier);
+int register_inetaddr_validator_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&inetaddr_validator_chain, nb);
+}
+EXPORT_SYMBOL(register_inetaddr_validator_notifier);
+
+int unregister_inetaddr_validator_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&inetaddr_validator_chain,
+ nb);
+}
+EXPORT_SYMBOL(unregister_inetaddr_validator_notifier);
+
/* Rename ifa_labels for a device name change. Make some effort to preserve
* existing alias numbering and to create unique labels if possible.
*/
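
For illustration, a hedged sketch of how a module might hook the validator chain added above: the callback sees the NETDEV_UP event with a struct in_validator_info and can veto the address by returning an errno wrapped via notifier_from_errno(). The 192.0.2.0/24 filter and all names here are hypothetical:

    static int my_inetaddr_validator(struct notifier_block *nb,
                                     unsigned long event, void *ptr)
    {
            struct in_validator_info *ivi = ptr;

            /* Veto any address inside 192.0.2.0/24 (TEST-NET-1). */
            if ((ntohl(ivi->ivi_addr) & 0xffffff00) == 0xc0000200)
                    return notifier_from_errno(-EADDRNOTAVAIL);
            return NOTIFY_DONE;
    }

    static struct notifier_block my_validator_nb = {
            .notifier_call = my_inetaddr_validator,
    };

    /* register_inetaddr_validator_notifier(&my_validator_nb) at init,
     * unregister_inetaddr_validator_notifier(&my_validator_nb) at exit. */
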
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 93322f8..1f18b46 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -377,9 +377,11 @@
esp->esph = esph;
sg_init_table(sg, esp->nfrags);
- skb_to_sgvec(skb, sg,
- (unsigned char *)esph - skb->data,
- assoclen + ivlen + esp->clen + alen);
+ err = skb_to_sgvec(skb, sg,
+ (unsigned char *)esph - skb->data,
+ assoclen + ivlen + esp->clen + alen);
+ if (unlikely(err < 0))
+ goto error;
if (!esp->inplace) {
int allocsize;
@@ -403,9 +405,11 @@
spin_unlock_bh(&x->lock);
sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
- skb_to_sgvec(skb, dsg,
- (unsigned char *)esph - skb->data,
- assoclen + ivlen + esp->clen + alen);
+ err = skb_to_sgvec(skb, dsg,
+ (unsigned char *)esph - skb->data,
+ assoclen + ivlen + esp->clen + alen);
+ if (unlikely(err < 0))
+ goto error;
}
if ((x->props.flags & XFRM_STATE_ESN))
@@ -605,7 +609,7 @@
* decryption.
*/
if ((x->props.flags & XFRM_STATE_ESN)) {
- esph = (void *)skb_push(skb, 4);
+ esph = skb_push(skb, 4);
*seqhi = esph->spi;
esph->spi = esph->seq_no;
esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
@@ -690,7 +694,9 @@
esp_input_set_header(skb, seqhi);
sg_init_table(sg, nfrags);
- skb_to_sgvec(skb, sg, 0, skb->len);
+ err = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (unlikely(err < 0))
+ goto out;
skb->ip_summed = CHECKSUM_NONE;
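
The ah4/esp4 conversions above all follow one pattern: skb_to_sgvec() and skb_to_sgvec_nomark() are assumed to now return the number of scatterlist entries filled, or a negative errno when the skb would overflow the supplied table, so each call site checks the result instead of ignoring it. The distilled call pattern (the unwind label depends on the caller):

    sg_init_table(sg, nfrags);
    err = skb_to_sgvec(skb, sg, 0, skb->len);
    if (unlikely(err < 0))
            goto out_free;
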
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 83e3ed2..4e678fa 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -588,13 +588,15 @@
if (cmd == SIOCDELRT) {
tb = fib_get_table(net, cfg.fc_table);
if (tb)
- err = fib_table_delete(net, tb, &cfg);
+ err = fib_table_delete(net, tb, &cfg,
+ NULL);
else
err = -ESRCH;
} else {
tb = fib_new_table(net, cfg.fc_table);
if (tb)
- err = fib_table_insert(net, tb, &cfg);
+ err = fib_table_insert(net, tb,
+ &cfg, NULL);
else
err = -ENOBUFS;
}
@@ -626,14 +628,15 @@
};
static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
- struct nlmsghdr *nlh, struct fib_config *cfg)
+ struct nlmsghdr *nlh, struct fib_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct nlattr *attr;
int err, remaining;
struct rtmsg *rtm;
err = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipv4_policy,
- NULL);
+ extack);
if (err < 0)
goto errout;
@@ -654,6 +657,7 @@
cfg->fc_nlinfo.nl_net = net;
if (cfg->fc_type > RTN_MAX) {
+ NL_SET_ERR_MSG(extack, "Invalid route type");
err = -EINVAL;
goto errout;
}
@@ -681,7 +685,8 @@
break;
case RTA_MULTIPATH:
err = lwtunnel_valid_encap_type_attr(nla_data(attr),
- nla_len(attr));
+ nla_len(attr),
+ extack);
if (err < 0)
goto errout;
cfg->fc_mp = nla_data(attr);
@@ -698,7 +703,8 @@
break;
case RTA_ENCAP_TYPE:
cfg->fc_encap_type = nla_get_u16(attr);
- err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
+ err = lwtunnel_valid_encap_type(cfg->fc_encap_type,
+ extack);
if (err < 0)
goto errout;
break;
@@ -718,17 +724,18 @@
struct fib_table *tb;
int err;
- err = rtm_to_fib_config(net, skb, nlh, &cfg);
+ err = rtm_to_fib_config(net, skb, nlh, &cfg, extack);
if (err < 0)
goto errout;
tb = fib_get_table(net, cfg.fc_table);
if (!tb) {
+ NL_SET_ERR_MSG(extack, "FIB table does not exist");
err = -ESRCH;
goto errout;
}
- err = fib_table_delete(net, tb, &cfg);
+ err = fib_table_delete(net, tb, &cfg, extack);
errout:
return err;
}
@@ -741,7 +748,7 @@
struct fib_table *tb;
int err;
- err = rtm_to_fib_config(net, skb, nlh, &cfg);
+ err = rtm_to_fib_config(net, skb, nlh, &cfg, extack);
if (err < 0)
goto errout;
@@ -751,7 +758,7 @@
goto errout;
}
- err = fib_table_insert(net, tb, &cfg);
+ err = fib_table_insert(net, tb, &cfg, extack);
errout:
return err;
}
@@ -845,9 +852,9 @@
cfg.fc_scope = RT_SCOPE_HOST;
if (cmd == RTM_NEWROUTE)
- fib_table_insert(net, tb, &cfg);
+ fib_table_insert(net, tb, &cfg, NULL);
else
- fib_table_delete(net, tb, &cfg);
+ fib_table_delete(net, tb, &cfg, NULL);
}
void fib_add_ifaddr(struct in_ifaddr *ifa)
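
The extack plumbing above follows one pattern throughout the FIB code: thread the struct netlink_ext_ack down from the rtnetlink handler and attach a message just before each early error return. A sketch with a hypothetical helper name:

    static int validate_cfg(struct fib_config *cfg,
                            struct netlink_ext_ack *extack)
    {
            if (cfg->fc_type > RTN_MAX) {
                    NL_SET_ERR_MSG(extack, "Invalid route type");
                    return -EINVAL;
            }
            return 0;
    }

Userspace that requests extended acks then receives the string alongside the errno, instead of a bare -EINVAL.
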
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index 9c02920..769ab87 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -28,8 +28,10 @@
/* Exported by fib_semantics.c */
void fib_release_info(struct fib_info *);
-struct fib_info *fib_create_info(struct fib_config *cfg);
-int fib_nh_match(struct fib_config *cfg, struct fib_info *fi);
+struct fib_info *fib_create_info(struct fib_config *cfg,
+ struct netlink_ext_ack *extack);
+int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
+ struct netlink_ext_ack *extack);
int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, u32 tb_id,
u8 type, __be32 dst, int dst_len, u8 tos, struct fib_info *fi,
unsigned int);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index ad9ad4a..ff47ea1 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -32,6 +32,7 @@
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/netlink.h>
#include <net/arp.h>
#include <net/ip.h>
@@ -151,7 +152,8 @@
* free_fib_info_rcu()
*/
- dst_free(&rt->dst);
+ dst_dev_put(&rt->dst);
+ dst_release_immediate(&rt->dst);
}
static void free_nh_exceptions(struct fib_nh *nh)
@@ -193,8 +195,10 @@
struct rtable *rt;
rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
- if (rt)
- dst_free(&rt->dst);
+ if (rt) {
+ dst_dev_put(&rt->dst);
+ dst_release_immediate(&rt->dst);
+ }
}
free_percpu(rtp);
}
@@ -456,7 +460,8 @@
#ifdef CONFIG_IP_ROUTE_MULTIPATH
-static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
+static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining,
+ struct netlink_ext_ack *extack)
{
int nhs = 0;
@@ -466,22 +471,35 @@
}
/* leftover implies invalid nexthop configuration, discard it */
- return remaining > 0 ? 0 : nhs;
+ if (remaining > 0) {
+ NL_SET_ERR_MSG(extack,
+ "Invalid nexthop configuration - extra data after nexthops");
+ nhs = 0;
+ }
+
+ return nhs;
}
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
- int remaining, struct fib_config *cfg)
+ int remaining, struct fib_config *cfg,
+ struct netlink_ext_ack *extack)
{
int ret;
change_nexthops(fi) {
int attrlen;
- if (!rtnh_ok(rtnh, remaining))
+ if (!rtnh_ok(rtnh, remaining)) {
+ NL_SET_ERR_MSG(extack,
+ "Invalid nexthop configuration - extra data after nexthop");
return -EINVAL;
+ }
- if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+ if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) {
+ NL_SET_ERR_MSG(extack,
+ "Invalid flags for nexthop - can not contain DEAD or LINKDOWN");
return -EINVAL;
+ }
nexthop_nh->nh_flags =
(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
@@ -507,13 +525,17 @@
nla_entype = nla_find(attrs, attrlen,
RTA_ENCAP_TYPE);
- if (!nla_entype)
+ if (!nla_entype) {
+ NL_SET_BAD_ATTR(extack, nla);
+ NL_SET_ERR_MSG(extack,
+ "Encap type is missing");
goto err_inval;
+ }
ret = lwtunnel_build_state(nla_get_u16(
nla_entype),
nla, AF_INET, cfg,
- &lwtstate);
+ &lwtstate, extack);
if (ret)
goto errout;
nexthop_nh->nh_lwtstate =
@@ -595,7 +617,8 @@
static int fib_encap_match(u16 encap_type,
struct nlattr *encap,
const struct fib_nh *nh,
- const struct fib_config *cfg)
+ const struct fib_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct lwtunnel_state *lwtstate;
int ret, result = 0;
@@ -603,8 +626,8 @@
if (encap_type == LWTUNNEL_ENCAP_NONE)
return 0;
- ret = lwtunnel_build_state(encap_type, encap,
- AF_INET, cfg, &lwtstate);
+ ret = lwtunnel_build_state(encap_type, encap, AF_INET,
+ cfg, &lwtstate, extack);
if (!ret) {
result = lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
lwtstate_free(lwtstate);
@@ -613,7 +636,8 @@
return result;
}
-int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
+int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
+ struct netlink_ext_ack *extack)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
struct rtnexthop *rtnh;
@@ -625,9 +649,9 @@
if (cfg->fc_oif || cfg->fc_gw) {
if (cfg->fc_encap) {
- if (fib_encap_match(cfg->fc_encap_type,
- cfg->fc_encap, fi->fib_nh, cfg))
- return 1;
+ if (fib_encap_match(cfg->fc_encap_type, cfg->fc_encap,
+ fi->fib_nh, cfg, extack))
+ return 1;
}
if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
(!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
@@ -716,7 +740,7 @@
* |-> {local prefix} (terminal node)
*/
static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
- struct fib_nh *nh)
+ struct fib_nh *nh, struct netlink_ext_ack *extack)
{
int err = 0;
struct net *net;
@@ -729,16 +753,25 @@
if (nh->nh_flags & RTNH_F_ONLINK) {
unsigned int addr_type;
- if (cfg->fc_scope >= RT_SCOPE_LINK)
+ if (cfg->fc_scope >= RT_SCOPE_LINK) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop has invalid scope");
return -EINVAL;
+ }
dev = __dev_get_by_index(net, nh->nh_oif);
if (!dev)
return -ENODEV;
- if (!(dev->flags & IFF_UP))
+ if (!(dev->flags & IFF_UP)) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop device is not up");
return -ENETDOWN;
+ }
addr_type = inet_addr_type_dev_table(net, dev, nh->nh_gw);
- if (addr_type != RTN_UNICAST)
+ if (addr_type != RTN_UNICAST) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop has invalid gateway");
return -EINVAL;
+ }
if (!netif_carrier_ok(dev))
nh->nh_flags |= RTNH_F_LINKDOWN;
nh->nh_dev = dev;
@@ -778,18 +811,25 @@
}
if (err) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop has invalid gateway");
rcu_read_unlock();
return err;
}
}
err = -EINVAL;
- if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
+ if (res.type != RTN_UNICAST && res.type != RTN_LOCAL) {
+ NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
goto out;
+ }
nh->nh_scope = res.scope;
nh->nh_oif = FIB_RES_OIF(res);
nh->nh_dev = dev = FIB_RES_DEV(res);
- if (!dev)
+ if (!dev) {
+ NL_SET_ERR_MSG(extack,
+ "No egress device for nexthop gateway");
goto out;
+ }
dev_hold(dev);
if (!netif_carrier_ok(dev))
nh->nh_flags |= RTNH_F_LINKDOWN;
@@ -797,17 +837,21 @@
} else {
struct in_device *in_dev;
- if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))
+ if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK)) {
+ NL_SET_ERR_MSG(extack,
+ "Invalid flags for nexthop - PERVASIVE and ONLINK can not be set");
return -EINVAL;
-
+ }
rcu_read_lock();
err = -ENODEV;
in_dev = inetdev_by_index(net, nh->nh_oif);
if (!in_dev)
goto out;
err = -ENETDOWN;
- if (!(in_dev->dev->flags & IFF_UP))
+ if (!(in_dev->dev->flags & IFF_UP)) {
+ NL_SET_ERR_MSG(extack, "Device for nexthop is not up");
goto out;
+ }
nh->nh_dev = in_dev->dev;
dev_hold(nh->nh_dev);
nh->nh_scope = RT_SCOPE_HOST;
@@ -982,7 +1026,8 @@
return 0;
}
-struct fib_info *fib_create_info(struct fib_config *cfg)
+struct fib_info *fib_create_info(struct fib_config *cfg,
+ struct netlink_ext_ack *extack)
{
int err;
struct fib_info *fi = NULL;
@@ -994,15 +1039,20 @@
goto err_inval;
/* Fast check to catch the most weird cases */
- if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
+ if (fib_props[cfg->fc_type].scope > cfg->fc_scope) {
+ NL_SET_ERR_MSG(extack, "Invalid scope");
goto err_inval;
+ }
- if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+ if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) {
+ NL_SET_ERR_MSG(extack,
+ "Invalid rtm_flags - can not contain DEAD or LINKDOWN");
goto err_inval;
+ }
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (cfg->fc_mp) {
- nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
+ nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len, extack);
if (nhs == 0)
goto err_inval;
}
@@ -1065,18 +1115,29 @@
if (cfg->fc_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg);
+ err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg, extack);
if (err != 0)
goto failure;
- if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif)
+ if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop device index does not match RTA_OIF");
goto err_inval;
- if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
+ }
+ if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop gateway does not match RTA_GATEWAY");
goto err_inval;
+ }
#ifdef CONFIG_IP_ROUTE_CLASSID
- if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
+ if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop class id does not match RTA_FLOW");
goto err_inval;
+ }
#endif
#else
+ NL_SET_ERR_MSG(extack,
+ "Multipath support not enabled in kernel");
goto err_inval;
#endif
} else {
@@ -1085,11 +1146,14 @@
if (cfg->fc_encap) {
struct lwtunnel_state *lwtstate;
- if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE)
+ if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE) {
+ NL_SET_ERR_MSG(extack,
+ "LWT encap type not specified");
goto err_inval;
+ }
err = lwtunnel_build_state(cfg->fc_encap_type,
cfg->fc_encap, AF_INET, cfg,
- &lwtstate);
+ &lwtstate, extack);
if (err)
goto failure;
@@ -1109,8 +1173,11 @@
}
if (fib_props[cfg->fc_type].error) {
- if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
+ if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp) {
+ NL_SET_ERR_MSG(extack,
+ "Gateway, device and multipath can not be specified for this route type");
goto err_inval;
+ }
goto link_it;
} else {
switch (cfg->fc_type) {
@@ -1121,19 +1188,30 @@
case RTN_MULTICAST:
break;
default:
+ NL_SET_ERR_MSG(extack, "Invalid route type");
goto err_inval;
}
}
- if (cfg->fc_scope > RT_SCOPE_HOST)
+ if (cfg->fc_scope > RT_SCOPE_HOST) {
+ NL_SET_ERR_MSG(extack, "Invalid scope");
goto err_inval;
+ }
if (cfg->fc_scope == RT_SCOPE_HOST) {
struct fib_nh *nh = fi->fib_nh;
/* Local address is added. */
- if (nhs != 1 || nh->nh_gw)
+ if (nhs != 1) {
+ NL_SET_ERR_MSG(extack,
+ "Route with host scope can not have multiple nexthops");
goto err_inval;
+ }
+ if (nh->nh_gw) {
+ NL_SET_ERR_MSG(extack,
+ "Route with host scope can not have a gateway");
+ goto err_inval;
+ }
nh->nh_scope = RT_SCOPE_NOWHERE;
nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
err = -ENODEV;
@@ -1143,7 +1221,7 @@
int linkdown = 0;
change_nexthops(fi) {
- err = fib_check_nh(cfg, fi, nexthop_nh);
+ err = fib_check_nh(cfg, fi, nexthop_nh, extack);
if (err != 0)
goto failure;
if (nexthop_nh->nh_flags & RTNH_F_LINKDOWN)
@@ -1153,8 +1231,10 @@
fi->fib_flags |= RTNH_F_LINKDOWN;
}
- if (fi->fib_prefsrc && !fib_valid_prefsrc(cfg, fi->fib_prefsrc))
+ if (fi->fib_prefsrc && !fib_valid_prefsrc(cfg, fi->fib_prefsrc)) {
+ NL_SET_ERR_MSG(extack, "Invalid prefsrc address");
goto err_inval;
+ }
change_nexthops(fi) {
fib_info_update_nh_saddr(net, nexthop_nh);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 51182ff..d56659e 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1099,9 +1099,25 @@
return 0;
}
+static bool fib_valid_key_len(u32 key, u8 plen, struct netlink_ext_ack *extack)
+{
+ if (plen > KEYLENGTH) {
+ NL_SET_ERR_MSG(extack, "Invalid prefix length");
+ return false;
+ }
+
+ if ((plen < KEYLENGTH) && (key << plen)) {
+ NL_SET_ERR_MSG(extack,
+ "Invalid prefix for given prefix length");
+ return false;
+ }
+
+ return true;
+}
+
/* Caller must hold RTNL. */
int fib_table_insert(struct net *net, struct fib_table *tb,
- struct fib_config *cfg)
+ struct fib_config *cfg, struct netlink_ext_ack *extack)
{
enum fib_event_type event = FIB_EVENT_ENTRY_ADD;
struct trie *t = (struct trie *)tb->tb_data;
@@ -1115,17 +1131,14 @@
u32 key;
int err;
- if (plen > KEYLENGTH)
- return -EINVAL;
-
key = ntohl(cfg->fc_dst);
+ if (!fib_valid_key_len(key, plen, extack))
+ return -EINVAL;
+
pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
- if ((plen < KEYLENGTH) && (key << plen))
- return -EINVAL;
-
- fi = fib_create_info(cfg);
+ fi = fib_create_info(cfg, extack);
if (IS_ERR(fi)) {
err = PTR_ERR(fi);
goto err;
@@ -1452,6 +1465,7 @@
if (!(fib_flags & FIB_LOOKUP_NOREF))
atomic_inc(&fi->fib_clntref);
+ res->prefix = htonl(n->key);
res->prefixlen = KEYLENGTH - fa->fa_slen;
res->nh_sel = nhsel;
res->type = fa->fa_type;
@@ -1507,7 +1521,7 @@
/* Caller must hold RTNL. */
int fib_table_delete(struct net *net, struct fib_table *tb,
- struct fib_config *cfg)
+ struct fib_config *cfg, struct netlink_ext_ack *extack)
{
struct trie *t = (struct trie *) tb->tb_data;
struct fib_alias *fa, *fa_to_delete;
@@ -1517,12 +1531,9 @@
u8 tos = cfg->fc_tos;
u32 key;
- if (plen > KEYLENGTH)
- return -EINVAL;
-
key = ntohl(cfg->fc_dst);
- if ((plen < KEYLENGTH) && (key << plen))
+ if (!fib_valid_key_len(key, plen, extack))
return -EINVAL;
l = fib_find_node(t, &tp, key);
@@ -1551,7 +1562,7 @@
fi->fib_prefsrc == cfg->fc_prefsrc) &&
(!cfg->fc_protocol ||
fi->fib_protocol == cfg->fc_protocol) &&
- fib_nh_match(cfg, fi) == 0) {
+ fib_nh_match(cfg, fi, extack) == 0) {
fa_to_delete = fa;
break;
}
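
fib_valid_key_len() centralizes the prefix check that insert and delete previously open-coded: after ntohl() the host bits of a /plen key must be zero, which key << plen exposes directly (the plen < KEYLENGTH guard also avoids an undefined 32-bit shift). A standalone illustration of the same arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define KEYLENGTH 32

    static int prefix_ok(uint32_t key, uint8_t plen)
    {
            if (plen > KEYLENGTH)
                    return 0;
            if (plen < KEYLENGTH && (key << plen))
                    return 0;
            return 1;
    }

    int main(void)
    {
            printf("%d\n", prefix_ok(0x0a000000, 8)); /* 10.0.0.0/8 -> 1 */
            printf("%d\n", prefix_ok(0x0a000001, 8)); /* 10.0.0.1/8 -> 0 */
            return 0;
    }
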
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 805f660..8e0257d 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/gue.h>
+#include <net/fou.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/udp.h>
@@ -859,25 +860,6 @@
}
EXPORT_SYMBOL(gue_encap_hlen);
-static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
- struct flowi4 *fl4, u8 *protocol, __be16 sport)
-{
- struct udphdr *uh;
-
- skb_push(skb, sizeof(struct udphdr));
- skb_reset_transport_header(skb);
-
- uh = udp_hdr(skb);
-
- uh->dest = e->dport;
- uh->source = sport;
- uh->len = htons(skb->len);
- udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
- fl4->saddr, fl4->daddr, skb->len);
-
- *protocol = IPPROTO_UDP;
-}
-
int __fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
u8 *protocol, __be16 *sport, int type)
{
@@ -894,24 +876,6 @@
}
EXPORT_SYMBOL(__fou_build_header);
-int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
- u8 *protocol, struct flowi4 *fl4)
-{
- int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
- SKB_GSO_UDP_TUNNEL;
- __be16 sport;
- int err;
-
- err = __fou_build_header(skb, e, protocol, &sport, type);
- if (err)
- return err;
-
- fou_build_udp(skb, e, fl4, protocol, sport);
-
- return 0;
-}
-EXPORT_SYMBOL(fou_build_header);
-
int __gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
u8 *protocol, __be16 *sport, int type)
{
@@ -985,8 +949,46 @@
}
EXPORT_SYMBOL(__gue_build_header);
-int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
- u8 *protocol, struct flowi4 *fl4)
+#ifdef CONFIG_NET_FOU_IP_TUNNELS
+
+static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ struct flowi4 *fl4, u8 *protocol, __be16 sport)
+{
+ struct udphdr *uh;
+
+ skb_push(skb, sizeof(struct udphdr));
+ skb_reset_transport_header(skb);
+
+ uh = udp_hdr(skb);
+
+ uh->dest = e->dport;
+ uh->source = sport;
+ uh->len = htons(skb->len);
+ udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
+ fl4->saddr, fl4->daddr, skb->len);
+
+ *protocol = IPPROTO_UDP;
+}
+
+static int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi4 *fl4)
+{
+ int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
+ SKB_GSO_UDP_TUNNEL;
+ __be16 sport;
+ int err;
+
+ err = __fou_build_header(skb, e, protocol, &sport, type);
+ if (err)
+ return err;
+
+ fou_build_udp(skb, e, fl4, protocol, sport);
+
+ return 0;
+}
+
+static int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi4 *fl4)
{
int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
SKB_GSO_UDP_TUNNEL;
@@ -1001,9 +1003,7 @@
return 0;
}
-EXPORT_SYMBOL(gue_build_header);
-#ifdef CONFIG_NET_FOU_IP_TUNNELS
static const struct ip_tunnel_encap_ops fou_iptun_ops = {
.encap_hlen = fou_encap_hlen,
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 9144fa7..c2be26b 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -489,7 +489,7 @@
fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev);
security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
- rt = __ip_route_output_key_hash(net, fl4, skb_in);
+ rt = ip_route_output_key_hash(net, fl4, skb_in);
if (IS_ERR(rt))
return rt;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index ec9a396..c403230 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -414,7 +414,7 @@
skb = igmpv3_newpack(dev, dev->mtu);
if (!skb)
return NULL;
- pgr = (struct igmpv3_grec *)skb_put(skb, sizeof(struct igmpv3_grec));
+ pgr = skb_put(skb, sizeof(struct igmpv3_grec));
pgr->grec_type = type;
pgr->grec_auxwords = 0;
pgr->grec_nsrcs = 0;
@@ -508,7 +508,7 @@
}
if (!skb)
return NULL;
- psrc = (__be32 *)skb_put(skb, sizeof(__be32));
+ psrc = skb_put(skb, sizeof(__be32));
*psrc = psf->sf_inaddr;
scount++; stotal++;
if ((type == IGMPV3_ALLOW_NEW_SOURCES ||
@@ -742,7 +742,7 @@
((u8 *)&iph[1])[2] = 0;
((u8 *)&iph[1])[3] = 0;
- ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
+ ih = skb_put(skb, sizeof(struct igmphdr));
ih->type = type;
ih->code = 0;
ih->csum = 0;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 1054d33..a3fa1a5 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -25,6 +25,7 @@
#include <net/xfrm.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
+#include <net/addrconf.h>
#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
@@ -789,7 +790,6 @@
inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
- newsk->sk_write_space = sk_stream_write_space;
/* listeners have SOCK_RCU_FREE, not the children */
sock_reset_flag(newsk, SOCK_RCU_FREE);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index e90c80a..41394a4 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -592,7 +592,7 @@
struct iphdr *iph;
struct gre_base_hdr *greh;
- iph = (struct iphdr *)skb_push(skb, t->hlen + sizeof(*iph));
+ iph = skb_push(skb, t->hlen + sizeof(*iph));
greh = (struct gre_base_hdr *)(iph+1);
greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
greh->protocol = htons(type);
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index baf196e..90e1147 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -228,14 +228,16 @@
static int ip_tun_build_state(struct nlattr *attr,
unsigned int family, const void *cfg,
- struct lwtunnel_state **ts)
+ struct lwtunnel_state **ts,
+ struct netlink_ext_ack *extack)
{
struct ip_tunnel_info *tun_info;
struct lwtunnel_state *new_state;
struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
int err;
- err = nla_parse_nested(tb, LWTUNNEL_IP_MAX, attr, ip_tun_policy, NULL);
+ err = nla_parse_nested(tb, LWTUNNEL_IP_MAX, attr, ip_tun_policy,
+ extack);
if (err < 0)
return err;
@@ -325,7 +327,8 @@
static int ip6_tun_build_state(struct nlattr *attr,
unsigned int family, const void *cfg,
- struct lwtunnel_state **ts)
+ struct lwtunnel_state **ts,
+ struct netlink_ext_ack *extack)
{
struct ip_tunnel_info *tun_info;
struct lwtunnel_state *new_state;
@@ -333,7 +336,7 @@
int err;
err = nla_parse_nested(tb, LWTUNNEL_IP6_MAX, attr, ip6_tun_policy,
- NULL);
+ extack);
if (err < 0)
return err;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index c3b12b1..4c5dfe6 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -813,8 +813,7 @@
if (!skb)
return;
skb_reserve(skb, hlen);
- b = (struct bootp_pkt *) skb_put(skb, sizeof(struct bootp_pkt));
- memset(b, 0, sizeof(struct bootp_pkt));
+ b = skb_put_zero(skb, sizeof(struct bootp_pkt));
/* Construct IP header */
skb_reset_network_header(skb);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 8ae425c..a1d521b 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -109,6 +109,7 @@
struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
int cmd);
+static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
static void mroute_clean_tables(struct mr_table *mrt, bool all);
static void ipmr_expire_process(unsigned long arg);
@@ -669,7 +670,8 @@
while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
if (ip_hdr(skb)->version == 0) {
- struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
+ struct nlmsghdr *nlh = skb_pull(skb,
+ sizeof(struct iphdr));
nlh->nlmsg_type = NLMSG_ERROR;
nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
skb_trim(skb, nlh->nlmsg_len);
@@ -972,7 +974,8 @@
/* Play the pending entries through our router */
while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
if (ip_hdr(skb)->version == 0) {
- struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
+ struct nlmsghdr *nlh = skb_pull(skb,
+ sizeof(struct iphdr));
if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
nlh->nlmsg_len = skb_tail_pointer(skb) -
@@ -993,8 +996,7 @@
}
}
-/* Bounce a cache query up to mrouted. We could use netlink for this but mrouted
- * expects the following bizarre scheme.
+/* Bounce a cache query up to mrouted and netlink.
*
* Called under mrt_lock.
*/
@@ -1044,7 +1046,7 @@
msg->im_vif = vifi;
skb_dst_set(skb, dst_clone(skb_dst(pkt)));
/* Add our header */
- igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
+ igmp = skb_put(skb, sizeof(struct igmphdr));
igmp->type = assert;
msg->im_msgtype = assert;
igmp->code = 0;
@@ -1060,6 +1062,8 @@
return -EINVAL;
}
+ igmpmsg_netlink_event(mrt, skb);
+
/* Deliver to mrouted */
ret = sock_queue_rcv_skb(mroute_sk, skb);
rcu_read_unlock();
@@ -2339,6 +2343,69 @@
rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
}
+static size_t igmpmsg_netlink_msgsize(size_t payloadlen)
+{
+ size_t len =
+ NLMSG_ALIGN(sizeof(struct rtgenmsg))
+ + nla_total_size(1) /* IPMRA_CREPORT_MSGTYPE */
+ + nla_total_size(4) /* IPMRA_CREPORT_VIF_ID */
+ + nla_total_size(4) /* IPMRA_CREPORT_SRC_ADDR */
+ + nla_total_size(4) /* IPMRA_CREPORT_DST_ADDR */
+ /* IPMRA_CREPORT_PKT */
+ + nla_total_size(payloadlen)
+ ;
+
+ return len;
+}
+
+static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
+{
+ struct net *net = read_pnet(&mrt->net);
+ struct nlmsghdr *nlh;
+ struct rtgenmsg *rtgenm;
+ struct igmpmsg *msg;
+ struct sk_buff *skb;
+ struct nlattr *nla;
+ int payloadlen;
+
+ payloadlen = pkt->len - sizeof(struct igmpmsg);
+ msg = (struct igmpmsg *)skb_network_header(pkt);
+
+ skb = nlmsg_new(igmpmsg_netlink_msgsize(payloadlen), GFP_ATOMIC);
+ if (!skb)
+ goto errout;
+
+ nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
+ sizeof(struct rtgenmsg), 0);
+ if (!nlh)
+ goto errout;
+ rtgenm = nlmsg_data(nlh);
+ rtgenm->rtgen_family = RTNL_FAMILY_IPMR;
+ if (nla_put_u8(skb, IPMRA_CREPORT_MSGTYPE, msg->im_msgtype) ||
+ nla_put_u32(skb, IPMRA_CREPORT_VIF_ID, msg->im_vif) ||
+ nla_put_in_addr(skb, IPMRA_CREPORT_SRC_ADDR,
+ msg->im_src.s_addr) ||
+ nla_put_in_addr(skb, IPMRA_CREPORT_DST_ADDR,
+ msg->im_dst.s_addr))
+ goto nla_put_failure;
+
+ nla = nla_reserve(skb, IPMRA_CREPORT_PKT, payloadlen);
+ if (!nla || skb_copy_bits(pkt, sizeof(struct igmpmsg),
+ nla_data(nla), payloadlen))
+ goto nla_put_failure;
+
+ nlmsg_end(skb, nlh);
+
+ rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE_R, NULL, GFP_ATOMIC);
+ return;
+
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+errout:
+ kfree_skb(skb);
+ rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE_R, -ENOBUFS);
+}
+
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
@@ -2526,6 +2593,129 @@
return ipmr_mfc_delete(tbl, &mfcc, parent);
}
+static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
+{
+ u32 queue_len = atomic_read(&mrt->cache_resolve_queue_len);
+
+ if (nla_put_u32(skb, IPMRA_TABLE_ID, mrt->id) ||
+ nla_put_u32(skb, IPMRA_TABLE_CACHE_RES_QUEUE_LEN, queue_len) ||
+ nla_put_s32(skb, IPMRA_TABLE_MROUTE_REG_VIF_NUM,
+ mrt->mroute_reg_vif_num) ||
+ nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_ASSERT,
+ mrt->mroute_do_assert) ||
+ nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim))
+ return false;
+
+ return true;
+}
+
+static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
+{
+ struct nlattr *vif_nest;
+ struct vif_device *vif;
+
+ /* if the VIF doesn't exist just continue */
+ if (!VIF_EXISTS(mrt, vifid))
+ return true;
+
+ vif = &mrt->vif_table[vifid];
+ vif_nest = nla_nest_start(skb, IPMRA_VIF);
+ if (!vif_nest)
+ return false;
+ if (nla_put_u32(skb, IPMRA_VIFA_IFINDEX, vif->dev->ifindex) ||
+ nla_put_u32(skb, IPMRA_VIFA_VIF_ID, vifid) ||
+ nla_put_u16(skb, IPMRA_VIFA_FLAGS, vif->flags) ||
+ nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_IN, vif->bytes_in,
+ IPMRA_VIFA_PAD) ||
+ nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_OUT, vif->bytes_out,
+ IPMRA_VIFA_PAD) ||
+ nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_IN, vif->pkt_in,
+ IPMRA_VIFA_PAD) ||
+ nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_OUT, vif->pkt_out,
+ IPMRA_VIFA_PAD) ||
+ nla_put_be32(skb, IPMRA_VIFA_LOCAL_ADDR, vif->local) ||
+ nla_put_be32(skb, IPMRA_VIFA_REMOTE_ADDR, vif->remote)) {
+ nla_nest_cancel(skb, vif_nest);
+ return false;
+ }
+ nla_nest_end(skb, vif_nest);
+
+ return true;
+}
+
+static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ struct nlmsghdr *nlh = NULL;
+ unsigned int t = 0, s_t;
+ unsigned int e = 0, s_e;
+ struct mr_table *mrt;
+
+ s_t = cb->args[0];
+ s_e = cb->args[1];
+
+ ipmr_for_each_table(mrt, net) {
+ struct nlattr *vifs, *af;
+ struct ifinfomsg *hdr;
+ u32 i;
+
+ if (t < s_t)
+ goto skip_table;
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, RTM_NEWLINK,
+ sizeof(*hdr), NLM_F_MULTI);
+ if (!nlh)
+ break;
+
+ hdr = nlmsg_data(nlh);
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->ifi_family = RTNL_FAMILY_IPMR;
+
+ af = nla_nest_start(skb, IFLA_AF_SPEC);
+ if (!af) {
+ nlmsg_cancel(skb, nlh);
+ goto out;
+ }
+
+ if (!ipmr_fill_table(mrt, skb)) {
+ nlmsg_cancel(skb, nlh);
+ goto out;
+ }
+
+ vifs = nla_nest_start(skb, IPMRA_TABLE_VIFS);
+ if (!vifs) {
+ nla_nest_end(skb, af);
+ nlmsg_end(skb, nlh);
+ goto out;
+ }
+ for (i = 0; i < mrt->maxvif; i++) {
+ if (e < s_e)
+ goto skip_entry;
+ if (!ipmr_fill_vif(mrt, i, skb)) {
+ nla_nest_end(skb, vifs);
+ nla_nest_end(skb, af);
+ nlmsg_end(skb, nlh);
+ goto out;
+ }
+skip_entry:
+ e++;
+ }
+ s_e = 0;
+ e = 0;
+ nla_nest_end(skb, vifs);
+ nla_nest_end(skb, af);
+ nlmsg_end(skb, nlh);
+skip_table:
+ t++;
+ }
+
+out:
+ cb->args[1] = e;
+ cb->args[0] = t;
+
+ return skb->len;
+}
+
#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing :
* /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
@@ -2868,6 +3058,9 @@
ipmr_rtm_route, NULL, NULL);
rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE,
ipmr_rtm_route, NULL, NULL);
+
+ rtnl_register(RTNL_FAMILY_IPMR, RTM_GETLINK,
+ NULL, ipmr_rtm_dumplink, NULL);
return 0;
#ifdef CONFIG_IP_PIMSM_V2
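
The new igmpmsg_netlink_event() mirrors each cache report to the RTNLGRP_IPV4_MROUTE_R group, so a daemon no longer has to own the mroute socket to observe them. A hedged userspace sketch of a listener, assuming RTNLGRP_IPV4_MROUTE_R is visible through the uapi headers:

    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>
    #include <sys/socket.h>
    #include <stdio.h>
    #include <unistd.h>

    #ifndef SOL_NETLINK
    #define SOL_NETLINK 270
    #endif

    int main(void)
    {
            struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
            int grp = RTNLGRP_IPV4_MROUTE_R;
            char buf[8192];
            int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

            if (fd < 0)
                    return 1;
            if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
                    return 1;
            if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
                           &grp, sizeof(grp)) < 0)
                    return 1;
            for (;;) {
                    ssize_t len = recv(fd, buf, sizeof(buf), 0);

                    if (len <= 0)
                            break;
                    /* RTM_NEWCACHEREPORT messages carrying IPMRA_CREPORT_*
                     * attributes; parse with a netlink library of choice. */
                    printf("cache report, %zd bytes\n", len);
            }
            close(fd);
            return 0;
    }
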
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index af2b69b..f1528f7 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -24,7 +24,7 @@
struct iphdr *iph;
skb_reset_network_header(skb);
- iph = (struct iphdr *)skb_put(skb, sizeof(*iph));
+ iph = skb_put(skb, sizeof(*iph));
iph->version = 4;
iph->ihl = sizeof(*iph) / 4;
iph->tos = 0;
@@ -91,7 +91,7 @@
niph = synproxy_build_ip(net, nskb, iph->daddr, iph->saddr);
skb_reset_transport_header(nskb);
- nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->dest;
nth->dest = th->source;
nth->seq = htonl(__cookie_v4_init_sequence(iph, th, &mss));
@@ -133,7 +133,7 @@
niph = synproxy_build_ip(net, nskb, iph->saddr, iph->daddr);
skb_reset_transport_header(nskb);
- nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->source;
nth->dest = th->dest;
nth->seq = htonl(recv_seq - 1);
@@ -178,7 +178,7 @@
niph = synproxy_build_ip(net, nskb, iph->daddr, iph->saddr);
skb_reset_transport_header(nskb);
- nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->dest;
nth->dest = th->source;
nth->seq = htonl(ntohl(th->ack_seq));
@@ -216,7 +216,7 @@
niph = synproxy_build_ip(net, nskb, iph->saddr, iph->daddr);
skb_reset_transport_header(nskb);
- nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->source;
nth->dest = th->dest;
nth->seq = htonl(ntohl(th->seq) + 1);
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index 7cd8d0d..eeacbda 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -51,7 +51,7 @@
struct iphdr *niph, *oiph = ip_hdr(oldskb);
skb_reset_network_header(nskb);
- niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
+ niph = skb_put(nskb, sizeof(struct iphdr));
niph->version = 4;
niph->ihl = sizeof(struct iphdr) / 4;
niph->tos = 0;
@@ -76,8 +76,7 @@
struct tcphdr *tcph;
skb_reset_transport_header(nskb);
- tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
- memset(tcph, 0, sizeof(*tcph));
+ tcph = skb_put_zero(nskb, sizeof(struct tcphdr));
tcph->source = oth->dest;
tcph->dest = oth->source;
tcph->doff = sizeof(struct tcphdr) / 4;
@@ -172,7 +171,7 @@
struct iphdr *iph = ip_hdr(skb_in);
u8 proto;
- if (skb_in->csum_bad || iph->frag_off & htons(IP_OFFSET))
+ if (iph->frag_off & htons(IP_OFFSET))
return;
if (skb_csum_unnecessary(skb_in)) {
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index fa44e75..43eb6567 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -250,6 +250,7 @@
SNMP_MIB_ITEM("TCPAbortOnLinger", LINUX_MIB_TCPABORTONLINGER),
SNMP_MIB_ITEM("TCPAbortFailed", LINUX_MIB_TCPABORTFAILED),
SNMP_MIB_ITEM("TCPMemoryPressures", LINUX_MIB_TCPMEMORYPRESSURES),
+ SNMP_MIB_ITEM("TCPMemoryPressuresChrono", LINUX_MIB_TCPMEMORYPRESSURESCHRONO),
SNMP_MIB_ITEM("TCPSACKDiscard", LINUX_MIB_TCPSACKDISCARD),
SNMP_MIB_ITEM("TCPDSACKIgnoredOld", LINUX_MIB_TCPDSACKIGNOREDOLD),
SNMP_MIB_ITEM("TCPDSACKIgnoredNoUndo", LINUX_MIB_TCPDSACKIGNOREDNOUNDO),
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6883b3d..c816cd5 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -114,6 +114,8 @@
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
+#include "fib_lookup.h"
+
#define RT_FL_TOS(oldflp4) \
((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
@@ -587,11 +589,6 @@
build_sk_flow_key(fl4, sk);
}
-static inline void rt_free(struct rtable *rt)
-{
- call_rcu(&rt->dst.rcu_head, dst_rcu_free);
-}
-
static DEFINE_SPINLOCK(fnhe_lock);
static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
@@ -601,12 +598,14 @@
rt = rcu_dereference(fnhe->fnhe_rth_input);
if (rt) {
RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
- rt_free(rt);
+ dst_dev_put(&rt->dst);
+ dst_release(&rt->dst);
}
rt = rcu_dereference(fnhe->fnhe_rth_output);
if (rt) {
RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
- rt_free(rt);
+ dst_dev_put(&rt->dst);
+ dst_release(&rt->dst);
}
}
@@ -1300,7 +1299,7 @@
}
static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
- __be32 daddr)
+ __be32 daddr, const bool do_cache)
{
bool ret = false;
@@ -1329,10 +1328,13 @@
if (!rt->rt_gateway)
rt->rt_gateway = daddr;
- if (!(rt->dst.flags & DST_NOCACHE)) {
+ if (do_cache) {
+ dst_hold(&rt->dst);
rcu_assign_pointer(*porig, rt);
- if (orig)
- rt_free(orig);
+ if (orig) {
+ dst_dev_put(&orig->dst);
+ dst_release(&orig->dst);
+ }
ret = true;
}
@@ -1355,12 +1357,20 @@
}
orig = *p;
+ /* hold dst before doing cmpxchg() to avoid a race condition
+ * on this dst
+ */
+ dst_hold(&rt->dst);
prev = cmpxchg(p, orig, rt);
if (prev == orig) {
- if (orig)
- rt_free(orig);
- } else
+ if (orig) {
+ dst_dev_put(&orig->dst);
+ dst_release(&orig->dst);
+ }
+ } else {
+ dst_release(&rt->dst);
ret = false;
+ }
return ret;
}
@@ -1431,7 +1441,8 @@
static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
const struct fib_result *res,
struct fib_nh_exception *fnhe,
- struct fib_info *fi, u16 type, u32 itag)
+ struct fib_info *fi, u16 type, u32 itag,
+ const bool do_cache)
{
bool cached = false;
@@ -1452,8 +1463,8 @@
#endif
rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
if (unlikely(fnhe))
- cached = rt_bind_exception(rt, fnhe, daddr);
- else if (!(rt->dst.flags & DST_NOCACHE))
+ cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
+ else if (do_cache)
cached = rt_cache_route(nh, rt);
if (unlikely(!cached)) {
/* Routes we intend to cache in nexthop exception or
@@ -1461,7 +1472,6 @@
* However, if we are unsuccessful at storing this
* route into the cache we really need to set it.
*/
- rt->dst.flags |= DST_NOCACHE;
if (!rt->rt_gateway)
rt->rt_gateway = daddr;
rt_add_uncached_list(rt);
@@ -1484,7 +1494,7 @@
struct rtable *rt;
rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
- (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) |
+ (will_cache ? 0 : DST_HOST) |
(nopolicy ? DST_NOPOLICY : 0) |
(noxfrm ? DST_NOXFRM : 0));
@@ -1728,7 +1738,8 @@
rth->dst.input = ip_forward;
- rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
+ rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
+ do_cache);
set_lwt_redirect(rth);
skb_dst_set(skb, &rth->dst);
out:
@@ -1860,9 +1871,9 @@
*/
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev)
+ u8 tos, struct net_device *dev,
+ struct fib_result *res)
{
- struct fib_result res;
struct in_device *in_dev = __in_dev_get_rcu(dev);
struct ip_tunnel_info *tun_info;
struct flowi4 fl4;
@@ -1892,8 +1903,8 @@
if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
goto martian_source;
- res.fi = NULL;
- res.table = NULL;
+ res->fi = NULL;
+ res->table = NULL;
if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
goto brd_input;
@@ -1929,17 +1940,17 @@
fl4.daddr = daddr;
fl4.saddr = saddr;
fl4.flowi4_uid = sock_net_uid(net, NULL);
- err = fib_lookup(net, &fl4, &res, 0);
+ err = fib_lookup(net, &fl4, res, 0);
if (err != 0) {
if (!IN_DEV_FORWARD(in_dev))
err = -EHOSTUNREACH;
goto no_route;
}
- if (res.type == RTN_BROADCAST)
+ if (res->type == RTN_BROADCAST)
goto brd_input;
- if (res.type == RTN_LOCAL) {
+ if (res->type == RTN_LOCAL) {
err = fib_validate_source(skb, saddr, daddr, tos,
0, dev, in_dev, &itag);
if (err < 0)
@@ -1951,10 +1962,10 @@
err = -EHOSTUNREACH;
goto no_route;
}
- if (res.type != RTN_UNICAST)
+ if (res->type != RTN_UNICAST)
goto martian_destination;
- err = ip_mkroute_input(skb, &res, in_dev, daddr, saddr, tos);
+ err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos);
out: return err;
brd_input:
@@ -1968,14 +1979,14 @@
goto martian_source;
}
flags |= RTCF_BROADCAST;
- res.type = RTN_BROADCAST;
+ res->type = RTN_BROADCAST;
RT_CACHE_STAT_INC(in_brd);
local_input:
do_cache = false;
- if (res.fi) {
+ if (res->fi) {
if (!itag) {
- rth = rcu_dereference(FIB_RES_NH(res).nh_rth_input);
+ rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
if (rt_cache_valid(rth)) {
skb_dst_set_noref(skb, &rth->dst);
err = 0;
@@ -1986,7 +1997,7 @@
}
rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
- flags | RTCF_LOCAL, res.type,
+ flags | RTCF_LOCAL, res->type,
IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
if (!rth)
goto e_nobufs;
@@ -1996,18 +2007,18 @@
rth->dst.tclassid = itag;
#endif
rth->rt_is_input = 1;
- if (res.table)
- rth->rt_table_id = res.table->tb_id;
+ if (res->table)
+ rth->rt_table_id = res->table->tb_id;
RT_CACHE_STAT_INC(in_slow_tot);
- if (res.type == RTN_UNREACHABLE) {
+ if (res->type == RTN_UNREACHABLE) {
rth->dst.input= ip_error;
rth->dst.error= -err;
rth->rt_flags &= ~RTCF_LOCAL;
}
if (do_cache) {
- struct fib_nh *nh = &FIB_RES_NH(res);
+ struct fib_nh *nh = &FIB_RES_NH(*res);
rth->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
@@ -2016,10 +2027,8 @@
rth->dst.input = lwtunnel_input;
}
- if (unlikely(!rt_cache_route(nh, rth))) {
- rth->dst.flags |= DST_NOCACHE;
+ if (unlikely(!rt_cache_route(nh, rth)))
rt_add_uncached_list(rth);
- }
}
skb_dst_set(skb, &rth->dst);
err = 0;
@@ -2027,9 +2036,9 @@
no_route:
RT_CACHE_STAT_INC(in_no_route);
- res.type = RTN_UNREACHABLE;
- res.fi = NULL;
- res.table = NULL;
+ res->type = RTN_UNREACHABLE;
+ res->fi = NULL;
+ res->table = NULL;
goto local_input;
/*
@@ -2059,11 +2068,22 @@
int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev)
{
- int res;
+ struct fib_result res;
+ int err;
tos &= IPTOS_RT_MASK;
rcu_read_lock();
+ err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
+ rcu_read_unlock();
+ return err;
+}
+EXPORT_SYMBOL(ip_route_input_noref);
+
+/* called with rcu_read_lock held */
+int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ u8 tos, struct net_device *dev, struct fib_result *res)
+{
/* Multicast recognition logic is moved from route cache to here.
The problem was that too many Ethernet cards have broken/missing
hardware multicast filters :-( As a result the host on multicasting
@@ -2078,6 +2098,7 @@
if (ipv4_is_multicast(daddr)) {
struct in_device *in_dev = __in_dev_get_rcu(dev);
int our = 0;
+ int err = -EINVAL;
if (in_dev)
our = ip_check_mc_rcu(in_dev, daddr, saddr,
@@ -2093,7 +2114,6 @@
ip_hdr(skb)->protocol);
}
- res = -EINVAL;
if (our
#ifdef CONFIG_IP_MROUTE
||
@@ -2101,17 +2121,14 @@
IN_DEV_MFORWARD(in_dev))
#endif
) {
- res = ip_route_input_mc(skb, daddr, saddr,
+ err = ip_route_input_mc(skb, daddr, saddr,
tos, dev, our);
}
- rcu_read_unlock();
- return res;
+ return err;
}
- res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
- rcu_read_unlock();
- return res;
+
+ return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
}
-EXPORT_SYMBOL(ip_route_input_noref);
/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
@@ -2207,10 +2224,8 @@
rth = rcu_dereference(*prth);
rt_cache:
- if (rt_cache_valid(rth)) {
- dst_hold(&rth->dst);
+ if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
return rth;
- }
}
add:
@@ -2244,7 +2259,7 @@
#endif
}
- rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
+ rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
set_lwt_redirect(rth);
return rth;
@@ -2254,29 +2269,40 @@
* Major route resolver routine.
*/
-struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
- const struct sk_buff *skb)
+struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
+ const struct sk_buff *skb)
{
- struct net_device *dev_out = NULL;
__u8 tos = RT_FL_TOS(fl4);
- unsigned int flags = 0;
struct fib_result res;
struct rtable *rth;
- int orig_oif;
- int err = -ENETUNREACH;
res.tclassid = 0;
res.fi = NULL;
res.table = NULL;
- orig_oif = fl4->flowi4_oif;
-
fl4->flowi4_iif = LOOPBACK_IFINDEX;
fl4->flowi4_tos = tos & IPTOS_RT_MASK;
fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
rcu_read_lock();
+ rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
+ rcu_read_unlock();
+
+ return rth;
+}
+EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
+
+struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
+ struct fib_result *res,
+ const struct sk_buff *skb)
+{
+ struct net_device *dev_out = NULL;
+ int orig_oif = fl4->flowi4_oif;
+ unsigned int flags = 0;
+ struct rtable *rth;
+ int err = -ENETUNREACH;
+
if (fl4->saddr) {
rth = ERR_PTR(-EINVAL);
if (ipv4_is_multicast(fl4->saddr) ||
@@ -2362,15 +2388,15 @@
fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
dev_out = net->loopback_dev;
fl4->flowi4_oif = LOOPBACK_IFINDEX;
- res.type = RTN_LOCAL;
+ res->type = RTN_LOCAL;
flags |= RTCF_LOCAL;
goto make_route;
}
- err = fib_lookup(net, fl4, &res, 0);
+ err = fib_lookup(net, fl4, res, 0);
if (err) {
- res.fi = NULL;
- res.table = NULL;
+ res->fi = NULL;
+ res->table = NULL;
if (fl4->flowi4_oif &&
(ipv4_is_multicast(fl4->daddr) ||
!netif_index_is_l3_master(net, fl4->flowi4_oif))) {
@@ -2395,43 +2421,41 @@
if (fl4->saddr == 0)
fl4->saddr = inet_select_addr(dev_out, 0,
RT_SCOPE_LINK);
- res.type = RTN_UNICAST;
+ res->type = RTN_UNICAST;
goto make_route;
}
rth = ERR_PTR(err);
goto out;
}
- if (res.type == RTN_LOCAL) {
+ if (res->type == RTN_LOCAL) {
if (!fl4->saddr) {
- if (res.fi->fib_prefsrc)
- fl4->saddr = res.fi->fib_prefsrc;
+ if (res->fi->fib_prefsrc)
+ fl4->saddr = res->fi->fib_prefsrc;
else
fl4->saddr = fl4->daddr;
}
/* L3 master device is the loopback for that domain */
- dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(res)) ? :
+ dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
net->loopback_dev;
fl4->flowi4_oif = dev_out->ifindex;
flags |= RTCF_LOCAL;
goto make_route;
}
- fib_select_path(net, &res, fl4, skb);
+ fib_select_path(net, res, fl4, skb);
- dev_out = FIB_RES_DEV(res);
+ dev_out = FIB_RES_DEV(*res);
fl4->flowi4_oif = dev_out->ifindex;
make_route:
- rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags);
+ rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
out:
- rcu_read_unlock();
return rth;
}
-EXPORT_SYMBOL_GPL(__ip_route_output_key_hash);
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
@@ -2485,7 +2509,7 @@
new->input = dst_discard;
new->output = dst_discard_out;
- new->dev = ort->dst.dev;
+ new->dev = net->loopback_dev;
if (new->dev)
dev_hold(new->dev);
@@ -2500,7 +2524,6 @@
rt->rt_uses_gateway = ort->rt_uses_gateway;
INIT_LIST_HEAD(&rt->rt_uncached);
- dst_free(new);
}
dst_release(dst_orig);
@@ -2525,9 +2548,10 @@
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);
+/* called with rcu_read_lock held */
static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
- u32 seq, int event)
+ u32 seq)
{
struct rtable *rt = skb_rtable(skb);
struct rtmsg *r;
@@ -2536,7 +2560,7 @@
u32 error;
u32 metrics[RTAX_MAX];
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), 0);
+ nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0);
if (!nlh)
return -EMSGSIZE;
@@ -2644,6 +2668,7 @@
struct net *net = sock_net(in_skb->sk);
struct rtmsg *rtm;
struct nlattr *tb[RTA_MAX+1];
+ struct fib_result res = {};
struct rtable *rt = NULL;
struct flowi4 fl4;
__be32 dst = 0;
@@ -2700,10 +2725,12 @@
fl4.flowi4_mark = mark;
fl4.flowi4_uid = uid;
+ rcu_read_lock();
+
if (iif) {
struct net_device *dev;
- dev = __dev_get_by_index(net, iif);
+ dev = dev_get_by_index_rcu(net, iif);
if (!dev) {
err = -ENODEV;
goto errout_free;
@@ -2712,14 +2739,14 @@
skb->protocol = htons(ETH_P_IP);
skb->dev = dev;
skb->mark = mark;
- err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
+ err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
+ dev, &res);
rt = skb_rtable(skb);
if (err == 0 && rt->dst.error)
err = -rt->dst.error;
} else {
- rt = ip_route_output_key(net, &fl4);
-
+ rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
err = 0;
if (IS_ERR(rt))
err = PTR_ERR(rt);
@@ -2735,17 +2762,25 @@
if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
table_id = rt->rt_table_id;
- err = rt_fill_info(net, dst, src, table_id, &fl4, skb,
- NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
- RTM_NEWROUTE);
+ if (rtm->rtm_flags & RTM_F_FIB_MATCH)
+ err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
+ nlh->nlmsg_seq, RTM_NEWROUTE, table_id,
+ rt->rt_type, res.prefix, res.prefixlen,
+ fl4.flowi4_tos, res.fi, 0);
+ else
+ err = rt_fill_info(net, dst, src, table_id, &fl4, skb,
+ NETLINK_CB(in_skb).portid, nlh->nlmsg_seq);
if (err < 0)
goto errout_free;
+ rcu_read_unlock();
+
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
return err;
errout_free:
+ rcu_read_unlock();
kfree_skb(skb);
goto errout;
}
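
With DST_NOCACHE and the dst garbage collector gone, the route cache above pins entries by reference count: take the hold before cmpxchg() publishes the pointer, and drop it again if the exchange loses the race. A generic userspace sketch of that publish-with-reference pattern (names hypothetical):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct obj {
            atomic_int refcnt;
    };

    static void obj_hold(struct obj *o)
    {
            atomic_fetch_add(&o->refcnt, 1);
    }

    static void obj_put(struct obj *o)
    {
            if (atomic_fetch_sub(&o->refcnt, 1) == 1)
                    free(o);
    }

    /* Publish @new into @slot only if it still holds @old; the reference is
     * taken first so a reader can never see a published zero-ref object. */
    static int publish(_Atomic(struct obj *) *slot,
                       struct obj *old, struct obj *new)
    {
            obj_hold(new);                  /* reference travels with the pointer */
            if (atomic_compare_exchange_strong(slot, &old, new)) {
                    if (old)
                            obj_put(old);   /* drop the cache's old reference */
                    return 1;
            }
            obj_put(new);                   /* lost the race: undo our hold */
            return 0;
    }
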
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 0257d96..7835bb4 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -66,10 +66,10 @@
* Since subsequent timestamps use the normal tcp_time_stamp value, we
* must make sure that the resulting initial timestamp is <= tcp_time_stamp.
*/
-__u32 cookie_init_timestamp(struct request_sock *req)
+u64 cookie_init_timestamp(struct request_sock *req)
{
struct inet_request_sock *ireq;
- u32 ts, ts_now = tcp_time_stamp;
+ u32 ts, ts_now = tcp_time_stamp_raw();
u32 options = 0;
ireq = inet_rsk(req);
@@ -88,7 +88,7 @@
ts <<= TSBITS;
ts |= options;
}
- return ts;
+ return (u64)ts * (USEC_PER_SEC / TCP_TS_HZ);
}
@@ -232,7 +232,8 @@
* return false if we decode a tcp option that is disabled
* on the host.
*/
-bool cookie_timestamp_decode(struct tcp_options_received *tcp_opt)
+bool cookie_timestamp_decode(const struct net *net,
+ struct tcp_options_received *tcp_opt)
{
/* echoed timestamp, lowest bits contain options */
u32 options = tcp_opt->rcv_tsecr;
@@ -242,12 +243,12 @@
return true;
}
- if (!sysctl_tcp_timestamps)
+ if (!net->ipv4.sysctl_tcp_timestamps)
return false;
tcp_opt->sack_ok = (options & TS_OPT_SACK) ? TCP_SACK_SEEN : 0;
- if (tcp_opt->sack_ok && !sysctl_tcp_sack)
+ if (tcp_opt->sack_ok && !net->ipv4.sysctl_tcp_sack)
return false;
if ((options & TS_OPT_WSCALE_MASK) == TS_OPT_WSCALE_MASK)
@@ -256,7 +257,7 @@
tcp_opt->wscale_ok = 1;
tcp_opt->snd_wscale = options & TS_OPT_WSCALE_MASK;
- return sysctl_tcp_window_scaling != 0;
+ return net->ipv4.sysctl_tcp_window_scaling != 0;
}
EXPORT_SYMBOL(cookie_timestamp_decode);
@@ -312,14 +313,16 @@
/* check for timestamp cookie support */
memset(&tcp_opt, 0, sizeof(tcp_opt));
- tcp_parse_options(skb, &tcp_opt, 0, NULL);
+ tcp_parse_options(sock_net(sk), skb, &tcp_opt, 0, NULL);
if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) {
- tsoff = secure_tcp_ts_off(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
+ tsoff = secure_tcp_ts_off(sock_net(sk),
+ ip_hdr(skb)->daddr,
+ ip_hdr(skb)->saddr);
tcp_opt.rcv_tsecr -= tsoff;
}
- if (!cookie_timestamp_decode(&tcp_opt))
+ if (!cookie_timestamp_decode(sock_net(sk), &tcp_opt))
goto out;
ret = NULL;
@@ -343,7 +346,7 @@
ireq->wscale_ok = tcp_opt.wscale_ok;
ireq->tstamp_ok = tcp_opt.saw_tstamp;
req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
- treq->snt_synack.v64 = 0;
+ treq->snt_synack = 0;
treq->tfo_listener = false;
ireq->ir_iif = inet_request_bound_dev_if(sk, skb);
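
cookie_init_timestamp() now returns a u64 in microseconds rather than a raw 32-bit timestamp: the reconstructed TS-clock tick count is scaled by USEC_PER_SEC / TCP_TS_HZ. Assuming TCP_TS_HZ of 1000 as used elsewhere in this series, that factor is 1000, i.e. millisecond ticks expressed in microseconds:

    #include <stdint.h>

    #define USEC_PER_SEC 1000000ULL
    #define TCP_TS_HZ    1000ULL    /* assumption: value used by this series */

    static inline uint64_t ts_ticks_to_usec(uint32_t ts)
    {
            /* 1 tick = 1/TCP_TS_HZ s = 1000 us under the assumption above */
            return (uint64_t)ts * (USEC_PER_SEC / TCP_TS_HZ);
    }
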
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 86957e9..9bf8097 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -360,32 +360,30 @@
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (write && ret == 0)
tcp_fastopen_active_timeout_reset();
+
+ return ret;
+}
+
+static int proc_tcp_available_ulp(struct ctl_table *ctl,
+ int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ struct ctl_table tbl = { .maxlen = TCP_ULP_BUF_MAX, };
+ int ret;
+
+ tbl.data = kmalloc(tbl.maxlen, GFP_USER);
+ if (!tbl.data)
+ return -ENOMEM;
+ tcp_get_available_ulp(tbl.data, TCP_ULP_BUF_MAX);
+ ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+ kfree(tbl.data);
+
return ret;
}
static struct ctl_table ipv4_table[] = {
{
- .procname = "tcp_timestamps",
- .data = &sysctl_tcp_timestamps,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
- {
- .procname = "tcp_window_scaling",
- .data = &sysctl_tcp_window_scaling,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
- {
- .procname = "tcp_sack",
- .data = &sysctl_tcp_sack,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
- {
.procname = "tcp_retrans_collapse",
.data = &sysctl_tcp_retrans_collapse,
.maxlen = sizeof(int),
@@ -707,6 +705,12 @@
.proc_handler = proc_dointvec_ms_jiffies,
},
{
+ .procname = "tcp_available_ulp",
+ .maxlen = TCP_ULP_BUF_MAX,
+ .mode = 0444,
+ .proc_handler = proc_tcp_available_ulp,
+ },
+ {
.procname = "icmp_msgs_per_sec",
.data = &sysctl_icmp_msgs_per_sec,
.maxlen = sizeof(int),
@@ -1116,6 +1120,27 @@
.extra2 = &one,
},
#endif
+ {
+ .procname = "tcp_sack",
+ .data = &init_net.ipv4.sysctl_tcp_sack,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "tcp_window_scaling",
+ .data = &init_net.ipv4.sysctl_tcp_window_scaling,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "tcp_timestamps",
+ .data = &init_net.ipv4.sysctl_tcp_timestamps,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
{ }
};
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b5ea036..058f509 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -320,17 +320,36 @@
* All the __sk_mem_schedule() is of this nature: accounting
* is strict, actions are advisory and have some latency.
*/
-int tcp_memory_pressure __read_mostly;
-EXPORT_SYMBOL(tcp_memory_pressure);
+unsigned long tcp_memory_pressure __read_mostly;
+EXPORT_SYMBOL_GPL(tcp_memory_pressure);
void tcp_enter_memory_pressure(struct sock *sk)
{
- if (!tcp_memory_pressure) {
+ unsigned long val;
+
+ if (tcp_memory_pressure)
+ return;
+ val = jiffies;
+
+ if (!val)
+ val--;
+ if (!cmpxchg(&tcp_memory_pressure, 0, val))
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
- tcp_memory_pressure = 1;
- }
}
-EXPORT_SYMBOL(tcp_enter_memory_pressure);
+EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure);
+
+void tcp_leave_memory_pressure(struct sock *sk)
+{
+ unsigned long val;
+
+ if (!tcp_memory_pressure)
+ return;
+ val = xchg(&tcp_memory_pressure, 0);
+ if (val)
+ NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
+ jiffies_to_msecs(jiffies - val));
+}
+EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure);
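
The enter/leave pair above tracks how long a pressure episode lasts without taking a lock: the global word holds the jiffies value at which pressure started, and 0 means no pressure. A userspace sketch of the same pattern, with C11 atomics standing in for the kernel's cmpxchg()/xchg() (an assumption for illustration only):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long memory_pressure;	/* 0, or start time */

static void enter_pressure(unsigned long now)
{
	unsigned long expected = 0;

	if (atomic_load(&memory_pressure))
		return;
	if (!now)	/* keep 0 reserved for "not under pressure" */
		now--;
	/* Only the first thread to flip 0 -> now counts the event. */
	atomic_compare_exchange_strong(&memory_pressure, &expected, now);
}

static void leave_pressure(unsigned long now)
{
	unsigned long start;

	if (!atomic_load(&memory_pressure))
		return;
	start = atomic_exchange(&memory_pressure, 0);
	if (start)
		printf("pressure lasted %lu ticks\n", now - start);
}

int main(void)
{
	enter_pressure(100);
	leave_pressure(350);	/* prints: pressure lasted 250 ticks */
	return 0;
}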
/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
@@ -386,7 +405,7 @@
icsk->icsk_rto = TCP_TIMEOUT_INIT;
tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
- minmax_reset(&tp->rtt_min, tcp_time_stamp, ~0U);
+ minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);
/* So many TCP implementations out there (incorrectly) count the
* initial SYN frame in their delayed-ACK and congestion control
@@ -882,8 +901,8 @@
return mss_now;
}
-static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
- size_t size, int flags)
+ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
+ size_t size, int flags)
{
struct tcp_sock *tp = tcp_sk(sk);
int mss_now, size_goal;
@@ -1013,6 +1032,7 @@
}
return sk_stream_error(sk, flags, err);
}
+EXPORT_SYMBOL_GPL(do_tcp_sendpages);
int tcp_sendpage(struct sock *sk, struct page *page, int offset,
size_t size, int flags)
@@ -2186,7 +2206,7 @@
/* Now socket is owned by kernel and we acquire BH lock
- to finish close. No need to check for user refs.
+ * to finish close. No need to check for user refs.
*/
local_bh_disable();
bh_lock_sock(sk);
@@ -2463,6 +2483,24 @@
release_sock(sk);
return err;
}
+ case TCP_ULP: {
+ char name[TCP_ULP_NAME_MAX];
+
+ if (optlen < 1)
+ return -EINVAL;
+
+ val = strncpy_from_user(name, optval,
+ min_t(long, TCP_ULP_NAME_MAX - 1,
+ optlen));
+ if (val < 0)
+ return -EFAULT;
+ name[val] = 0;
+
+ lock_sock(sk);
+ err = tcp_set_ulp(sk, name);
+ release_sock(sk);
+ return err;
+ }
default:
/* fallthru */
break;
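
From userspace, the new option is a plain setsockopt() carrying the ULP name. A hedged sketch (TCP_ULP == 31 is the uapi value added by this series, defined locally in case system headers predate it; "tls" is only an example name and needs a matching ULP module registered on the running kernel):

#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef TCP_ULP
#define TCP_ULP 31	/* assumption: uapi value from this series */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	/* The kernel copies at most TCP_ULP_NAME_MAX - 1 bytes of the name
	 * and looks it up among the registered ULPs.
	 */
	if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls")) < 0)
		perror("setsockopt(TCP_ULP)");
	close(fd);
	return 0;
}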
@@ -2480,7 +2518,8 @@
case TCP_MAXSEG:
/* Values greater than interface MTU won't take effect. However
* at the point when this call is done we typically don't yet
- * know which interface is going to be used */
+ * know which interface is going to be used
+ */
if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) {
err = -EINVAL;
break;
@@ -2675,8 +2714,9 @@
#ifdef CONFIG_TCP_MD5SIG
case TCP_MD5SIG:
+ case TCP_MD5SIG_EXT:
/* Read the IP->Key mappings from userspace */
- err = tp->af_specific->md5_parse(sk, optval, optlen);
+ err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
break;
#endif
case TCP_USER_TIMEOUT:
@@ -2715,7 +2755,7 @@
if (!tp->repair)
err = -EPERM;
else
- tp->tsoffset = val - tcp_time_stamp;
+ tp->tsoffset = val - tcp_time_stamp_raw();
break;
case TCP_REPAIR_WINDOW:
err = tcp_repair_set_window(tp, optval, optlen);
@@ -2766,7 +2806,7 @@
for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) {
stats[i] = tp->chrono_stat[i - 1];
if (i == tp->chrono_type)
- stats[i] += tcp_time_stamp - tp->chrono_start;
+ stats[i] += tcp_jiffies32 - tp->chrono_start;
stats[i] *= USEC_PER_SEC / HZ;
total += stats[i];
}
@@ -2850,7 +2890,7 @@
info->tcpi_retrans = tp->retrans_out;
info->tcpi_fackets = tp->fackets_out;
- now = tcp_time_stamp;
+ now = tcp_jiffies32;
info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
@@ -3018,6 +3058,16 @@
return -EFAULT;
return 0;
+ case TCP_ULP:
+ if (get_user(len, optlen))
+ return -EFAULT;
+ len = min_t(unsigned int, len, TCP_ULP_NAME_MAX);
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, icsk->icsk_ulp_ops->name, len))
+ return -EFAULT;
+ return 0;
+
case TCP_THIN_LINEAR_TIMEOUTS:
val = tp->thin_lto;
break;
@@ -3081,7 +3131,7 @@
break;
case TCP_TIMESTAMP:
- val = tcp_time_stamp + tp->tsoffset;
+ val = tcp_time_stamp_raw() + tp->tsoffset;
break;
case TCP_NOTSENT_LOWAT:
val = tp->notsent_lowat;
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index b89bce4..dbcc935 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -52,10 +52,9 @@
* There is a public e-mail list for discussing BBR development and testing:
* https://groups.google.com/forum/#!forum/bbr-dev
*
- * NOTE: BBR *must* be used with the fq qdisc ("man tc-fq") with pacing enabled,
- * since pacing is integral to the BBR design and implementation.
- * BBR without pacing would not function properly, and may incur unnecessary
- * high packet loss rates.
+ * NOTE: BBR might be used with the fq qdisc ("man tc-fq") with pacing enabled;
+ * otherwise the TCP stack falls back to internal pacing, using one
+ * high-resolution timer per TCP socket, which may use more resources.
*/
#include <linux/module.h>
#include <net/tcp.h>
@@ -92,7 +91,7 @@
struct minmax bw; /* Max recent delivery rate in pkts/uS << 24 */
u32 rtt_cnt; /* count of packet-timed rounds elapsed */
u32 next_rtt_delivered; /* scb->tx.delivered at end of round */
- struct skb_mstamp cycle_mstamp; /* time of this cycle phase start */
+ u64 cycle_mstamp; /* time of this cycle phase start */
u32 mode:3, /* current bbr_mode in state machine */
prev_ca_state:3, /* CA state on previous ACK */
packet_conservation:1, /* use packet conservation? */
@@ -412,7 +411,7 @@
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
bool is_full_length =
- skb_mstamp_us_delta(&tp->delivered_mstamp, &bbr->cycle_mstamp) >
+ tcp_stamp_us_delta(tp->delivered_mstamp, bbr->cycle_mstamp) >
bbr->min_rtt_us;
u32 inflight, bw;
@@ -498,7 +497,7 @@
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
- bbr->lt_last_stamp = tp->delivered_mstamp.stamp_jiffies;
+ bbr->lt_last_stamp = div_u64(tp->delivered_mstamp, USEC_PER_MSEC);
bbr->lt_last_delivered = tp->delivered;
bbr->lt_last_lost = tp->lost;
bbr->lt_rtt_cnt = 0;
@@ -552,7 +551,7 @@
struct bbr *bbr = inet_csk_ca(sk);
u32 lost, delivered;
u64 bw;
- s32 t;
+ u32 t;
if (bbr->lt_use_bw) { /* already using long-term rate, lt_bw? */
if (bbr->mode == BBR_PROBE_BW && bbr->round_start &&
@@ -604,15 +603,15 @@
return;
/* Find average delivery rate in this sampling interval. */
- t = (s32)(tp->delivered_mstamp.stamp_jiffies - bbr->lt_last_stamp);
- if (t < 1)
- return; /* interval is less than one jiffy, so wait */
- t = jiffies_to_usecs(t);
- /* Interval long enough for jiffies_to_usecs() to return a bogus 0? */
- if (t < 1) {
+ t = div_u64(tp->delivered_mstamp, USEC_PER_MSEC) - bbr->lt_last_stamp;
+ if ((s32)t < 1)
+ return; /* interval is less than one ms, so wait */
+ /* Check if we can multiply without overflow */
+ if (t >= ~0U / USEC_PER_MSEC) {
bbr_reset_lt_bw_sampling(sk); /* interval too long; reset */
return;
}
+ t *= USEC_PER_MSEC;
bw = (u64)delivered * BW_UNIT;
do_div(bw, t);
bbr_lt_bw_interval_done(sk, bw);
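
The guard above replaces the old jiffies check: before converting a millisecond interval to microseconds in 32 bits, any value where t * USEC_PER_MSEC would wrap is rejected. A standalone sketch of the same arithmetic:

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_MSEC 1000U

static int ms_to_us_checked(uint32_t t_ms, uint32_t *t_us)
{
	if (t_ms >= ~0U / USEC_PER_MSEC)
		return -1;	/* would overflow; caller resets sampling */
	*t_us = t_ms * USEC_PER_MSEC;
	return 0;
}

int main(void)
{
	uint32_t us;

	if (ms_to_us_checked(4294968, &us))	/* >= ~0U / 1000: rejected */
		puts("interval too long");
	if (!ms_to_us_checked(250, &us))
		printf("%u us\n", us);		/* 250000 us */
	return 0;
}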
@@ -731,12 +730,12 @@
bool filter_expired;
/* Track min RTT seen in the min_rtt_win_sec filter window: */
- filter_expired = after(tcp_time_stamp,
+ filter_expired = after(tcp_jiffies32,
bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
if (rs->rtt_us >= 0 &&
(rs->rtt_us <= bbr->min_rtt_us || filter_expired)) {
bbr->min_rtt_us = rs->rtt_us;
- bbr->min_rtt_stamp = tcp_time_stamp;
+ bbr->min_rtt_stamp = tcp_jiffies32;
}
if (bbr_probe_rtt_mode_ms > 0 && filter_expired &&
@@ -755,7 +754,7 @@
/* Maintain min packets in flight for max(200 ms, 1 round). */
if (!bbr->probe_rtt_done_stamp &&
tcp_packets_in_flight(tp) <= bbr_cwnd_min_target) {
- bbr->probe_rtt_done_stamp = tcp_time_stamp +
+ bbr->probe_rtt_done_stamp = tcp_jiffies32 +
msecs_to_jiffies(bbr_probe_rtt_mode_ms);
bbr->probe_rtt_round_done = 0;
bbr->next_rtt_delivered = tp->delivered;
@@ -763,8 +762,8 @@
if (bbr->round_start)
bbr->probe_rtt_round_done = 1;
if (bbr->probe_rtt_round_done &&
- after(tcp_time_stamp, bbr->probe_rtt_done_stamp)) {
- bbr->min_rtt_stamp = tcp_time_stamp;
+ after(tcp_jiffies32, bbr->probe_rtt_done_stamp)) {
+ bbr->min_rtt_stamp = tcp_jiffies32;
bbr->restore_cwnd = 1; /* snap to prior_cwnd */
bbr_reset_mode(sk);
}
@@ -811,7 +810,7 @@
bbr->probe_rtt_done_stamp = 0;
bbr->probe_rtt_round_done = 0;
bbr->min_rtt_us = tcp_min_rtt(tp);
- bbr->min_rtt_stamp = tcp_time_stamp;
+ bbr->min_rtt_stamp = tcp_jiffies32;
minmax_reset(&bbr->bw, bbr->rtt_cnt, 0); /* init max bw to 0 */
@@ -826,10 +825,12 @@
bbr->idle_restart = 0;
bbr->full_bw = 0;
bbr->full_bw_cnt = 0;
- bbr->cycle_mstamp.v64 = 0;
+ bbr->cycle_mstamp = 0;
bbr->cycle_idx = 0;
bbr_reset_lt_bw_sampling(sk);
bbr_reset_startup_mode(sk);
+
+ cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
}
static u32 bbr_sndbuf_expand(struct sock *sk)
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 36087bc..609965f 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -84,14 +84,14 @@
static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
{
if (ca->last_cwnd == cwnd &&
- (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32)
+ (s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
return;
ca->last_cwnd = cwnd;
- ca->last_time = tcp_time_stamp;
+ ca->last_time = tcp_jiffies32;
if (ca->epoch_start == 0) /* record the beginning of an epoch */
- ca->epoch_start = tcp_time_stamp;
+ ca->epoch_start = tcp_jiffies32;
/* start off normal */
if (cwnd <= low_window) {
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 0683ba4..57ae5b5 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -155,7 +155,7 @@
{
if (event == CA_EVENT_TX_START) {
struct bictcp *ca = inet_csk_ca(sk);
- u32 now = tcp_time_stamp;
+ u32 now = tcp_jiffies32;
s32 delta;
delta = now - tcp_sk(sk)->lsndtime;
@@ -231,21 +231,21 @@
ca->ack_cnt += acked; /* count the number of ACKed packets */
if (ca->last_cwnd == cwnd &&
- (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32)
+ (s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
return;
/* The CUBIC function can update ca->cnt at most once per jiffy.
* On all cwnd reduction events, ca->epoch_start is set to 0,
* which will force a recalculation of ca->cnt.
*/
- if (ca->epoch_start && tcp_time_stamp == ca->last_time)
+ if (ca->epoch_start && tcp_jiffies32 == ca->last_time)
goto tcp_friendliness;
ca->last_cwnd = cwnd;
- ca->last_time = tcp_time_stamp;
+ ca->last_time = tcp_jiffies32;
if (ca->epoch_start == 0) {
- ca->epoch_start = tcp_time_stamp; /* record beginning */
+ ca->epoch_start = tcp_jiffies32; /* record beginning */
ca->ack_cnt = acked; /* start counting */
ca->tcp_cwnd = cwnd; /* syn with cubic */
@@ -276,7 +276,7 @@
* if the cwnd < 1 million packets !!!
*/
- t = (s32)(tcp_time_stamp - ca->epoch_start);
+ t = (s32)(tcp_jiffies32 - ca->epoch_start);
t += msecs_to_jiffies(ca->delay_min >> 3);
/* change the unit from HZ to bictcp_HZ */
t <<= BICTCP_HZ;
@@ -448,7 +448,7 @@
return;
/* Discard delay samples right after fast recovery */
- if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
+ if (ca->epoch_start && (s32)(tcp_jiffies32 - ca->epoch_start) < HZ)
return;
delay = (sample->rtt_us << 3) / USEC_PER_MSEC;
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 4a4d8e7..3eb78cd 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -104,7 +104,7 @@
const struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_sock *tp = tcp_sk(sk);
struct htcp *ca = inet_csk_ca(sk);
- u32 now = tcp_time_stamp;
+ u32 now = tcp_jiffies32;
if (icsk->icsk_ca_state == TCP_CA_Open)
ca->pkts_acked = sample->pkts_acked;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 174d437..2ab7e2f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -76,9 +76,6 @@
#include <asm/unaligned.h>
#include <linux/errqueue.h>
-int sysctl_tcp_timestamps __read_mostly = 1;
-int sysctl_tcp_window_scaling __read_mostly = 1;
-int sysctl_tcp_sack __read_mostly = 1;
int sysctl_tcp_fack __read_mostly;
int sysctl_tcp_max_reordering __read_mostly = 300;
int sysctl_tcp_dsack __read_mostly = 1;
@@ -112,6 +109,7 @@
#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
#define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */
#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */
+#define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */
#define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
@@ -441,7 +439,7 @@
tcp_sndbuf_expand(sk);
tp->rcvq_space.space = tp->rcv_wnd;
- skb_mstamp_get(&tp->tcp_mstamp);
+ tcp_mstamp_refresh(tp);
tp->rcvq_space.time = tp->tcp_mstamp;
tp->rcvq_space.seq = tp->copied_seq;
@@ -463,7 +461,7 @@
tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);
tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
}
/* 5. Recalculate window clamp after socket hit its memory bounds. */
@@ -555,11 +553,11 @@
{
u32 delta_us;
- if (tp->rcv_rtt_est.time.v64 == 0)
+ if (tp->rcv_rtt_est.time == 0)
goto new_measure;
if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
return;
- delta_us = skb_mstamp_us_delta(&tp->tcp_mstamp, &tp->rcv_rtt_est.time);
+ delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time);
tcp_rcv_rtt_update(tp, delta_us, 1);
new_measure:
@@ -571,13 +569,15 @@
const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
+
if (tp->rx_opt.rcv_tsecr &&
(TCP_SKB_CB(skb)->end_seq -
- TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
- tcp_rcv_rtt_update(tp,
- jiffies_to_usecs(tcp_time_stamp -
- tp->rx_opt.rcv_tsecr),
- 0);
+ TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) {
+ u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
+ u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+
+ tcp_rcv_rtt_update(tp, delta_us, 0);
+ }
}
/*
@@ -590,7 +590,7 @@
int time;
int copied;
- time = skb_mstamp_us_delta(&tp->tcp_mstamp, &tp->rcvq_space.time);
+ time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0)
return;
@@ -672,7 +672,7 @@
tcp_rcv_rtt_measure(tp);
- now = tcp_time_stamp;
+ now = tcp_jiffies32;
if (!icsk->icsk_ack.ato) {
/* The _first_ data packet received, initialize
@@ -885,6 +885,9 @@
struct tcp_sock *tp = tcp_sk(sk);
int mib_idx;
+ if (WARN_ON_ONCE(metric < 0))
+ return;
+
if (metric > tp->reordering) {
tp->reordering = min(sysctl_tcp_max_reordering, metric);
@@ -1134,8 +1137,8 @@
* that was SACKed. RTO needs the earliest RTT to stay conservative,
* but congestion control should still get an accurate delay signal.
*/
- struct skb_mstamp first_sackt;
- struct skb_mstamp last_sackt;
+ u64 first_sackt;
+ u64 last_sackt;
struct rate_sample *rate;
int flag;
};
@@ -1200,7 +1203,7 @@
struct tcp_sacktag_state *state, u8 sacked,
u32 start_seq, u32 end_seq,
int dup_sack, int pcount,
- const struct skb_mstamp *xmit_time)
+ u64 xmit_time)
{
struct tcp_sock *tp = tcp_sk(sk);
int fack_count = state->fack_count;
@@ -1242,9 +1245,9 @@
state->reord);
if (!after(end_seq, tp->high_seq))
state->flag |= FLAG_ORIG_SACK_ACKED;
- if (state->first_sackt.v64 == 0)
- state->first_sackt = *xmit_time;
- state->last_sackt = *xmit_time;
+ if (state->first_sackt == 0)
+ state->first_sackt = xmit_time;
+ state->last_sackt = xmit_time;
}
if (sacked & TCPCB_LOST) {
@@ -1304,7 +1307,7 @@
*/
tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
start_seq, end_seq, dup_sack, pcount,
- &skb->skb_mstamp);
+ skb->skb_mstamp);
tcp_rate_skb_delivered(sk, skb, state->rate);
if (skb == tp->lost_skb_hint)
@@ -1356,8 +1359,8 @@
tcp_advance_highest_sack(sk, skb);
tcp_skb_collapse_tstamp(prev, skb);
- if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp.v64))
- TCP_SKB_CB(prev)->tx.delivered_mstamp.v64 = 0;
+ if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp))
+ TCP_SKB_CB(prev)->tx.delivered_mstamp = 0;
tcp_unlink_write_queue(skb, sk);
sk_wmem_free_skb(sk, skb);
@@ -1587,7 +1590,7 @@
TCP_SKB_CB(skb)->end_seq,
dup_sack,
tcp_skb_pcount(skb),
- &skb->skb_mstamp);
+ skb->skb_mstamp);
tcp_rate_skb_delivered(sk, skb, state->rate);
if (!before(TCP_SKB_CB(skb)->seq,
@@ -1954,7 +1957,7 @@
}
tp->snd_cwnd = 1;
tp->snd_cwnd_cnt = 0;
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
tp->retrans_out = 0;
tp->lost_out = 0;
@@ -2383,7 +2386,7 @@
tcp_ecn_withdraw_cwr(tp);
}
}
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
tp->undo_marker = 0;
}
@@ -2520,7 +2523,7 @@
if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
(tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
tp->snd_cwnd = tp->snd_ssthresh;
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
}
tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
}
@@ -2590,7 +2593,7 @@
tcp_mss_to_mtu(sk, tp->mss_cache) /
icsk->icsk_mtup.probe_size;
tp->snd_cwnd_cnt = 0;
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
tp->snd_ssthresh = tcp_current_ssthresh(sk);
icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
@@ -2911,13 +2914,13 @@
struct tcp_sock *tp = tcp_sk(sk);
u32 wlen = sysctl_tcp_min_rtt_wlen * HZ;
- minmax_running_min(&tp->rtt_min, wlen, tcp_time_stamp,
+ minmax_running_min(&tp->rtt_min, wlen, tcp_jiffies32,
rtt_us ? : jiffies_to_usecs(1));
}
-static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
- long seq_rtt_us, long sack_rtt_us,
- long ca_rtt_us)
+static bool tcp_ack_update_rtt(struct sock *sk, const int flag,
+ long seq_rtt_us, long sack_rtt_us,
+ long ca_rtt_us, struct rate_sample *rs)
{
const struct tcp_sock *tp = tcp_sk(sk);
@@ -2936,9 +2939,13 @@
* See draft-ietf-tcplw-high-performance-00, section 3.3.
*/
if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
- flag & FLAG_ACKED)
- seq_rtt_us = ca_rtt_us = jiffies_to_usecs(tcp_time_stamp -
- tp->rx_opt.rcv_tsecr);
+ flag & FLAG_ACKED) {
+ u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
+ u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+
+ seq_rtt_us = ca_rtt_us = delta_us;
+ }
+ rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */
if (seq_rtt_us < 0)
return false;
@@ -2958,16 +2965,13 @@
/* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
{
+ struct rate_sample rs;
long rtt_us = -1L;
- if (req && !req->num_retrans && tcp_rsk(req)->snt_synack.v64) {
- struct skb_mstamp now;
+ if (req && !req->num_retrans && tcp_rsk(req)->snt_synack)
+ rtt_us = tcp_stamp_us_delta(tcp_clock_us(), tcp_rsk(req)->snt_synack);
- skb_mstamp_get(&now);
- rtt_us = skb_mstamp_us_delta(&now, &tcp_rsk(req)->snt_synack);
- }
-
- tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us);
+ tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us, &rs);
}
@@ -2976,7 +2980,7 @@
const struct inet_connection_sock *icsk = inet_csk(sk);
icsk->icsk_ca_ops->cong_avoid(sk, ack, acked);
- tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
+ tcp_sk(sk)->snd_cwnd_stamp = tcp_jiffies32;
}
/* Restart timer after forward progress on connection.
@@ -3001,14 +3005,14 @@
if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
struct sk_buff *skb = tcp_write_queue_head(sk);
- const u32 rto_time_stamp =
- tcp_skb_timestamp(skb) + rto;
- s32 delta = (s32)(rto_time_stamp - tcp_time_stamp);
- /* delta may not be positive if the socket is locked
+ u64 rto_time_stamp = skb->skb_mstamp +
+ jiffies_to_usecs(rto);
+ s64 delta_us = rto_time_stamp - tp->tcp_mstamp;
+ /* delta_us may not be positive if the socket is locked
* when the retrans timer fires and is rescheduled.
*/
- if (delta > 0)
- rto = delta;
+ if (delta_us > 0)
+ rto = usecs_to_jiffies(delta_us);
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
TCP_RTO_MAX);
@@ -3060,9 +3064,8 @@
struct tcp_sacktag_state *sack)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
- struct skb_mstamp first_ackt, last_ackt;
+ u64 first_ackt, last_ackt;
struct tcp_sock *tp = tcp_sk(sk);
- struct skb_mstamp *now = &tp->tcp_mstamp;
u32 prior_sacked = tp->sacked_out;
u32 reord = tp->packets_out;
bool fully_acked = true;
@@ -3075,7 +3078,7 @@
bool rtt_update;
int flag = 0;
- first_ackt.v64 = 0;
+ first_ackt = 0;
while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
@@ -3106,8 +3109,8 @@
flag |= FLAG_RETRANS_DATA_ACKED;
} else if (!(sacked & TCPCB_SACKED_ACKED)) {
last_ackt = skb->skb_mstamp;
- WARN_ON_ONCE(last_ackt.v64 == 0);
- if (!first_ackt.v64)
+ WARN_ON_ONCE(last_ackt == 0);
+ if (!first_ackt)
first_ackt = last_ackt;
last_in_flight = TCP_SKB_CB(skb)->tx.in_flight;
@@ -3122,7 +3125,7 @@
tp->delivered += acked_pcount;
if (!tcp_skb_spurious_retrans(tp, skb))
tcp_rack_advance(tp, sacked, scb->end_seq,
- &skb->skb_mstamp);
+ skb->skb_mstamp);
}
if (sacked & TCPCB_LOST)
tp->lost_out -= acked_pcount;
@@ -3165,17 +3168,16 @@
if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
flag |= FLAG_SACK_RENEGING;
- if (likely(first_ackt.v64) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
- seq_rtt_us = skb_mstamp_us_delta(now, &first_ackt);
- ca_rtt_us = skb_mstamp_us_delta(now, &last_ackt);
+ if (likely(first_ackt) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
+ seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt);
+ ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt);
}
- if (sack->first_sackt.v64) {
- sack_rtt_us = skb_mstamp_us_delta(now, &sack->first_sackt);
- ca_rtt_us = skb_mstamp_us_delta(now, &sack->last_sackt);
+ if (sack->first_sackt) {
+ sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt);
+ ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->last_sackt);
}
- sack->rate->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet, or -1 */
rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us,
- ca_rtt_us);
+ ca_rtt_us, sack->rate);
if (flag & FLAG_ACKED) {
tcp_rearm_rto(sk);
@@ -3201,7 +3203,7 @@
tp->fackets_out -= min(pkts_acked, tp->fackets_out);
} else if (skb && rtt_update && sack_rtt_us >= 0 &&
- sack_rtt_us > skb_mstamp_us_delta(now, &skb->skb_mstamp)) {
+ sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) {
/* Do not re-arm RTO if the sack RTT is measured from data sent
* after when the head was last (re)transmitted. Otherwise the
* timeout may continue to extend in loss recovery.
@@ -3211,7 +3213,7 @@
if (icsk->icsk_ca_ops->pkts_acked) {
struct ack_sample sample = { .pkts_acked = pkts_acked,
- .rtt_us = ca_rtt_us,
+ .rtt_us = sack->rate->rtt_us,
.in_flight = last_in_flight };
icsk->icsk_ca_ops->pkts_acked(sk, &sample);
@@ -3390,7 +3392,7 @@
u32 *last_oow_ack_time)
{
if (*last_oow_ack_time) {
- s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
+ s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time);
if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
NET_INC_STATS(net, mib_idx);
@@ -3398,7 +3400,7 @@
}
}
- *last_oow_ack_time = tcp_time_stamp;
+ *last_oow_ack_time = tcp_jiffies32;
return false; /* not rate-limited: go ahead, send dupack now! */
}
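
A standalone sketch of the rate limit above: an out-of-window duplicate ACK is suppressed unless at least `limit` ticks have elapsed since the last one, using the same wrap-safe 32-bit arithmetic:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool oow_rate_limited(uint32_t now, uint32_t *last, uint32_t limit)
{
	if (*last) {
		int32_t elapsed = (int32_t)(now - *last);	/* wrap-safe */

		if (elapsed >= 0 && elapsed < (int32_t)limit)
			return true;	/* too soon: suppress this ACK */
	}
	*last = now;
	return false;
}

int main(void)
{
	uint32_t last = 0;

	printf("%d\n", oow_rate_limited(100, &last, 50));	/* 0: first */
	printf("%d\n", oow_rate_limited(120, &last, 50));	/* 1: 20 < 50 */
	printf("%d\n", oow_rate_limited(200, &last, 50));	/* 0: expired */
	return 0;
}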
@@ -3553,7 +3555,7 @@
int acked = 0; /* Number of packets newly acked */
int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
- sack_state.first_sackt.v64 = 0;
+ sack_state.first_sackt = 0;
sack_state.rate = &rs;
/* We very likely will need to access write queue head. */
@@ -3565,7 +3567,8 @@
if (before(ack, prior_snd_una)) {
/* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
if (before(ack, prior_snd_una - tp->max_window)) {
- tcp_send_challenge_ack(sk, skb);
+ if (!(flag & FLAG_NO_CHALLENGE_ACK))
+ tcp_send_challenge_ack(sk, skb);
return -1;
}
goto old_ack;
@@ -3636,7 +3639,7 @@
*/
sk->sk_err_soft = 0;
icsk->icsk_probes_out = 0;
- tp->rcv_tstamp = tcp_time_stamp;
+ tp->rcv_tstamp = tcp_jiffies32;
if (!prior_packets)
goto no_queue;
@@ -3718,7 +3721,8 @@
* But, this can also be called on packets in the established flow when
* the fast version below fails.
*/
-void tcp_parse_options(const struct sk_buff *skb,
+void tcp_parse_options(const struct net *net,
+ const struct sk_buff *skb,
struct tcp_options_received *opt_rx, int estab,
struct tcp_fastopen_cookie *foc)
{
@@ -3759,7 +3763,7 @@
break;
case TCPOPT_WINDOW:
if (opsize == TCPOLEN_WINDOW && th->syn &&
- !estab && sysctl_tcp_window_scaling) {
+ !estab && net->ipv4.sysctl_tcp_window_scaling) {
__u8 snd_wscale = *(__u8 *)ptr;
opt_rx->wscale_ok = 1;
if (snd_wscale > TCP_MAX_WSCALE) {
@@ -3775,7 +3779,7 @@
case TCPOPT_TIMESTAMP:
if ((opsize == TCPOLEN_TIMESTAMP) &&
((estab && opt_rx->tstamp_ok) ||
- (!estab && sysctl_tcp_timestamps))) {
+ (!estab && net->ipv4.sysctl_tcp_timestamps))) {
opt_rx->saw_tstamp = 1;
opt_rx->rcv_tsval = get_unaligned_be32(ptr);
opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
@@ -3783,7 +3787,7 @@
break;
case TCPOPT_SACK_PERM:
if (opsize == TCPOLEN_SACK_PERM && th->syn &&
- !estab && sysctl_tcp_sack) {
+ !estab && net->ipv4.sysctl_tcp_sack) {
opt_rx->sack_ok = TCP_SACK_SEEN;
tcp_sack_reset(opt_rx);
}
@@ -3852,7 +3856,8 @@
/* Fast parse options. This hopes to only see timestamps.
* If it is wrong it falls back on tcp_parse_options().
*/
-static bool tcp_fast_parse_options(const struct sk_buff *skb,
+static bool tcp_fast_parse_options(const struct net *net,
+ const struct sk_buff *skb,
const struct tcphdr *th, struct tcp_sock *tp)
{
/* In the spirit of fast parsing, compare doff directly to constant
@@ -3867,7 +3872,7 @@
return true;
}
- tcp_parse_options(skb, &tp->rx_opt, 1, NULL);
+ tcp_parse_options(net, skb, &tp->rx_opt, 1, NULL);
if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
tp->rx_opt.rcv_tsecr -= tp->tsoffset;
@@ -5019,7 +5024,7 @@
if (tcp_should_expand_sndbuf(sk)) {
tcp_sndbuf_expand(sk);
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
}
sk->sk_write_space(sk);
@@ -5228,7 +5233,8 @@
bool rst_seq_match = false;
/* RFC1323: H1. Apply PAWS check first. */
- if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
+ if (tcp_fast_parse_options(sock_net(sk), skb, th, tp) &&
+ tp->rx_opt.saw_tstamp &&
tcp_paws_discard(sk, skb)) {
if (!th->rst) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
@@ -5356,7 +5362,7 @@
{
struct tcp_sock *tp = tcp_sk(sk);
- skb_mstamp_get(&tp->tcp_mstamp);
+ tcp_mstamp_refresh(tp);
if (unlikely(!sk->sk_rx_dst))
inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
/*
@@ -5554,7 +5560,7 @@
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_set_state(sk, TCP_ESTABLISHED);
- icsk->icsk_ack.lrcvtime = tcp_time_stamp;
+ icsk->icsk_ack.lrcvtime = tcp_jiffies32;
if (skb) {
icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
@@ -5571,7 +5577,7 @@
/* Prevent spurious tcp_cwnd_restart() on first data
* packet.
*/
- tp->lsndtime = tcp_time_stamp;
+ tp->lsndtime = tcp_jiffies32;
tcp_init_buffer_space(sk);
@@ -5599,7 +5605,7 @@
/* Get original SYNACK MSS value if user MSS sets mss_clamp */
tcp_clear_options(&opt);
opt.user_mss = opt.mss_clamp = 0;
- tcp_parse_options(synack, &opt, 0, NULL);
+ tcp_parse_options(sock_net(sk), synack, &opt, 0, NULL);
mss = opt.mss_clamp;
}
@@ -5653,7 +5659,7 @@
int saved_clamp = tp->rx_opt.mss_clamp;
bool fastopen_fail;
- tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
+ tcp_parse_options(sock_net(sk), skb, &tp->rx_opt, 0, &foc);
if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
tp->rx_opt.rcv_tsecr -= tp->tsoffset;
@@ -5672,7 +5678,7 @@
if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
!between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
- tcp_time_stamp)) {
+ tcp_time_stamp(tp))) {
NET_INC_STATS(sock_net(sk),
LINUX_MIB_PAWSACTIVEREJECTED);
goto reset_and_undo;
@@ -5917,7 +5923,7 @@
case TCP_SYN_SENT:
tp->rx_opt.saw_tstamp = 0;
- skb_mstamp_get(&tp->tcp_mstamp);
+ tcp_mstamp_refresh(tp);
queued = tcp_rcv_synsent_state_process(sk, skb, th);
if (queued >= 0)
return queued;
@@ -5929,7 +5935,7 @@
return 0;
}
- skb_mstamp_get(&tp->tcp_mstamp);
+ tcp_mstamp_refresh(tp);
tp->rx_opt.saw_tstamp = 0;
req = tp->fastopen_rsk;
if (req) {
@@ -5948,13 +5954,17 @@
/* step 5: check the ACK field */
acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
- FLAG_UPDATE_TS_RECENT) > 0;
+ FLAG_UPDATE_TS_RECENT |
+ FLAG_NO_CHALLENGE_ACK) > 0;
+ if (!acceptable) {
+ if (sk->sk_state == TCP_SYN_RECV)
+ return 1; /* send one RST */
+ tcp_send_challenge_ack(sk, skb);
+ goto discard;
+ }
switch (sk->sk_state) {
case TCP_SYN_RECV:
- if (!acceptable)
- return 1;
-
if (!tp->srtt_us)
tcp_synack_rtt_meas(sk, req);
@@ -6008,7 +6018,7 @@
tcp_update_pacing_rate(sk);
/* Prevent spurious tcp_cwnd_restart() on first data packet */
- tp->lsndtime = tcp_time_stamp;
+ tp->lsndtime = tcp_jiffies32;
tcp_initialize_rcv_mss(sk);
tcp_fast_path_on(tp);
@@ -6023,14 +6033,6 @@
* our SYNACK so stop the SYNACK timer.
*/
if (req) {
- /* Return RST if ack_seq is invalid.
- * Note that RFC793 only says to generate a
- * DUPACK for it but for TCP Fast Open it seems
- * better to treat this case like TCP_SYN_RECV
- * above.
- */
- if (!acceptable)
- return 1;
/* We no longer need the request sock. */
reqsk_fastopen_remove(sk, req, false);
tcp_rearm_rto(sk);
@@ -6202,7 +6204,7 @@
req->cookie_ts = 0;
tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
- skb_mstamp_get(&tcp_rsk(req)->snt_synack);
+ tcp_rsk(req)->snt_synack = tcp_clock_us();
tcp_rsk(req)->last_oow_ack_time = 0;
req->mss = rx_opt->mss_clamp;
req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
@@ -6330,7 +6332,8 @@
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = af_ops->mss_clamp;
tmp_opt.user_mss = tp->rx_opt.user_mss;
- tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
+ tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0,
+ want_cookie ? NULL : &foc);
if (want_cookie && !tmp_opt.saw_tstamp)
tcp_clear_options(&tmp_opt);
@@ -6348,7 +6351,7 @@
goto drop_and_free;
if (tmp_opt.tstamp_ok)
- tcp_rsk(req)->ts_off = af_ops->init_ts_off(skb);
+ tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb);
if (!want_cookie && !isn) {
/* Kill the following clause, if you dislike this way. */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5ab2aac..e20bcf0 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -80,6 +80,7 @@
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/inetdevice.h>
#include <crypto/hash.h>
#include <linux/scatterlist.h>
@@ -102,10 +103,9 @@
tcp_hdr(skb)->source);
}
-static u32 tcp_v4_init_ts_off(const struct sk_buff *skb)
+static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
- return secure_tcp_ts_off(ip_hdr(skb)->daddr,
- ip_hdr(skb)->saddr);
+ return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
@@ -242,7 +242,8 @@
inet->inet_daddr,
inet->inet_sport,
usin->sin_port);
- tp->tsoffset = secure_tcp_ts_off(inet->inet_saddr,
+ tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
+ inet->inet_saddr,
inet->inet_daddr);
}
@@ -376,8 +377,9 @@
struct sock *sk;
struct sk_buff *skb;
struct request_sock *fastopen;
- __u32 seq, snd_una;
- __u32 remaining;
+ u32 seq, snd_una;
+ s32 remaining;
+ u32 delta_us;
int err;
struct net *net = dev_net(icmp_skb->dev);
@@ -483,11 +485,12 @@
skb = tcp_write_queue_head(sk);
BUG_ON(!skb);
+ tcp_mstamp_refresh(tp);
+ delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
remaining = icsk->icsk_rto -
- min(icsk->icsk_rto,
- tcp_time_stamp - tcp_skb_timestamp(skb));
+ usecs_to_jiffies(delta_us);
- if (remaining) {
+ if (remaining > 0) {
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
remaining, TCP_RTO_MAX);
} else {
@@ -811,7 +814,7 @@
tcp_v4_send_ack(sk, skb,
tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
- tcp_time_stamp + tcptw->tw_ts_offset,
+ tcp_time_stamp_raw() + tcptw->tw_ts_offset,
tcptw->tw_ts_recent,
tw->tw_bound_dev_if,
tcp_twsk_md5_key(tcptw),
@@ -839,7 +842,7 @@
tcp_v4_send_ack(sk, skb, seq,
tcp_rsk(req)->rcv_nxt,
req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
- tcp_time_stamp + tcp_rsk(req)->ts_off,
+ tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
req->ts_recent,
0,
tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
@@ -904,6 +907,48 @@
{
const struct tcp_sock *tp = tcp_sk(sk);
struct tcp_md5sig_key *key;
+ const struct tcp_md5sig_info *md5sig;
+ __be32 mask;
+ struct tcp_md5sig_key *best_match = NULL;
+ bool match;
+
+ /* caller either holds rcu_read_lock() or socket lock */
+ md5sig = rcu_dereference_check(tp->md5sig_info,
+ lockdep_sock_is_held(sk));
+ if (!md5sig)
+ return NULL;
+
+ hlist_for_each_entry_rcu(key, &md5sig->head, node) {
+ if (key->family != family)
+ continue;
+
+ if (family == AF_INET) {
+ mask = inet_make_mask(key->prefixlen);
+ match = (key->addr.a4.s_addr & mask) ==
+ (addr->a4.s_addr & mask);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (family == AF_INET6) {
+ match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
+ key->prefixlen);
+#endif
+ } else {
+ match = false;
+ }
+
+ if (match && (!best_match ||
+ key->prefixlen > best_match->prefixlen))
+ best_match = key;
+ }
+ return best_match;
+}
+EXPORT_SYMBOL(tcp_md5_do_lookup);
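
The lookup above now returns the most specific key whose prefix covers the peer address. A standalone sketch of the IPv4 best-match rule, with a local stand-in for inet_make_mask() (an illustrative model, not the kernel helper):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct key { uint32_t addr; uint8_t prefixlen; };	/* network order */

static uint32_t make_mask(uint8_t plen)
{
	return plen ? htonl(~0U << (32 - plen)) : 0;
}

static const struct key *best_match(const struct key *keys, int n,
				    uint32_t addr)
{
	const struct key *best = NULL;
	int i;

	for (i = 0; i < n; i++) {
		uint32_t mask = make_mask(keys[i].prefixlen);

		if ((keys[i].addr & mask) != (addr & mask))
			continue;	/* prefix does not cover addr */
		if (!best || keys[i].prefixlen > best->prefixlen)
			best = &keys[i];	/* longest prefix wins */
	}
	return best;
}

int main(void)
{
	struct key keys[] = { { 0, 8 }, { 0, 16 } };
	const struct key *k;

	keys[0].addr = inet_addr("10.0.0.0");	/* 10.0.0.0/8 */
	keys[1].addr = inet_addr("10.1.0.0");	/* 10.1.0.0/16 */
	k = best_match(keys, 2, inet_addr("10.1.2.3"));
	printf("matched /%u\n", k ? (unsigned)k->prefixlen : 0u);  /* /16 */
	return 0;
}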
+
+struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
+ const union tcp_md5_addr *addr,
+ int family, u8 prefixlen)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp_md5sig_key *key;
unsigned int size = sizeof(struct in_addr);
const struct tcp_md5sig_info *md5sig;
@@ -919,12 +964,12 @@
hlist_for_each_entry_rcu(key, &md5sig->head, node) {
if (key->family != family)
continue;
- if (!memcmp(&key->addr, addr, size))
+ if (!memcmp(&key->addr, addr, size) &&
+ key->prefixlen == prefixlen)
return key;
}
return NULL;
}
-EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
const struct sock *addr_sk)
@@ -938,14 +983,15 @@
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
- int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
+ int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
+ gfp_t gfp)
{
/* Add Key to the list */
struct tcp_md5sig_key *key;
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_md5sig_info *md5sig;
- key = tcp_md5_do_lookup(sk, addr, family);
+ key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
if (key) {
/* Pre-existing entry - just update that one. */
memcpy(key->key, newkey, newkeylen);
@@ -976,6 +1022,7 @@
memcpy(key->key, newkey, newkeylen);
key->keylen = newkeylen;
key->family = family;
+ key->prefixlen = prefixlen;
memcpy(&key->addr, addr,
(family == AF_INET6) ? sizeof(struct in6_addr) :
sizeof(struct in_addr));
@@ -984,11 +1031,12 @@
}
EXPORT_SYMBOL(tcp_md5_do_add);
-int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
+int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
+ u8 prefixlen)
{
struct tcp_md5sig_key *key;
- key = tcp_md5_do_lookup(sk, addr, family);
+ key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
if (!key)
return -ENOENT;
hlist_del_rcu(&key->node);
@@ -1014,11 +1062,12 @@
}
}
-static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
- int optlen)
+static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
+ char __user *optval, int optlen)
{
struct tcp_md5sig cmd;
struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
+ u8 prefixlen = 32;
if (optlen < sizeof(cmd))
return -EINVAL;
@@ -1029,15 +1078,22 @@
if (sin->sin_family != AF_INET)
return -EINVAL;
+ if (optname == TCP_MD5SIG_EXT &&
+ cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
+ prefixlen = cmd.tcpm_prefixlen;
+ if (prefixlen > 32)
+ return -EINVAL;
+ }
+
if (!cmd.tcpm_keylen)
return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
- AF_INET);
+ AF_INET, prefixlen);
if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
return -EINVAL;
return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
- AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
+ AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
GFP_KERNEL);
}
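
For completeness, a hedged userspace sketch of the extended option: one key is installed for a whole subnet by setting the prefix flag and length. The option value and struct layout below follow this series' uapi additions and are defined locally in case system headers predate them; treat both as assumptions.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#define TCP_MD5SIG_EXT		32	/* assumed uapi value */
#define TCP_MD5SIG_FLAG_PREFIX	1
#define MD5_MAXKEYLEN		80	/* TCP_MD5SIG_MAXKEYLEN */

struct md5sig_ext {			/* assumed uapi layout */
	struct sockaddr_storage tcpm_addr;
	uint8_t  tcpm_flags;
	uint8_t  tcpm_prefixlen;
	uint16_t tcpm_keylen;
	uint32_t tcpm_pad;
	uint8_t  tcpm_key[MD5_MAXKEYLEN];
};

int main(void)
{
	struct md5sig_ext md5;
	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	memset(&md5, 0, sizeof(md5));
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = inet_addr("10.1.0.0");
	md5.tcpm_flags = TCP_MD5SIG_FLAG_PREFIX;
	md5.tcpm_prefixlen = 16;		/* one key for 10.1/16 */
	md5.tcpm_keylen = 6;
	memcpy(md5.tcpm_key, "secret", 6);
	if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG_EXT, &md5, sizeof(md5)) < 0)
		perror("setsockopt(TCP_MD5SIG_EXT)");
	close(fd);
	return 0;
}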
@@ -1340,7 +1396,7 @@
* across. Shucks.
*/
tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
- AF_INET, key->key, key->keylen, GFP_ATOMIC);
+ AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
}
#endif
@@ -1858,6 +1914,8 @@
tcp_cleanup_congestion_control(sk);
+ tcp_cleanup_ulp(sk);
+
/* Cleanup up the write buffer. */
tcp_write_queue_purge(sk);
@@ -2385,6 +2443,7 @@
.unhash = inet_unhash,
.get_port = inet_csk_get_port,
.enter_memory_pressure = tcp_enter_memory_pressure,
+ .leave_memory_pressure = tcp_leave_memory_pressure,
.stream_memory_free = tcp_stream_memory_free,
.sockets_allocated = &tcp_sockets_allocated,
.orphan_count = &tcp_orphan_count,
@@ -2463,6 +2522,9 @@
net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
+ net->ipv4.sysctl_tcp_sack = 1;
+ net->ipv4.sysctl_tcp_window_scaling = 1;
+ net->ipv4.sysctl_tcp_timestamps = 1;
return 0;
fail:
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index d6fb6c0..ae10ed6 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -37,7 +37,7 @@
#include <net/tcp.h>
/* resolution of owd */
-#define LP_RESOL 1000
+#define LP_RESOL TCP_TS_HZ
/**
* enum tcp_lp_state
@@ -147,9 +147,9 @@
tp->rx_opt.rcv_tsecr == lp->local_ref_time)
goto out;
- m = HZ * (tp->rx_opt.rcv_tsval -
- lp->remote_ref_time) / (tp->rx_opt.rcv_tsecr -
- lp->local_ref_time);
+ m = TCP_TS_HZ *
+ (tp->rx_opt.rcv_tsval - lp->remote_ref_time) /
+ (tp->rx_opt.rcv_tsecr - lp->local_ref_time);
if (m < 0)
m = -m;
@@ -194,7 +194,7 @@
if (lp->flag & LP_VALID_RHZ) {
owd =
tp->rx_opt.rcv_tsval * (LP_RESOL / lp->remote_hz) -
- tp->rx_opt.rcv_tsecr * (LP_RESOL / HZ);
+ tp->rx_opt.rcv_tsecr * (LP_RESOL / TCP_TS_HZ);
if (owd < 0)
owd = -owd;
}
@@ -264,18 +264,19 @@
{
struct tcp_sock *tp = tcp_sk(sk);
struct lp *lp = inet_csk_ca(sk);
+ u32 now = tcp_time_stamp(tp);
u32 delta;
if (sample->rtt_us > 0)
tcp_lp_rtt_sample(sk, sample->rtt_us);
/* calc inference */
- delta = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
+ delta = now - tp->rx_opt.rcv_tsecr;
if ((s32)delta > 0)
lp->inference = 3 * delta;
/* test if within inference */
- if (lp->last_drop && (tcp_time_stamp - lp->last_drop < lp->inference))
+ if (lp->last_drop && (now - lp->last_drop < lp->inference))
lp->flag |= LP_WITHIN_INF;
else
lp->flag &= ~LP_WITHIN_INF;
@@ -312,7 +313,7 @@
tp->snd_cwnd = max(tp->snd_cwnd >> 1U, 1U);
/* record this drop time */
- lp->last_drop = tcp_time_stamp;
+ lp->last_drop = now;
}
static struct tcp_congestion_ops tcp_lp __read_mostly = {
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 653bbd6..102b2c9 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -524,7 +524,7 @@
tp->snd_cwnd = 1;
else
tp->snd_cwnd = tcp_init_cwnd(tp, dst);
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
}
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 717be4d..d30ee31 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -98,7 +98,7 @@
tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
- tcp_parse_options(skb, &tmp_opt, 0, NULL);
+ tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);
if (tmp_opt.saw_tstamp) {
if (tmp_opt.rcv_tsecr)
@@ -445,9 +445,9 @@
newtp->srtt_us = 0;
newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
- minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U);
+ minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
newicsk->icsk_rto = TCP_TIMEOUT_INIT;
- newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
+ newicsk->icsk_ack.lrcvtime = tcp_jiffies32;
newtp->packets_out = 0;
newtp->retrans_out = 0;
@@ -455,7 +455,7 @@
newtp->fackets_out = 0;
newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
newtp->tlp_high_seq = 0;
- newtp->lsndtime = treq->snt_synack.stamp_jiffies;
+ newtp->lsndtime = tcp_jiffies32;
newsk->sk_txhash = treq->txhash;
newtp->last_oow_ack_time = 0;
newtp->total_retrans = req->num_retrans;
@@ -526,7 +526,7 @@
newtp->fastopen_req = NULL;
newtp->fastopen_rsk = NULL;
newtp->syn_data_acked = 0;
- newtp->rack.mstamp.v64 = 0;
+ newtp->rack.mstamp = 0;
newtp->rack.advanced = 0;
__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
@@ -559,7 +559,7 @@
tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(struct tcphdr)>>2)) {
- tcp_parse_options(skb, &tmp_opt, 0, NULL);
+ tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);
if (tmp_opt.saw_tstamp) {
tmp_opt.ts_recent = req->ts_recent;
diff --git a/net/ipv4/tcp_nv.c b/net/ipv4/tcp_nv.c
index 5de82a8..6d650ed 100644
--- a/net/ipv4/tcp_nv.c
+++ b/net/ipv4/tcp_nv.c
@@ -424,8 +424,8 @@
}
/* Extract info for Tcp socket info provided via netlink */
-size_t tcpnv_get_info(struct sock *sk, u32 ext, int *attr,
- union tcp_cc_info *info)
+static size_t tcpnv_get_info(struct sock *sk, u32 ext, int *attr,
+ union tcp_cc_info *info)
{
const struct tcpnv *ca = inet_csk_ca(sk);
@@ -440,7 +440,6 @@
}
return 0;
}
-EXPORT_SYMBOL_GPL(tcpnv_get_info);
static struct tcp_congestion_ops tcpnv __read_mostly = {
.init = tcpnv_init,
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 4858e19..9a9c395 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -151,7 +151,7 @@
while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
cwnd >>= 1;
tp->snd_cwnd = max(cwnd, restart_cwnd);
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
tp->snd_cwnd_used = 0;
}
@@ -160,7 +160,7 @@
struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
- const u32 now = tcp_time_stamp;
+ const u32 now = tcp_jiffies32;
if (tcp_packets_in_flight(tp) == 0)
tcp_ca_event(sk, CA_EVENT_TX_START);
@@ -569,18 +569,18 @@
opts->mss = tcp_advertise_mss(sk);
remaining -= TCPOLEN_MSS_ALIGNED;
- if (likely(sysctl_tcp_timestamps && !*md5)) {
+ if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) {
opts->options |= OPTION_TS;
opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
opts->tsecr = tp->rx_opt.ts_recent;
remaining -= TCPOLEN_TSTAMP_ALIGNED;
}
- if (likely(sysctl_tcp_window_scaling)) {
+ if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
opts->ws = tp->rx_opt.rcv_wscale;
opts->options |= OPTION_WSCALE;
remaining -= TCPOLEN_WSCALE_ALIGNED;
}
- if (likely(sysctl_tcp_sack)) {
+ if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) {
opts->options |= OPTION_SACK_ADVERTISE;
if (unlikely(!(OPTION_TS & opts->options)))
remaining -= TCPOLEN_SACKPERM_ALIGNED;
@@ -904,6 +904,72 @@
sk_free(sk);
}
+/* Note: called in hard irq context.
+ * We cannot call into the TCP stack right away.
+ */
+enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
+{
+ struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer);
+ struct sock *sk = (struct sock *)tp;
+ unsigned long nval, oval;
+
+ for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
+ struct tsq_tasklet *tsq;
+ bool empty;
+
+ if (oval & TSQF_QUEUED)
+ break;
+
+ nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED;
+ nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
+ if (nval != oval)
+ continue;
+
+ if (!atomic_inc_not_zero(&sk->sk_wmem_alloc))
+ break;
+ /* queue this socket to tasklet queue */
+ tsq = this_cpu_ptr(&tsq_tasklet);
+ empty = list_empty(&tsq->head);
+ list_add(&tp->tsq_node, &tsq->head);
+ if (empty)
+ tasklet_schedule(&tsq->tasklet);
+ break;
+ }
+ return HRTIMER_NORESTART;
+}
+
+/* BBR congestion control needs pacing.
+ * The same applies to SO_MAX_PACING_RATE.
+ * The sch_fq packet scheduler handles pacing efficiently,
+ * but it is not always installed/used.
+ * Return true if the TCP stack should pace packets itself.
+ */
+static bool tcp_needs_internal_pacing(const struct sock *sk)
+{
+ return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
+}
+
+static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)
+{
+ u64 len_ns;
+ u32 rate;
+
+ if (!tcp_needs_internal_pacing(sk))
+ return;
+ rate = sk->sk_pacing_rate;
+ if (!rate || rate == ~0U)
+ return;
+
+ /* Should account for header sizes as sch_fq does,
+ * but let's keep things simple.
+ */
+ len_ns = (u64)skb->len * NSEC_PER_SEC;
+ do_div(len_ns, rate);
+ hrtimer_start(&tcp_sk(sk)->pacing_timer,
+ ktime_add_ns(ktime_get(), len_ns),
+ HRTIMER_MODE_ABS_PINNED);
+}
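
The delay computed above is simply the time the segment occupies the wire at sk_pacing_rate. A standalone sketch of the arithmetic, done in 64 bits so len * NSEC_PER_SEC cannot overflow:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t pacing_delay_ns(uint32_t len, uint32_t rate)
{
	if (!rate || rate == ~0U)	/* 0 or ~0: rate is unlimited */
		return 0;
	return (uint64_t)len * NSEC_PER_SEC / rate;	/* bytes / (B/s) */
}

int main(void)
{
	/* A 1500-byte frame at 125 MB/s (~1 Gb/s) occupies 12 us. */
	printf("%llu ns\n",
	       (unsigned long long)pacing_delay_ns(1500, 125000000));
	return 0;
}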
+
/* This routine actually transmits TCP packets queued in by
* tcp_do_sendmsg(). This is used by both the initial
* transmission and possible later retransmissions.
@@ -931,8 +997,8 @@
BUG_ON(!skb || !tcp_skb_pcount(skb));
tp = tcp_sk(sk);
+ skb->skb_mstamp = tp->tcp_mstamp;
if (clone_it) {
- skb_mstamp_get(&skb->skb_mstamp);
TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
- tp->snd_una;
tcp_rate_skb_sent(sk, skb);
@@ -1034,6 +1100,7 @@
if (skb->len != tcp_header_size) {
tcp_event_data_sent(tp, sk);
tp->data_segs_out += tcp_skb_pcount(skb);
+ tcp_internal_pacing(sk, skb);
}
if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
@@ -1261,9 +1328,8 @@
return 0;
}
-/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
- * eventually). The difference is that pulled data not copied, but
- * immediately discarded.
+/* This is similar to __pskb_pull_tail(). The difference is that pulled
+ * data is not copied, but immediately discarded.
*/
static int __pskb_trim_head(struct sk_buff *skb, int len)
{
@@ -1298,7 +1364,6 @@
}
shinfo->nr_frags = k;
- skb_reset_tail_pointer(skb);
skb->data_len -= len;
skb->len = skb->data_len;
return len;
@@ -1408,7 +1473,7 @@
icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
icsk->icsk_mtup.probe_size = 0;
if (icsk->icsk_mtup.enabled)
- icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
+ icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
}
EXPORT_SYMBOL(tcp_mtup_init);
@@ -1509,7 +1574,7 @@
}
tp->snd_cwnd_used = 0;
}
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
}
static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
@@ -1530,14 +1595,14 @@
if (tcp_is_cwnd_limited(sk)) {
/* Network is fed fully. */
tp->snd_cwnd_used = 0;
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
} else {
/* Network starves. */
if (tp->packets_out > tp->snd_cwnd_used)
tp->snd_cwnd_used = tp->packets_out;
if (sysctl_tcp_slow_start_after_idle &&
- (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
+ (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
!ca_ops->cong_control)
tcp_cwnd_application_limited(sk);
@@ -1839,7 +1904,6 @@
const struct inet_connection_sock *icsk = inet_csk(sk);
u32 age, send_win, cong_win, limit, in_flight;
struct tcp_sock *tp = tcp_sk(sk);
- struct skb_mstamp now;
struct sk_buff *head;
int win_divisor;
@@ -1852,7 +1916,7 @@
/* Avoid bursty behavior by allowing defer
* only if the last write was recent.
*/
- if ((s32)(tcp_time_stamp - tp->lsndtime) > 0)
+ if ((s32)(tcp_jiffies32 - tp->lsndtime) > 0)
goto send_now;
in_flight = tcp_packets_in_flight(tp);
@@ -1895,8 +1959,8 @@
}
head = tcp_write_queue_head(sk);
- skb_mstamp_get(&now);
- age = skb_mstamp_us_delta(&now, &head->skb_mstamp);
+
+ age = tcp_stamp_us_delta(tp->tcp_mstamp, head->skb_mstamp);
/* If next ACK is likely to come too late (half srtt), do not defer */
if (age < (tp->srtt_us >> 4))
goto send_now;
@@ -1921,7 +1985,7 @@
s32 delta;
interval = net->ipv4.sysctl_tcp_probe_interval;
- delta = tcp_time_stamp - icsk->icsk_mtup.probe_timestamp;
+ delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
if (unlikely(delta >= interval * HZ)) {
int mss = tcp_current_mss(sk);
@@ -1933,7 +1997,7 @@
icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
/* Update probe time stamp */
- icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
+ icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
}
}
@@ -2086,6 +2150,12 @@
return -1;
}
+static bool tcp_pacing_check(const struct sock *sk)
+{
+ return tcp_needs_internal_pacing(sk) &&
+ hrtimer_active(&tcp_sk(sk)->pacing_timer);
+}
+
/* TCP Small Queues :
* Control the number of packets in qdisc/devices to two packets, or ~1 ms.
* (These limits are doubled for retransmits)
@@ -2130,7 +2200,7 @@
static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
{
- const u32 now = tcp_time_stamp;
+ const u32 now = tcp_jiffies32;
if (tp->chrono_type > TCP_CHRONO_UNSPEC)
tp->chrono_stat[tp->chrono_type - 1] += now - tp->chrono_start;
@@ -2207,15 +2277,19 @@
}
max_segs = tcp_tso_segs(sk, mss_now);
+ tcp_mstamp_refresh(tp);
while ((skb = tcp_send_head(sk))) {
unsigned int limit;
+ if (tcp_pacing_check(sk))
+ break;
+
tso_segs = tcp_init_tso_segs(skb, mss_now);
BUG_ON(!tso_segs);
if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
/* "skb_mstamp" is used as a start point for the retransmit timer */
- skb_mstamp_get(&skb->skb_mstamp);
+ skb->skb_mstamp = tp->tcp_mstamp;
goto repair; /* Skip network transmission */
}
@@ -2342,10 +2416,10 @@
timeout = max_t(u32, timeout, msecs_to_jiffies(10));
/* If RTO is shorter, just schedule TLP in its place. */
- tlp_time_stamp = tcp_time_stamp + timeout;
+ tlp_time_stamp = tcp_jiffies32 + timeout;
rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
- s32 delta = rto_time_stamp - tcp_time_stamp;
+ s32 delta = rto_time_stamp - tcp_jiffies32;
if (delta > 0)
timeout = delta;
}
@@ -2803,7 +2877,7 @@
skb_headroom(skb) >= 0xFFFF)) {
struct sk_buff *nskb;
- skb_mstamp_get(&skb->skb_mstamp);
+ skb->skb_mstamp = tp->tcp_mstamp;
nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
-ENOBUFS;
@@ -2878,6 +2952,10 @@
if (skb == tcp_send_head(sk))
break;
+
+ if (tcp_pacing_check(sk))
+ break;
+
/* we could do better than to assign each time */
if (!hole)
tp->retransmit_skb_hint = skb;
@@ -3015,7 +3093,7 @@
skb_reserve(skb, MAX_TCP_HEADER);
tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
TCPHDR_ACK | TCPHDR_RST);
- skb_mstamp_get(&skb->skb_mstamp);
+ tcp_mstamp_refresh(tcp_sk(sk));
/* Send it off. */
if (tcp_transmit_skb(sk, skb, 0, priority))
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
@@ -3111,10 +3189,10 @@
memset(&opts, 0, sizeof(opts));
#ifdef CONFIG_SYN_COOKIES
if (unlikely(req->cookie_ts))
- skb->skb_mstamp.stamp_jiffies = cookie_init_timestamp(req);
+ skb->skb_mstamp = cookie_init_timestamp(req);
else
#endif
- skb_mstamp_get(&skb->skb_mstamp);
+ skb->skb_mstamp = tcp_clock_us();
#ifdef CONFIG_TCP_MD5SIG
rcu_read_lock();
@@ -3193,8 +3271,9 @@
/* We'll fix this up when we get a response from the other end.
* See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
*/
- tp->tcp_header_len = sizeof(struct tcphdr) +
- (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
+ tp->tcp_header_len = sizeof(struct tcphdr);
+ if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
+ tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
if (tp->af_specific->md5_lookup(sk, sk))
@@ -3225,7 +3304,7 @@
tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
&tp->rcv_wnd,
&tp->window_clamp,
- sysctl_tcp_window_scaling,
+ sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
&rcv_wscale,
dst_metric(dst, RTAX_INITRWND));
@@ -3244,7 +3323,7 @@
if (likely(!tp->repair))
tp->rcv_nxt = 0;
else
- tp->rcv_tstamp = tcp_time_stamp;
+ tp->rcv_tstamp = tcp_jiffies32;
tp->rcv_wup = tp->rcv_nxt;
tp->copied_seq = tp->rcv_nxt;
@@ -3373,7 +3452,8 @@
return -ENOBUFS;
tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
- tp->retrans_stamp = tcp_time_stamp;
+ tcp_mstamp_refresh(tp);
+ tp->retrans_stamp = tcp_time_stamp(tp);
tcp_connect_queue_skb(sk, buff);
tcp_ecn_send_syn(sk, buff);
@@ -3492,7 +3572,6 @@
skb_set_tcp_pure_ack(buff);
/* Send it off, this clears delayed acks for us. */
- skb_mstamp_get(&buff->skb_mstamp);
tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
}
EXPORT_SYMBOL_GPL(tcp_send_ack);
@@ -3526,15 +3605,16 @@
* send it.
*/
tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
- skb_mstamp_get(&skb->skb_mstamp);
NET_INC_STATS(sock_net(sk), mib);
return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
}
+/* Called from setsockopt(... TCP_REPAIR) */
void tcp_send_window_probe(struct sock *sk)
{
if (sk->sk_state == TCP_ESTABLISHED) {
tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
+ tcp_mstamp_refresh(tcp_sk(sk));
tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
}
}
diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
index c6a9fa8..3330a37 100644
--- a/net/ipv4/tcp_rate.c
+++ b/net/ipv4/tcp_rate.c
@@ -78,7 +78,7 @@
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
- if (!scb->tx.delivered_mstamp.v64)
+ if (!scb->tx.delivered_mstamp)
return;
if (!rs->prior_delivered ||
@@ -89,9 +89,9 @@
rs->is_retrans = scb->sacked & TCPCB_RETRANS;
/* Find the duration of the "send phase" of this window: */
- rs->interval_us = skb_mstamp_us_delta(
- &skb->skb_mstamp,
- &scb->tx.first_tx_mstamp);
+ rs->interval_us = tcp_stamp_us_delta(
+ skb->skb_mstamp,
+ scb->tx.first_tx_mstamp);
/* Record send time of most recently ACKed packet: */
tp->first_tx_mstamp = skb->skb_mstamp;
@@ -101,7 +101,7 @@
* we don't need to reset since it'll be freed soon.
*/
if (scb->sacked & TCPCB_SACKED_ACKED)
- scb->tx.delivered_mstamp.v64 = 0;
+ scb->tx.delivered_mstamp = 0;
}
/* Update the connection delivery information and generate a rate sample. */
@@ -125,7 +125,7 @@
rs->acked_sacked = delivered; /* freshly ACKed or SACKed */
rs->losses = lost; /* freshly marked lost */
/* Return an invalid sample if no timing information is available. */
- if (!rs->prior_mstamp.v64) {
+ if (!rs->prior_mstamp) {
rs->delivered = -1;
rs->interval_us = -1;
return;
@@ -138,8 +138,8 @@
* longer phase.
*/
snd_us = rs->interval_us; /* send phase */
- ack_us = skb_mstamp_us_delta(&tp->tcp_mstamp,
- &rs->prior_mstamp); /* ack phase */
+ ack_us = tcp_stamp_us_delta(tp->tcp_mstamp,
+ rs->prior_mstamp); /* ack phase */
rs->interval_us = max(snd_us, ack_us);
/* Normally we expect interval_us >= min-rtt.
@@ -185,3 +185,4 @@
tp->app_limited =
(tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
}
+EXPORT_SYMBOL_GPL(tcp_rate_check_app_limited);
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index 362b8c7..fe9a493 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -17,12 +17,9 @@
}
}
-static bool tcp_rack_sent_after(const struct skb_mstamp *t1,
- const struct skb_mstamp *t2,
- u32 seq1, u32 seq2)
+static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
- return skb_mstamp_after(t1, t2) ||
- (t1->v64 == t2->v64 && after(seq1, seq2));
+ return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}
/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
@@ -72,14 +69,14 @@
scb->sacked & TCPCB_SACKED_ACKED)
continue;
- if (tcp_rack_sent_after(&tp->rack.mstamp, &skb->skb_mstamp,
+ if (tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
tp->rack.end_seq, scb->end_seq)) {
/* Step 3 in draft-cheng-tcpm-rack-00.txt:
* A packet is lost if its elapsed time is beyond
* the recent RTT plus the reordering window.
*/
- u32 elapsed = skb_mstamp_us_delta(&tp->tcp_mstamp,
- &skb->skb_mstamp);
+ u32 elapsed = tcp_stamp_us_delta(tp->tcp_mstamp,
+ skb->skb_mstamp);
s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed;
if (remaining < 0) {
@@ -127,16 +124,16 @@
* draft-cheng-tcpm-rack-00.txt
*/
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
- const struct skb_mstamp *xmit_time)
+ u64 xmit_time)
{
u32 rtt_us;
- if (tp->rack.mstamp.v64 &&
- !tcp_rack_sent_after(xmit_time, &tp->rack.mstamp,
+ if (tp->rack.mstamp &&
+ !tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
end_seq, tp->rack.end_seq))
return;
- rtt_us = skb_mstamp_us_delta(&tp->tcp_mstamp, xmit_time);
+ rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
if (sacked & TCPCB_RETRANS) {
/* If the sacked packet was retransmitted, it's ambiguous
* whether the retransmission or the original (or the prior
@@ -152,7 +149,7 @@
return;
}
tp->rack.rtt_us = rtt_us;
- tp->rack.mstamp = *xmit_time;
+ tp->rack.mstamp = xmit_time;
tp->rack.end_seq = end_seq;
tp->rack.advanced = 1;
}
@@ -166,7 +163,6 @@
u32 timeout, prior_inflight;
prior_inflight = tcp_packets_in_flight(tp);
- skb_mstamp_get(&tp->tcp_mstamp);
tcp_rack_detect_loss(sk, &timeout);
if (prior_inflight != tcp_packets_in_flight(tp)) {
if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
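
With plain u64 microsecond stamps, the RACK "sent after" test reduces to an integer comparison plus a sequence-number tie-break for packets stamped in the same microsecond. A self-contained userspace rendering of the tcp_rack_sent_after() logic above (after() sketched for modulo-2^32 sequence space):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* kernel's after(): seq1 newer than seq2 in modulo-2^32 space */
    static bool after(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) > 0;
    }

    static bool sent_after(uint64_t t1, uint64_t t2,
                           uint32_t seq1, uint32_t seq2)
    {
        return t1 > t2 || (t1 == t2 && after(seq1, seq2));
    }

    int main(void)
    {
        /* equal stamps: the sequence number breaks the tie */
        printf("%d\n", sent_after(100, 100, 2000, 1000)); /* 1 */
        /* a later stamp wins regardless of sequence order */
        printf("%d\n", sent_after(101, 100, 1000, 2000)); /* 1 */
        return 0;
    }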
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 1467254..c0feeee 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -63,7 +63,7 @@
/* If peer does not open window for long time, or did not transmit
* anything for long time, penalize it. */
- if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
+ if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
shift++;
/* If some dubious ICMP arrived, penalize even more. */
@@ -73,7 +73,7 @@
if (tcp_check_oom(sk, shift)) {
/* Catch exceptional cases, when connection requires reset.
* 1. Last segment was sent recently. */
- if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
+ if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
/* 2. Window is closed. */
(!tp->snd_wnd && !tp->packets_out))
do_reset = true;
@@ -115,7 +115,7 @@
if (net->ipv4.sysctl_tcp_mtu_probing) {
if (!icsk->icsk_mtup.enabled) {
icsk->icsk_mtup.enabled = 1;
- icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
+ icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
} else {
struct net *net = sock_net(sk);
@@ -139,22 +139,18 @@
* @timeout: A custom timeout value.
* If set to 0 the default timeout is calculated and used.
* Using TCP_RTO_MIN and the number of unsuccessful retransmits.
- * @syn_set: true if the SYN Bit was set.
*
* The default "timeout" value this function can calculate and use
* is equivalent to the timeout of a TCP Connection
* after "boundary" unsuccessful, exponentially backed-off
- * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if
- * syn_set flag is set.
- *
+ * retransmissions with an initial RTO of TCP_RTO_MIN.
*/
static bool retransmits_timed_out(struct sock *sk,
unsigned int boundary,
- unsigned int timeout,
- bool syn_set)
+ unsigned int timeout)
{
+ const unsigned int rto_base = TCP_RTO_MIN;
unsigned int linear_backoff_thresh, start_ts;
- unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
if (!inet_csk(sk)->icsk_retransmits)
return false;
@@ -172,7 +168,7 @@
timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
}
- return (tcp_time_stamp - start_ts) >= timeout;
+ return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= jiffies_to_msecs(timeout);
}
/* A write timeout has occurred. Process the after effects. */
@@ -181,8 +177,8 @@
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct net *net = sock_net(sk);
+ bool expired, do_reset;
int retry_until;
- bool do_reset, syn_set = false;
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
if (icsk->icsk_retransmits) {
@@ -196,9 +192,9 @@
sk_rethink_txhash(sk);
}
retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
- syn_set = true;
+ expired = icsk->icsk_retransmits >= retry_until;
} else {
- if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0, 0)) {
+ if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
/* Some middle-boxes may black-hole Fast Open _after_
* the handshake. Therefore we conservatively disable
* Fast Open on this path on recurring timeouts after
@@ -224,15 +220,15 @@
retry_until = tcp_orphan_retries(sk, alive);
do_reset = alive ||
- !retransmits_timed_out(sk, retry_until, 0, 0);
+ !retransmits_timed_out(sk, retry_until, 0);
if (tcp_out_of_resources(sk, do_reset))
return 1;
}
+ expired = retransmits_timed_out(sk, retry_until,
+ icsk->icsk_user_timeout);
}
-
- if (retransmits_timed_out(sk, retry_until,
- syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
+ if (expired) {
/* Has it gone just too far? */
tcp_write_err(sk);
return 1;
@@ -339,9 +335,10 @@
*/
start_ts = tcp_skb_timestamp(tcp_send_head(sk));
if (!start_ts)
- skb_mstamp_get(&tcp_send_head(sk)->skb_mstamp);
+ tcp_send_head(sk)->skb_mstamp = tp->tcp_mstamp;
else if (icsk->icsk_user_timeout &&
- (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout)
+ (s32)(tcp_time_stamp(tp) - start_ts) >
+ jiffies_to_msecs(icsk->icsk_user_timeout))
goto abort;
max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
@@ -451,7 +448,7 @@
tp->snd_una, tp->snd_nxt);
}
#endif
- if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
+ if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) {
tcp_write_err(sk);
goto out;
}
@@ -539,7 +536,7 @@
icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
- if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0, 0))
+ if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0))
__sk_dst_reset(sk);
out:;
@@ -561,6 +558,7 @@
goto out;
}
+ tcp_mstamp_refresh(tcp_sk(sk));
event = icsk->icsk_pending;
switch (event) {
@@ -710,4 +708,7 @@
{
inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
&tcp_keepalive_timer);
+ hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS_PINNED);
+ tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;
}
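
retransmits_timed_out() above sums an exponentially backed-off RTO series: each retry doubles the RTO from rto_base until TCP_RTO_MAX caps it, giving ((2 << thresh) - 1) * rto_base for the doubling portion plus one TCP_RTO_MAX per capped retry. A worked userspace check with illustrative millisecond values (TCP_RTO_MIN = 200 ms, TCP_RTO_MAX = 120 s; the kernel computes thresh with ilog2() and works in jiffies):

    #include <stdio.h>

    int main(void)
    {
        const unsigned long long rto_min = 200, rto_max = 120000; /* ms */
        unsigned int boundary = 15;   /* e.g. the tcp_retries2 default */
        unsigned int thresh = 0;      /* kernel: ilog2(rto_max / rto_min) */
        unsigned long long timeout;

        while (rto_min << (thresh + 1) <= rto_max)
            thresh++;                 /* -> 9 for these values */

        if (boundary <= thresh)
            timeout = ((2ULL << boundary) - 1) * rto_min;
        else
            timeout = ((2ULL << thresh) - 1) * rto_min +
                      (boundary - thresh) * rto_max;

        /* prints 924600 ms, roughly 15.4 minutes */
        printf("timeout after %u retransmits: %llu ms\n", boundary, timeout);
        return 0;
    }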
diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c
new file mode 100644
index 0000000..e855ea7
--- /dev/null
+++ b/net/ipv4/tcp_ulp.c
@@ -0,0 +1,134 @@
+/*
+ * Pluggable TCP upper layer protocol support.
+ *
+ * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/gfp.h>
+#include <net/tcp.h>
+
+static DEFINE_SPINLOCK(tcp_ulp_list_lock);
+static LIST_HEAD(tcp_ulp_list);
+
+/* Simple linear search, don't expect many entries! */
+static struct tcp_ulp_ops *tcp_ulp_find(const char *name)
+{
+ struct tcp_ulp_ops *e;
+
+ list_for_each_entry_rcu(e, &tcp_ulp_list, list) {
+ if (strcmp(e->name, name) == 0)
+ return e;
+ }
+
+ return NULL;
+}
+
+static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name)
+{
+ const struct tcp_ulp_ops *ulp = NULL;
+
+ rcu_read_lock();
+ ulp = tcp_ulp_find(name);
+
+#ifdef CONFIG_MODULES
+ if (!ulp && capable(CAP_NET_ADMIN)) {
+ rcu_read_unlock();
+ request_module("%s", name);
+ rcu_read_lock();
+ ulp = tcp_ulp_find(name);
+ }
+#endif
+ if (!ulp || !try_module_get(ulp->owner))
+ ulp = NULL;
+
+ rcu_read_unlock();
+ return ulp;
+}
+
+/* Attach new upper layer protocol to the list
+ * of available protocols.
+ */
+int tcp_register_ulp(struct tcp_ulp_ops *ulp)
+{
+ int ret = 0;
+
+ spin_lock(&tcp_ulp_list_lock);
+ if (tcp_ulp_find(ulp->name)) {
+ pr_notice("%s already registered or non-unique name\n",
+ ulp->name);
+ ret = -EEXIST;
+ } else {
+ list_add_tail_rcu(&ulp->list, &tcp_ulp_list);
+ }
+ spin_unlock(&tcp_ulp_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tcp_register_ulp);
+
+void tcp_unregister_ulp(struct tcp_ulp_ops *ulp)
+{
+ spin_lock(&tcp_ulp_list_lock);
+ list_del_rcu(&ulp->list);
+ spin_unlock(&tcp_ulp_list_lock);
+
+ synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(tcp_unregister_ulp);
+
+/* Build a string with the list of available upper layer protocol values */
+void tcp_get_available_ulp(char *buf, size_t maxlen)
+{
+ struct tcp_ulp_ops *ulp_ops;
+ size_t offs = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ulp_ops, &tcp_ulp_list, list) {
+ offs += snprintf(buf + offs, maxlen - offs,
+ "%s%s",
+ offs == 0 ? "" : " ", ulp_ops->name);
+ }
+ rcu_read_unlock();
+}
+
+void tcp_cleanup_ulp(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ if (!icsk->icsk_ulp_ops)
+ return;
+
+ if (icsk->icsk_ulp_ops->release)
+ icsk->icsk_ulp_ops->release(sk);
+ module_put(icsk->icsk_ulp_ops->owner);
+}
+
+/* Change upper layer protocol for socket */
+int tcp_set_ulp(struct sock *sk, const char *name)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ const struct tcp_ulp_ops *ulp_ops;
+ int err = 0;
+
+ if (icsk->icsk_ulp_ops)
+ return -EEXIST;
+
+ ulp_ops = __tcp_ulp_find_autoload(name);
+ if (!ulp_ops)
+ err = -ENOENT;
+ else
+ err = ulp_ops->init(sk);
+
+ if (err)
+ goto out;
+
+ icsk->icsk_ulp_ops = ulp_ops;
+ out:
+ return err;
+}
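
tcp_ulp.c only dereferences four tcp_ulp_ops fields (name, owner, init, release) plus the list linkage, so a minimal ULP is small. A hypothetical module skeleton against this API; the name "demo" and both callbacks are placeholders, not anything in the tree, and the ULP is selected per socket via the TCP_ULP socket option added alongside this file:

    #include <linux/module.h>
    #include <net/tcp.h>

    static int demo_ulp_init(struct sock *sk)
    {
        /* swap in a wrapped sk->sk_prot, allocate per-socket state, ... */
        return 0;
    }

    static void demo_ulp_release(struct sock *sk)
    {
        /* undo whatever init() set up */
    }

    static struct tcp_ulp_ops demo_ulp_ops = {
        .name    = "demo",
        .owner   = THIS_MODULE,
        .init    = demo_ulp_init,
        .release = demo_ulp_release,
    };

    static int __init demo_ulp_load(void)
    {
        return tcp_register_ulp(&demo_ulp_ops);
    }

    static void __exit demo_ulp_unload(void)
    {
        tcp_unregister_ulp(&demo_ulp_ops);
    }

    module_init(demo_ulp_load);
    module_exit(demo_ulp_unload);
    MODULE_LICENSE("GPL");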
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index 9775453..bec9caf 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -68,7 +68,7 @@
w->cumul_ack = 0;
w->reset_rtt_min = 1;
w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
- w->rtt_win_sx = tcp_time_stamp;
+ w->rtt_win_sx = tcp_jiffies32;
w->snd_una = tcp_sk(sk)->snd_una;
w->first_ack = 1;
}
@@ -116,7 +116,7 @@
static void westwood_update_window(struct sock *sk)
{
struct westwood *w = inet_csk_ca(sk);
- s32 delta = tcp_time_stamp - w->rtt_win_sx;
+ s32 delta = tcp_jiffies32 - w->rtt_win_sx;
/* Initialize w->snd_una with the first acked sequence number in order
* to fix mismatch between tp->snd_una and w->snd_una for the first
@@ -140,7 +140,7 @@
westwood_filter(w, delta);
w->bk = 0;
- w->rtt_win_sx = tcp_time_stamp;
+ w->rtt_win_sx = tcp_jiffies32;
}
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1d6219b..067a607 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1163,23 +1163,110 @@
return ret;
}
+/* Copy as much information as possible into skb->dev_scratch to avoid
+ * potentially multiple cache misses on dequeue().
+ */
+#if BITS_PER_LONG == 64
+
+/* we can store several pieces of information here: truesize and len, plus
+ * the bits needed to compute skb_csum_unnecessary; these would otherwise
+ * sit on cold cache lines at recvmsg time.
+ * skb->len can be stored in 16 bits since the UDP header has already been
+ * validated and pulled.
+ */
+struct udp_dev_scratch {
+ u32 truesize;
+ u16 len;
+ bool is_linear;
+ bool csum_unnecessary;
+};
+
+static void udp_set_dev_scratch(struct sk_buff *skb)
+{
+ struct udp_dev_scratch *scratch;
+
+ BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long));
+ scratch = (struct udp_dev_scratch *)&skb->dev_scratch;
+ scratch->truesize = skb->truesize;
+ scratch->len = skb->len;
+ scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
+ scratch->is_linear = !skb_is_nonlinear(skb);
+}
+
+static int udp_skb_truesize(struct sk_buff *skb)
+{
+ return ((struct udp_dev_scratch *)&skb->dev_scratch)->truesize;
+}
+
+static unsigned int udp_skb_len(struct sk_buff *skb)
+{
+ return ((struct udp_dev_scratch *)&skb->dev_scratch)->len;
+}
+
+static bool udp_skb_csum_unnecessary(struct sk_buff *skb)
+{
+ return ((struct udp_dev_scratch *)&skb->dev_scratch)->csum_unnecessary;
+}
+
+static bool udp_skb_is_linear(struct sk_buff *skb)
+{
+ return ((struct udp_dev_scratch *)&skb->dev_scratch)->is_linear;
+}
+
+#else
+static void udp_set_dev_scratch(struct sk_buff *skb)
+{
+ skb->dev_scratch = skb->truesize;
+}
+
+static int udp_skb_truesize(struct sk_buff *skb)
+{
+ return skb->dev_scratch;
+}
+
+static unsigned int udp_skb_len(struct sk_buff *skb)
+{
+ return skb->len;
+}
+
+static bool udp_skb_csum_unnecessary(struct sk_buff *skb)
+{
+ return skb_csum_unnecessary(skb);
+}
+
+static bool udp_skb_is_linear(struct sk_buff *skb)
+{
+ return !skb_is_nonlinear(skb);
+}
+#endif
+
/* fully reclaim rmem/fwd memory allocated for skb */
-static void udp_rmem_release(struct sock *sk, int size, int partial)
+static void udp_rmem_release(struct sock *sk, int size, int partial,
+ bool rx_queue_lock_held)
{
struct udp_sock *up = udp_sk(sk);
+ struct sk_buff_head *sk_queue;
int amt;
if (likely(partial)) {
up->forward_deficit += size;
size = up->forward_deficit;
if (size < (sk->sk_rcvbuf >> 2) &&
- !skb_queue_empty(&sk->sk_receive_queue))
+ !skb_queue_empty(&up->reader_queue))
return;
} else {
size += up->forward_deficit;
}
up->forward_deficit = 0;
+ /* acquire the sk_receive_queue for fwd allocated memory scheduling,
+ * if the caller doesn't hold it already
+ */
+ sk_queue = &sk->sk_receive_queue;
+ if (!rx_queue_lock_held)
+ spin_lock(&sk_queue->lock);
+
sk->sk_forward_alloc += size;
amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
sk->sk_forward_alloc -= amt;
@@ -1188,19 +1275,33 @@
__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);
atomic_sub(size, &sk->sk_rmem_alloc);
+
+ /* this can save us from acquiring the rx queue lock on next receive */
+ skb_queue_splice_tail_init(sk_queue, &up->reader_queue);
+
+ if (!rx_queue_lock_held)
+ spin_unlock(&sk_queue->lock);
}
-/* Note: called with sk_receive_queue.lock held.
+/* Note: called with reader_queue.lock held.
* Instead of using skb->truesize here, find a copy of it in skb->dev_scratch
* This avoids a cache line miss while receive_queue lock is held.
* Look at __udp_enqueue_schedule_skb() to find where this copy is done.
*/
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
{
- udp_rmem_release(sk, skb->dev_scratch, 1);
+ prefetch(&skb->data);
+ udp_rmem_release(sk, udp_skb_truesize(skb), 1, false);
}
EXPORT_SYMBOL(udp_skb_destructor);
+/* as above, but the caller holds the rx queue lock, too */
+static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
+{
+ prefetch(&skb->data);
+ udp_rmem_release(sk, udp_skb_truesize(skb), 1, true);
+}
+
/* Idea of busylocks is to let producers grab an extra spinlock
* to relieve pressure on the receive_queue spinlock shared by consumer.
* Under flood, this means that only one producer can be in line
@@ -1252,10 +1353,7 @@
busy = busylock_acquire(sk);
}
size = skb->truesize;
- /* Copy skb->truesize into skb->dev_scratch to avoid a cache line miss
- * in udp_skb_destructor()
- */
- skb->dev_scratch = size;
+ udp_set_dev_scratch(skb);
/* we drop only if the receive buf is full and the receive
* queue contains some other skb
@@ -1306,14 +1404,16 @@
void udp_destruct_sock(struct sock *sk)
{
/* reclaim completely the forward allocated memory */
+ struct udp_sock *up = udp_sk(sk);
unsigned int total = 0;
struct sk_buff *skb;
- while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
+ skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue);
+ while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) {
total += skb->truesize;
kfree_skb(skb);
}
- udp_rmem_release(sk, total, 0);
+ udp_rmem_release(sk, total, 0, true);
inet_sock_destruct(sk);
}
@@ -1321,6 +1421,7 @@
int udp_init_sock(struct sock *sk)
{
+ skb_queue_head_init(&udp_sk(sk)->reader_queue);
sk->sk_destruct = udp_destruct_sock;
return 0;
}
@@ -1334,10 +1435,31 @@
sk_peek_offset_bwd(sk, len);
unlock_sock_fast(sk, slow);
}
- consume_skb(skb);
+
+ consume_stateless_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_consume_udp);
+static struct sk_buff *__first_packet_length(struct sock *sk,
+ struct sk_buff_head *rcvq,
+ int *total)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_peek(rcvq)) != NULL &&
+ udp_lib_checksum_complete(skb)) {
+ __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
+ IS_UDPLITE(sk));
+ __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
+ IS_UDPLITE(sk));
+ atomic_inc(&sk->sk_drops);
+ __skb_unlink(skb, rcvq);
+ *total += skb->truesize;
+ kfree_skb(skb);
+ }
+ return skb;
+}
+
/**
* first_packet_length - return length of first packet in receive queue
* @sk: socket
@@ -1347,26 +1469,24 @@
*/
static int first_packet_length(struct sock *sk)
{
- struct sk_buff_head *rcvq = &sk->sk_receive_queue;
+ struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
+ struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
struct sk_buff *skb;
int total = 0;
int res;
spin_lock_bh(&rcvq->lock);
- while ((skb = skb_peek(rcvq)) != NULL &&
- udp_lib_checksum_complete(skb)) {
- __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
- IS_UDPLITE(sk));
- __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
- IS_UDPLITE(sk));
- atomic_inc(&sk->sk_drops);
- __skb_unlink(skb, rcvq);
- total += skb->truesize;
- kfree_skb(skb);
+ skb = __first_packet_length(sk, rcvq, &total);
+ if (!skb && !skb_queue_empty(sk_queue)) {
+ spin_lock(&sk_queue->lock);
+ skb_queue_splice_tail_init(sk_queue, rcvq);
+ spin_unlock(&sk_queue->lock);
+
+ skb = __first_packet_length(sk, rcvq, &total);
}
res = skb ? skb->len : -1;
if (total)
- udp_rmem_release(sk, total, 1);
+ udp_rmem_release(sk, total, 1, false);
spin_unlock_bh(&rcvq->lock);
return res;
}
@@ -1400,6 +1520,89 @@
}
EXPORT_SYMBOL(udp_ioctl);
+struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
+ int noblock, int *peeked, int *off, int *err)
+{
+ struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
+ struct sk_buff_head *queue;
+ struct sk_buff *last;
+ long timeo;
+ int error;
+
+ queue = &udp_sk(sk)->reader_queue;
+ flags |= noblock ? MSG_DONTWAIT : 0;
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ do {
+ struct sk_buff *skb;
+
+ error = sock_error(sk);
+ if (error)
+ break;
+
+ error = -EAGAIN;
+ *peeked = 0;
+ do {
+ spin_lock_bh(&queue->lock);
+ skb = __skb_try_recv_from_queue(sk, queue, flags,
+ udp_skb_destructor,
+ peeked, off, err,
+ &last);
+ if (skb) {
+ spin_unlock_bh(&queue->lock);
+ return skb;
+ }
+
+ if (skb_queue_empty(sk_queue)) {
+ spin_unlock_bh(&queue->lock);
+ goto busy_check;
+ }
+
+ /* refill the reader queue and walk it again;
+ * keep both queues locked to avoid re-acquiring
+ * the sk_receive_queue lock if fwd memory scheduling
+ * is needed.
+ */
+ spin_lock(&sk_queue->lock);
+ skb_queue_splice_tail_init(sk_queue, queue);
+
+ skb = __skb_try_recv_from_queue(sk, queue, flags,
+ udp_skb_dtor_locked,
+ peeked, off, err,
+ &last);
+ spin_unlock(&sk_queue->lock);
+ spin_unlock_bh(&queue->lock);
+ if (skb)
+ return skb;
+
+busy_check:
+ if (!sk_can_busy_loop(sk))
+ break;
+
+ sk_busy_loop(sk, flags & MSG_DONTWAIT);
+ } while (!skb_queue_empty(sk_queue));
+
+ /* sk_queue is empty, reader_queue may contain peeked packets */
+ } while (timeo &&
+ !__skb_wait_for_more_packets(sk, &error, &timeo,
+ (struct sk_buff *)sk_queue));
+
+ *err = error;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(__skb_recv_udp);
+
+static int copy_linear_skb(struct sk_buff *skb, int len, int off,
+ struct iov_iter *to)
+{
+ int n, copy = len - off;
+
+ n = copy_to_iter(skb->data + off, copy, to);
+ if (n == copy)
+ return 0;
+
+ return -EFAULT;
+}
+
/*
* This should be easy, if there is something there we
* return it, otherwise we block.
@@ -1426,7 +1629,7 @@
if (!skb)
return err;
- ulen = skb->len;
+ ulen = udp_skb_len(skb);
copied = len;
if (copied > ulen - off)
copied = ulen - off;
@@ -1441,14 +1644,18 @@
if (copied < ulen || peeking ||
(is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
- checksum_valid = !udp_lib_checksum_complete(skb);
+ checksum_valid = udp_skb_csum_unnecessary(skb) ||
+ !__udp_lib_checksum_complete(skb);
if (!checksum_valid)
goto csum_copy_err;
}
- if (checksum_valid || skb_csum_unnecessary(skb))
- err = skb_copy_datagram_msg(skb, off, msg, copied);
- else {
+ if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
+ if (udp_skb_is_linear(skb))
+ err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
+ else
+ err = skb_copy_datagram_msg(skb, off, msg, copied);
+ } else {
err = skb_copy_and_csum_datagram_msg(skb, off, msg);
if (err == -EINVAL)
@@ -1490,7 +1697,8 @@
return err;
csum_copy_err:
- if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) {
+ if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
+ udp_skb_destructor)) {
UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
}
@@ -1624,6 +1832,9 @@
sk_mark_napi_id_once(sk, skb);
}
+ /* clear all pending head states while they are hot in the cache */
+ skb_release_head_state(skb);
+
rc = __udp_enqueue_schedule_skb(sk, skb);
if (rc < 0) {
int is_udplite = IS_UDPLITE(sk);
@@ -1738,6 +1949,7 @@
}
}
+ prefetch(&sk->sk_rmem_alloc);
if (rcu_access_pointer(sk->sk_filter) &&
udp_lib_checksum_complete(skb))
goto csum_error;
@@ -1766,9 +1978,10 @@
{
struct dst_entry *old;
- dst_hold(dst);
- old = xchg(&sk->sk_rx_dst, dst);
- dst_release(old);
+ if (dst_hold_safe(dst)) {
+ old = xchg(&sk->sk_rx_dst, dst);
+ dst_release(old);
+ }
}
/*
@@ -2092,13 +2305,11 @@
if (dst)
dst = dst_check(dst, 0);
if (dst) {
- /* DST_NOCACHE can not be used without taking a reference */
- if (dst->flags & DST_NOCACHE) {
- if (likely(atomic_inc_not_zero(&dst->__refcnt)))
- skb_dst_set(skb, dst);
- } else {
- skb_dst_set_noref(skb, dst);
- }
+ /* set noref for now.
+ * any place that wants to hold the dst has to call
+ * dst_hold_safe()
+ */
+ skb_dst_set_noref(skb, dst);
}
}
@@ -2325,6 +2536,9 @@
unsigned int mask = datagram_poll(file, sock, wait);
struct sock *sk = sock->sk;
+ if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
+ mask |= POLLIN | POLLRDNORM;
+
sock_rps_record_flow(sk);
/* Check for false positives due to checksum errors */
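
The 64-bit branch of the udp.c changes overlays a small struct on the one-word skb->dev_scratch so recvmsg can read truesize, len and the checksum/linearity bits from a single hot word instead of several cold skb cache lines; the BUILD_BUG_ON enforces the size invariant at compile time. The same overlay pattern in a userspace miniature (assumes a 64-bit long, matching the BITS_PER_LONG == 64 branch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct scratch {                /* mirrors udp_dev_scratch */
        uint32_t truesize;
        uint16_t len;
        bool is_linear;
        bool csum_unnecessary;
    };

    int main(void)
    {
        unsigned long word = 0;     /* stand-in for skb->dev_scratch */
        struct scratch *s = (struct scratch *)&word;

        /* the kernel's BUILD_BUG_ON, in userspace clothing */
        _Static_assert(sizeof(struct scratch) <= sizeof(unsigned long),
                       "scratch must fit in one word");

        s->truesize = 2048;         /* filled once at enqueue time */
        s->len = 1472;
        s->is_linear = true;
        s->csum_unnecessary = true;

        /* dequeue reads everything back from the single hot word */
        printf("truesize=%u len=%u linear=%d csum_ok=%d\n",
               s->truesize, s->len, s->is_linear, s->csum_unnecessary);
        return 0;
    }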
diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c
index 71acd00..856d2df 100644
--- a/net/ipv4/xfrm4_mode_beet.c
+++ b/net/ipv4/xfrm4_mode_beet.c
@@ -57,8 +57,7 @@
xfrm4_beet_make_header(skb);
- ph = (struct ip_beet_phdr *)
- __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdrlen);
+ ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdrlen);
top_iph = ip_hdr(skb);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 686c923..a885ffc 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -963,6 +963,7 @@
struct net *net = dev_net(idev->dev);
struct inet6_ifaddr *ifa = NULL;
struct rt6_info *rt;
+ struct in6_validator_info i6vi;
unsigned int hash;
int err = 0;
int addr_type = ipv6_addr_type(addr);
@@ -974,6 +975,9 @@
return ERR_PTR(-EADDRNOTAVAIL);
rcu_read_lock_bh();
+
+ in6_dev_hold(idev);
+
if (idev->dead) {
err = -ENODEV; /*XXX*/
goto out2;
@@ -984,6 +988,17 @@
goto out2;
}
+ i6vi.i6vi_addr = *addr;
+ i6vi.i6vi_dev = idev;
+ rcu_read_unlock_bh();
+
+ err = inet6addr_validator_notifier_call_chain(NETDEV_UP, &i6vi);
+
+ rcu_read_lock_bh();
+ err = notifier_to_errno(err);
+ if (err)
+ goto out2;
+
spin_lock(&addrconf_hash_lock);
/* Ignore adding duplicate addresses on an interface */
@@ -1034,7 +1049,6 @@
ifa->rt = rt;
ifa->idev = idev;
- in6_dev_hold(idev);
/* For caller */
in6_ifa_hold(ifa);
@@ -1062,6 +1076,7 @@
inet6addr_notifier_call_chain(NETDEV_UP, ifa);
else {
kfree(ifa);
+ in6_dev_put(idev);
ifa = ERR_PTR(err);
}
@@ -2280,7 +2295,7 @@
cfg.fc_flags |= RTF_NONEXTHOP;
#endif
- ip6_route_add(&cfg);
+ ip6_route_add(&cfg, NULL);
}
@@ -2335,7 +2350,7 @@
ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
- ip6_route_add(&cfg);
+ ip6_route_add(&cfg, NULL);
}
static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
@@ -5561,8 +5576,8 @@
ip6_del_rt(rt);
}
if (ifp->rt) {
- dst_hold(&ifp->rt->dst);
- ip6_del_rt(ifp->rt);
+ if (dst_hold_safe(&ifp->rt->dst))
+ ip6_del_rt(ifp->rt);
}
rt_genid_bump_ipv6(net);
break;
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index bfa941f..9e3488d 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -88,6 +88,7 @@
EXPORT_SYMBOL(__ipv6_addr_type);
static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
+static ATOMIC_NOTIFIER_HEAD(inet6addr_validator_chain);
int register_inet6addr_notifier(struct notifier_block *nb)
{
@@ -107,6 +108,24 @@
}
EXPORT_SYMBOL(inet6addr_notifier_call_chain);
+int register_inet6addr_validator_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&inet6addr_validator_chain, nb);
+}
+EXPORT_SYMBOL(register_inet6addr_validator_notifier);
+
+int unregister_inet6addr_validator_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&inet6addr_validator_chain, nb);
+}
+EXPORT_SYMBOL(unregister_inet6addr_validator_notifier);
+
+int inet6addr_validator_notifier_call_chain(unsigned long val, void *v)
+{
+ return atomic_notifier_call_chain(&inet6addr_validator_chain, val, v);
+}
+EXPORT_SYMBOL(inet6addr_validator_notifier_call_chain);
+
static int eafnosupport_ipv6_dst_lookup(struct net *net, struct sock *u1,
struct dst_entry **u2,
struct flowi6 *u3)
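
The new validator chain runs before an IPv6 address is committed: addrconf.c raises inet6addr_validator_notifier_call_chain(NETDEV_UP, &i6vi) and converts the verdict with notifier_to_errno(), so a subscriber can veto the address. A hypothetical subscriber sketch; demo_dev_owns() is an invented policy hook, not a real kernel function:

    #include <linux/module.h>
    #include <linux/notifier.h>
    #include <net/addrconf.h>

    /* hypothetical policy hook, NOT a real kernel function */
    static bool demo_dev_owns(const struct inet6_dev *idev)
    {
        return false;
    }

    static int demo_inet6addr_validate(struct notifier_block *nb,
                                       unsigned long event, void *ptr)
    {
        struct in6_validator_info *i6vi = ptr;

        if (demo_dev_owns(i6vi->i6vi_dev))
            return notifier_from_errno(-EADDRNOTAVAIL); /* veto */

        return NOTIFY_DONE;
    }

    static struct notifier_block demo_nb = {
        .notifier_call = demo_inet6addr_validate,
    };

    /* from module init/exit:
     *   register_inet6addr_validator_notifier(&demo_nb);
     *   unregister_inet6addr_validator_notifier(&demo_nb);
     */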
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index dda6035..755f382 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -423,7 +423,9 @@
ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
sg_init_table(sg, nfrags + sglists);
- skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ if (unlikely(err < 0))
+ goto out_free;
if (x->props.flags & XFRM_STATE_ESN) {
/* Attach seqhi sg right after packet payload */
@@ -606,7 +608,9 @@
ip6h->hop_limit = 0;
sg_init_table(sg, nfrags + sglists);
- skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ if (unlikely(err < 0))
+ goto out_free;
if (x->props.flags & XFRM_STATE_ESN) {
/* Attach seqhi sg right after packet payload */
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 1fe99ba..d8b40ff 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -346,9 +346,11 @@
esph = esp_output_set_esn(skb, x, ip_esp_hdr(skb), seqhi);
sg_init_table(sg, esp->nfrags);
- skb_to_sgvec(skb, sg,
- (unsigned char *)esph - skb->data,
- assoclen + ivlen + esp->clen + alen);
+ err = skb_to_sgvec(skb, sg,
+ (unsigned char *)esph - skb->data,
+ assoclen + ivlen + esp->clen + alen);
+ if (unlikely(err < 0))
+ goto error;
if (!esp->inplace) {
int allocsize;
@@ -372,9 +374,11 @@
spin_unlock_bh(&x->lock);
sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
- skb_to_sgvec(skb, dsg,
- (unsigned char *)esph - skb->data,
- assoclen + ivlen + esp->clen + alen);
+ err = skb_to_sgvec(skb, dsg,
+ (unsigned char *)esph - skb->data,
+ assoclen + ivlen + esp->clen + alen);
+ if (unlikely(err < 0))
+ goto error;
}
if ((x->props.flags & XFRM_STATE_ESN))
@@ -534,7 +538,7 @@
* decryption.
*/
if ((x->props.flags & XFRM_STATE_ESN)) {
- esph = (void *)skb_push(skb, 4);
+ esph = skb_push(skb, 4);
*seqhi = esph->spi;
esph->spi = esph->seq_no;
esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
@@ -618,7 +622,9 @@
esp_input_set_header(skb, seqhi);
sg_init_table(sg, nfrags);
- skb_to_sgvec(skb, sg, 0, skb->len);
+ ret = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (unlikely(ret < 0))
+ goto out;
skb->ip_summed = CHECKSUM_NONE;
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index b636f1da..0460af22 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -847,7 +847,7 @@
ihdr = (struct rt0_hdr *) opt;
- phdr = (struct rt0_hdr *) skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3);
+ phdr = skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3);
memcpy(phdr, ihdr, sizeof(struct rt0_hdr));
hops = ihdr->rt_hdr.hdrlen >> 1;
@@ -873,7 +873,7 @@
sr_ihdr = (struct ipv6_sr_hdr *)opt;
plen = (sr_ihdr->hdrlen + 1) << 3;
- sr_phdr = (struct ipv6_sr_hdr *)skb_push(skb, plen);
+ sr_phdr = skb_push(skb, plen);
memcpy(sr_phdr, sr_ihdr, sizeof(struct ipv6_sr_hdr));
hops = sr_ihdr->first_segment + 1;
@@ -923,7 +923,7 @@
static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt)
{
- struct ipv6_opt_hdr *h = (struct ipv6_opt_hdr *)skb_push(skb, ipv6_optlen(opt));
+ struct ipv6_opt_hdr *h = skb_push(skb, ipv6_optlen(opt));
memcpy(h, opt, ipv6_optlen(opt));
h->nexthdr = *proto;
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c
index 9ea249b..6de3c04 100644
--- a/net/ipv6/fou6.c
+++ b/net/ipv6/fou6.c
@@ -14,6 +14,8 @@
#include <net/udp.h>
#include <net/udp_tunnel.h>
+#if IS_ENABLED(CONFIG_IPV6_FOU_TUNNEL)
+
static void fou6_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
struct flowi6 *fl6, u8 *protocol, __be16 sport)
{
@@ -33,8 +35,8 @@
*protocol = IPPROTO_UDP;
}
-int fou6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
- u8 *protocol, struct flowi6 *fl6)
+static int fou6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi6 *fl6)
{
__be16 sport;
int err;
@@ -49,10 +51,9 @@
return 0;
}
-EXPORT_SYMBOL(fou6_build_header);
-int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
- u8 *protocol, struct flowi6 *fl6)
+static int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi6 *fl6)
{
__be16 sport;
int err;
@@ -67,9 +68,6 @@
return 0;
}
-EXPORT_SYMBOL(gue6_build_header);
-
-#if IS_ENABLED(CONFIG_IPV6_FOU_TUNNEL)
static const struct ip6_tnl_encap_ops fou_ip6tun_ops = {
.encap_hlen = fou_encap_hlen,
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index b3df03e..0c02a09 100644
--- a/net/ipv6/ila/ila_lwt.c
+++ b/net/ipv6/ila/ila_lwt.c
@@ -91,7 +91,7 @@
drop:
kfree_skb(skb);
- return -EINVAL;
+ return err;
}
static int ila_input(struct sk_buff *skb)
@@ -117,7 +117,8 @@
static int ila_build_state(struct nlattr *nla,
unsigned int family, const void *cfg,
- struct lwtunnel_state **ts)
+ struct lwtunnel_state **ts,
+ struct netlink_ext_ack *extack)
{
struct ila_lwt *ilwt;
struct ila_params *p;
@@ -146,7 +147,7 @@
return -EINVAL;
}
- ret = nla_parse_nested(tb, ILA_ATTR_MAX, nla, ila_nl_policy, NULL);
+ ret = nla_parse_nested(tb, ILA_ATTR_MAX, nla, ila_nl_policy, extack);
if (ret < 0)
return ret;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index e6b78ba..5477ba7 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -153,11 +153,6 @@
kmem_cache_free(fib6_node_kmem, fn);
}
-static void rt6_rcu_free(struct rt6_info *rt)
-{
- call_rcu(&rt->dst.rcu_head, dst_rcu_free);
-}
-
static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
{
int cpu;
@@ -172,7 +167,8 @@
ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu);
pcpu_rt = *ppcpu_rt;
if (pcpu_rt) {
- rt6_rcu_free(pcpu_rt);
+ dst_dev_put(&pcpu_rt->dst);
+ dst_release(&pcpu_rt->dst);
*ppcpu_rt = NULL;
}
}
@@ -185,7 +181,8 @@
{
if (atomic_dec_and_test(&rt->rt6i_ref)) {
rt6_free_pcpu(rt);
- rt6_rcu_free(rt);
+ dst_dev_put(&rt->dst);
+ dst_release(&rt->dst);
}
}
@@ -472,7 +469,8 @@
static struct fib6_node *fib6_add_1(struct fib6_node *root,
struct in6_addr *addr, int plen,
int offset, int allow_create,
- int replace_required, int sernum)
+ int replace_required, int sernum,
+ struct netlink_ext_ack *extack)
{
struct fib6_node *fn, *in, *ln;
struct fib6_node *pn = NULL;
@@ -496,6 +494,8 @@
!ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) {
if (!allow_create) {
if (replace_required) {
+ NL_SET_ERR_MSG(extack,
+ "Can not replace route - no match found");
pr_warn("Can't replace route, no match found\n");
return ERR_PTR(-ENOENT);
}
@@ -542,6 +542,8 @@
* That would keep IPv6 consistent with IPv4
*/
if (replace_required) {
+ NL_SET_ERR_MSG(extack,
+ "Can not replace route - no match found");
pr_warn("Can't replace route, no match found\n");
return ERR_PTR(-ENOENT);
}
@@ -963,7 +965,8 @@
*/
int fib6_add(struct fib6_node *root, struct rt6_info *rt,
- struct nl_info *info, struct mx6_config *mxc)
+ struct nl_info *info, struct mx6_config *mxc,
+ struct netlink_ext_ack *extack)
{
struct fib6_node *fn, *pn = NULL;
int err = -ENOMEM;
@@ -971,8 +974,7 @@
int replace_required = 0;
int sernum = fib6_new_sernum(info->nl_net);
- if (WARN_ON_ONCE((rt->dst.flags & DST_NOCACHE) &&
- !atomic_read(&rt->dst.__refcnt)))
+ if (WARN_ON_ONCE(!atomic_read(&rt->dst.__refcnt)))
return -EINVAL;
if (info->nlh) {
@@ -986,7 +988,7 @@
fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
offsetof(struct rt6_info, rt6i_dst), allow_create,
- replace_required, sernum);
+ replace_required, sernum, extack);
if (IS_ERR(fn)) {
err = PTR_ERR(fn);
fn = NULL;
@@ -1027,7 +1029,8 @@
sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
rt->rt6i_src.plen,
offsetof(struct rt6_info, rt6i_src),
- allow_create, replace_required, sernum);
+ allow_create, replace_required, sernum,
+ extack);
if (IS_ERR(sn)) {
/* If it is failed, discard just allocated
@@ -1046,7 +1049,8 @@
sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
rt->rt6i_src.plen,
offsetof(struct rt6_info, rt6i_src),
- allow_create, replace_required, sernum);
+ allow_create, replace_required, sernum,
+ extack);
if (IS_ERR(sn)) {
err = PTR_ERR(sn);
@@ -1067,7 +1071,6 @@
fib6_start_gc(info->nl_net, rt);
if (!(rt->rt6i_flags & RTF_CACHE))
fib6_prune_clones(info->nl_net, pn);
- rt->dst.flags &= ~DST_NOCACHE;
}
out:
@@ -1092,8 +1095,10 @@
atomic_inc(&pn->leaf->rt6i_ref);
}
#endif
- if (!(rt->dst.flags & DST_NOCACHE))
- dst_free(&rt->dst);
+ /* Always release dst as dst->__refcnt is guaranteed
+ * to be taken before entering this function
+ */
+ dst_release_immediate(&rt->dst);
}
return err;
@@ -1104,8 +1109,10 @@
st_failure:
if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
fib6_repair_tree(info->nl_net, fn);
- if (!(rt->dst.flags & DST_NOCACHE))
- dst_free(&rt->dst);
+ /* Always release dst as dst->__refcnt is guaranteed
+ * to be taken before entering this function
+ */
+ dst_release_immediate(&rt->dst);
return err;
#endif
}
@@ -1774,7 +1781,7 @@
}
gc_args->more++;
} else if (rt->rt6i_flags & RTF_CACHE) {
- if (atomic_read(&rt->dst.__refcnt) == 0 &&
+ if (atomic_read(&rt->dst.__refcnt) == 1 &&
time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
RT6_TRACE("aging clone %p\n", rt);
return -1;
@@ -1812,8 +1819,7 @@
}
gc_args.timeout = expires ? (int)expires :
net->ipv6.sysctl.ip6_rt_gc_interval;
-
- gc_args.more = icmp6_dst_gc();
+ gc_args.more = 0;
fib6_clean_all(net, fib6_age, &gc_args);
now = jiffies;
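
Threading struct netlink_ext_ack through fib6_add_1() and friends lets these paths attach a human-readable string to the netlink NACK instead of returning a bare errno; NL_SET_ERR_MSG() records a constant string on the extack and the errno still selects the NACK code. The producer-side pattern, isolated as a sketch:

    #include <linux/errno.h>
    #include <linux/netlink.h>

    struct fib6_table;

    static int demo_get_table(struct fib6_table *table,
                              struct netlink_ext_ack *extack)
    {
        if (!table) {
            NL_SET_ERR_MSG(extack, "FIB table does not exist");
            return -ESRCH;  /* the errno still selects the NACK code */
        }
        return 0;
    }

With a current iproute2 this should surface as "Error: FIB table does not exist." rather than an errno alone.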
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 64eea396..e0e726c 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -942,7 +942,7 @@
const void *daddr, const void *saddr, unsigned int len)
{
struct ip6_tnl *t = netdev_priv(dev);
- struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen);
+ struct ipv6hdr *ipv6h = skb_push(skb, t->hlen);
__be16 *p = (__be16 *)(ipv6h+1);
ip6_flow_hdr(ipv6h, 0,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index bf8a58a..5baa6fa 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -67,9 +67,6 @@
struct in6_addr *nexthop;
int ret;
- skb->protocol = htons(ETH_P_IPV6);
- skb->dev = dev;
-
if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
@@ -154,6 +151,9 @@
struct net_device *dev = skb_dst(skb)->dev;
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+ skb->protocol = htons(ETH_P_IPV6);
+ skb->dev = dev;
+
if (unlikely(idev->cnf.disable_ipv6)) {
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
@@ -682,7 +682,7 @@
skb_frag_list_init(skb);
__skb_pull(skb, hlen);
- fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
+ fh = __skb_push(skb, sizeof(struct frag_hdr));
__skb_push(skb, hlen);
skb_reset_network_header(skb);
memcpy(skb_network_header(skb), tmp_hdr, hlen);
@@ -698,15 +698,13 @@
ipv6_hdr(skb)->payload_len = htons(first_len -
sizeof(struct ipv6hdr));
- dst_hold(&rt->dst);
-
for (;;) {
/* Prepare header of the next frame,
* before previous one went down. */
if (frag) {
frag->ip_summed = CHECKSUM_NONE;
skb_reset_transport_header(frag);
- fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
+ fh = __skb_push(frag, sizeof(struct frag_hdr));
__skb_push(frag, hlen);
skb_reset_network_header(frag);
memcpy(skb_network_header(frag), tmp_hdr,
@@ -742,7 +740,6 @@
if (err == 0) {
IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
IPSTATS_MIB_FRAGOKS);
- ip6_rt_put(rt);
return 0;
}
@@ -750,7 +747,6 @@
IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
IPSTATS_MIB_FRAGFAILS);
- ip6_rt_put(rt);
return err;
slow_path_clean:
@@ -869,7 +865,6 @@
if (skb->sk && dst_allfrag(skb_dst(skb)))
sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
- skb->dev = skb_dst(skb)->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
err = -EMSGSIZE;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 2ecb39b..7454850 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -116,6 +116,7 @@
struct mfc6_cache *c, struct rtmsg *rtm);
static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
int cmd);
+static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
struct netlink_callback *cb);
static void mroute_clean_tables(struct mr6_table *mrt, bool all);
@@ -846,7 +847,8 @@
while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
if (ipv6_hdr(skb)->version == 0) {
- struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
+ struct nlmsghdr *nlh = skb_pull(skb,
+ sizeof(struct ipv6hdr));
nlh->nlmsg_type = NLMSG_ERROR;
nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
skb_trim(skb, nlh->nlmsg_len);
@@ -1106,7 +1108,8 @@
while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
if (ipv6_hdr(skb)->version == 0) {
- struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
+ struct nlmsghdr *nlh = skb_pull(skb,
+ sizeof(struct ipv6hdr));
if (__ip6mr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
@@ -1123,8 +1126,7 @@
}
/*
- * Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
- * expects the following bizarre scheme.
+ * Bounce a cache query up to pim6sd and netlink.
*
* Called under mrt_lock.
*/
@@ -1206,6 +1208,8 @@
return -EINVAL;
}
+ mrt6msg_netlink_event(mrt, skb);
+
/*
* Deliver to user space multicast routing algorithms
*/
@@ -2455,6 +2459,71 @@
rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
}
+static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
+{
+ size_t len =
+ NLMSG_ALIGN(sizeof(struct rtgenmsg))
+ + nla_total_size(1) /* IP6MRA_CREPORT_MSGTYPE */
+ + nla_total_size(4) /* IP6MRA_CREPORT_MIF_ID */
+ /* IP6MRA_CREPORT_SRC_ADDR */
+ + nla_total_size(sizeof(struct in6_addr))
+ /* IP6MRA_CREPORT_DST_ADDR */
+ + nla_total_size(sizeof(struct in6_addr))
+ /* IP6MRA_CREPORT_PKT */
+ + nla_total_size(payloadlen)
+ ;
+
+ return len;
+}
+
+static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt)
+{
+ struct net *net = read_pnet(&mrt->net);
+ struct nlmsghdr *nlh;
+ struct rtgenmsg *rtgenm;
+ struct mrt6msg *msg;
+ struct sk_buff *skb;
+ struct nlattr *nla;
+ int payloadlen;
+
+ payloadlen = pkt->len - sizeof(struct mrt6msg);
+ msg = (struct mrt6msg *)skb_transport_header(pkt);
+
+ skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
+ if (!skb)
+ goto errout;
+
+ nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
+ sizeof(struct rtgenmsg), 0);
+ if (!nlh)
+ goto errout;
+ rtgenm = nlmsg_data(nlh);
+ rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
+ if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
+ nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
+ nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
+ &msg->im6_src) ||
+ nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
+ &msg->im6_dst))
+ goto nla_put_failure;
+
+ nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
+ if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
+ nla_data(nla), payloadlen))
+ goto nla_put_failure;
+
+ nlmsg_end(skb, nlh);
+
+ rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
+ return;
+
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+errout:
+ kfree_skb(skb);
+ rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
+}
+
static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
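
mrt6msg_netlink_msgsize() above sizes the nlmsg_new() allocation by summing aligned attribute costs: nla_total_size(n) is NLA_ALIGN(NLA_HDRLEN + n), i.e. a 4-byte attribute header plus the payload rounded up to a 4-byte boundary. A worked userspace check for a hypothetical 48-byte trapped packet:

    #include <stdio.h>

    #define NLA_ALIGNTO   4
    #define NLA_ALIGN(n)  (((n) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
    #define NLA_HDRLEN    NLA_ALIGN(4)   /* struct nlattr is 4 bytes */

    static int nla_total_size(int payload)
    {
        return NLA_ALIGN(NLA_HDRLEN + payload);
    }

    int main(void)
    {
        int payloadlen = 48;              /* hypothetical trapped packet */
        int len = 4                       /* NLMSG_ALIGN(sizeof(struct rtgenmsg)) */
            + nla_total_size(1)           /* IP6MRA_CREPORT_MSGTYPE:   8 */
            + nla_total_size(4)           /* IP6MRA_CREPORT_MIF_ID:    8 */
            + nla_total_size(16)          /* IP6MRA_CREPORT_SRC_ADDR: 20 */
            + nla_total_size(16)          /* IP6MRA_CREPORT_DST_ADDR: 20 */
            + nla_total_size(payloadlen); /* IP6MRA_CREPORT_PKT:      52 */

        printf("nlmsg_new() payload: %d bytes\n", len); /* 112 */
        return 0;
    }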
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 07403fa..e222113 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1602,7 +1602,7 @@
ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);
- memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
+ skb_put_data(skb, ra, sizeof(ra));
skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
skb_put(skb, sizeof(*pmr));
@@ -1692,7 +1692,7 @@
skb = mld_newpack(pmc->idev, dev->mtu);
if (!skb)
return NULL;
- pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
+ pgr = skb_put(skb, sizeof(struct mld2_grec));
pgr->grec_type = type;
pgr->grec_auxwords = 0;
pgr->grec_nsrcs = 0;
@@ -1784,7 +1784,7 @@
}
if (!skb)
return NULL;
- psrc = (struct in6_addr *)skb_put(skb, sizeof(*psrc));
+ psrc = skb_put(skb, sizeof(*psrc));
*psrc = psf->sf_addr;
scount++; stotal++;
if ((type == MLD2_ALLOW_NEW_SOURCES ||
@@ -2006,10 +2006,9 @@
ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);
- memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
+ skb_put_data(skb, ra, sizeof(ra));
- hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg));
- memset(hdr, 0, sizeof(struct mld_msg));
+ hdr = skb_put_zero(skb, sizeof(struct mld_msg));
hdr->mld_type = type;
hdr->mld_mca = *addr;
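
The mcast.c hunks use the new typed skb helpers from this cycle: skb_put()/skb_push() now return void * (hence the dropped casts throughout this merge), and skb_put_data()/skb_put_zero() fold the common put-then-memcpy/memset idiom into one call. A sketch of how the two helpers compose, close to (but not guaranteed identical to) their skbuff.h definitions:

    #include <linux/skbuff.h>
    #include <linux/string.h>

    static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
    {
        void *tmp = skb_put(skb, len); /* returns void * as of this cycle */

        memset(tmp, 0, len);
        return tmp;
    }

    static inline void *skb_put_data(struct sk_buff *skb, const void *data,
                                     unsigned int len)
    {
        void *tmp = skb_put(skb, len);

        memcpy(tmp, data, len);
        return tmp;
    }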
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index d310dc4..0327c1f 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -528,7 +528,7 @@
if (!skb)
return;
- msg = (struct nd_msg *)skb_put(skb, sizeof(*msg));
+ msg = skb_put(skb, sizeof(*msg));
*msg = (struct nd_msg) {
.icmph = {
.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT,
@@ -597,7 +597,7 @@
if (!skb)
return;
- msg = (struct nd_msg *)skb_put(skb, sizeof(*msg));
+ msg = skb_put(skb, sizeof(*msg));
*msg = (struct nd_msg) {
.icmph = {
.icmp6_type = NDISC_NEIGHBOUR_SOLICITATION,
@@ -657,7 +657,7 @@
if (!skb)
return;
- msg = (struct rs_msg *)skb_put(skb, sizeof(*msg));
+ msg = skb_put(skb, sizeof(*msg));
*msg = (struct rs_msg) {
.icmph = {
.icmp6_type = NDISC_ROUTER_SOLICITATION,
@@ -1633,7 +1633,7 @@
if (!buff)
goto release;
- msg = (struct rd_msg *)skb_put(buff, sizeof(*msg));
+ msg = skb_put(buff, sizeof(*msg));
*msg = (struct rd_msg) {
.icmph = {
.icmp6_type = NDISC_REDIRECT,
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index d3c4daa..ce203dd 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -27,7 +27,7 @@
struct ipv6hdr *iph;
skb_reset_network_header(skb);
- iph = (struct ipv6hdr *)skb_put(skb, sizeof(*iph));
+ iph = skb_put(skb, sizeof(*iph));
ip6_flow_hdr(iph, 0, 0);
iph->hop_limit = net->ipv6.devconf_all->hop_limit;
iph->nexthdr = IPPROTO_TCP;
@@ -105,7 +105,7 @@
niph = synproxy_build_ip(net, nskb, &iph->daddr, &iph->saddr);
skb_reset_transport_header(nskb);
- nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->dest;
nth->dest = th->source;
nth->seq = htonl(__cookie_v6_init_sequence(iph, th, &mss));
@@ -147,7 +147,7 @@
niph = synproxy_build_ip(net, nskb, &iph->saddr, &iph->daddr);
skb_reset_transport_header(nskb);
- nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->source;
nth->dest = th->dest;
nth->seq = htonl(recv_seq - 1);
@@ -192,7 +192,7 @@
niph = synproxy_build_ip(net, nskb, &iph->daddr, &iph->saddr);
skb_reset_transport_header(nskb);
- nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->dest;
nth->dest = th->source;
nth->seq = htonl(ntohl(th->ack_seq));
@@ -230,7 +230,7 @@
niph = synproxy_build_ip(net, nskb, &iph->saddr, &iph->daddr);
skb_reset_transport_header(nskb);
- nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->source;
nth->dest = th->dest;
nth->seq = htonl(ntohl(th->seq) + 1);
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index eedee5d..2485840 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -95,7 +95,7 @@
int needs_ack;
skb_reset_transport_header(nskb);
- tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
+ tcph = skb_put(nskb, sizeof(struct tcphdr));
/* Truncate to length (no data) */
tcph->doff = sizeof(struct tcphdr)/4;
tcph->source = oth->dest;
@@ -220,9 +220,6 @@
__be16 fo;
u8 proto;
- if (skb->csum_bad)
- return false;
-
if (skb_csum_unnecessary(skb))
return true;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7cebd95..2e44900 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -128,7 +128,6 @@
{
struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
- rt->dst.flags |= DST_NOCACHE;
rt->rt6i_uncached_list = ul;
spin_lock_bh(&ul->lock);
@@ -354,7 +353,7 @@
int flags)
{
struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
- 0, DST_OBSOLETE_FORCE_CHK, flags);
+ 1, DST_OBSOLETE_FORCE_CHK, flags);
if (rt)
rt6_info_init(rt);
@@ -381,7 +380,7 @@
*p = NULL;
}
} else {
- dst_destroy((struct dst_entry *)rt);
+ dst_release_immediate(&rt->dst);
return NULL;
}
}
@@ -932,20 +931,21 @@
EXPORT_SYMBOL(rt6_lookup);
/* ip6_ins_rt is called with FREE table->tb6_lock.
- It takes new route entry, the addition fails by any reason the
- route is freed. In any case, if caller does not hold it, it may
- be destroyed.
+ * It takes a new route entry; if the addition fails for any reason,
+ * the route is released.
+ * Caller must hold dst before calling it.
*/
static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
- struct mx6_config *mxc)
+ struct mx6_config *mxc,
+ struct netlink_ext_ack *extack)
{
int err;
struct fib6_table *table;
table = rt->rt6i_table;
write_lock_bh(&table->tb6_lock);
- err = fib6_add(&table->tb6_root, rt, info, mxc);
+ err = fib6_add(&table->tb6_root, rt, info, mxc, extack);
write_unlock_bh(&table->tb6_lock);
return err;
@@ -956,7 +956,9 @@
struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
struct mx6_config mxc = { .mx = NULL, };
- return __ip6_ins_rt(rt, &info, &mxc);
+ /* Hold dst to account for the reference from the fib6 tree */
+ dst_hold(&rt->dst);
+ return __ip6_ins_rt(rt, &info, &mxc, NULL);
}
static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
@@ -1048,7 +1050,7 @@
prev = cmpxchg(p, NULL, pcpu_rt);
if (prev) {
/* If someone did it before us, return prev instead */
- dst_destroy(&pcpu_rt->dst);
+ dst_release_immediate(&pcpu_rt->dst);
pcpu_rt = prev;
}
} else {
@@ -1058,7 +1060,7 @@
* since rt is going away anyway. The next
* dst_check() will trigger a re-lookup.
*/
- dst_destroy(&pcpu_rt->dst);
+ dst_release_immediate(&pcpu_rt->dst);
pcpu_rt = rt;
}
dst_hold(&pcpu_rt->dst);
@@ -1128,12 +1130,15 @@
uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
dst_release(&rt->dst);
- if (uncached_rt)
+ if (uncached_rt) {
+ /* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc()
+ * No need for another dst_hold()
+ */
rt6_uncached_list_add(uncached_rt);
- else
+ } else {
uncached_rt = net->ipv6.ip6_null_entry;
-
- dst_hold(&uncached_rt->dst);
+ dst_hold(&uncached_rt->dst);
+ }
trace_fib6_table_lookup(net, uncached_rt, table->tb6_id, fl6);
return uncached_rt;
@@ -1244,9 +1249,11 @@
struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
+ struct net_device *loopback_dev = net->loopback_dev;
struct dst_entry *new = NULL;
- rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
+ rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
+ DST_OBSOLETE_NONE, 0);
if (rt) {
rt6_info_init(rt);
@@ -1256,10 +1263,8 @@
new->output = dst_discard_out;
dst_copy_metrics(new, &ort->dst);
- rt->rt6i_idev = ort->rt6i_idev;
- if (rt->rt6i_idev)
- in6_dev_hold(rt->rt6i_idev);
+ rt->rt6i_idev = in6_dev_get(loopback_dev);
rt->rt6i_gateway = ort->rt6i_gateway;
rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
rt->rt6i_metric = 0;
@@ -1268,8 +1273,6 @@
#ifdef CONFIG_IPV6_SUBTREES
memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
-
- dst_free(new);
}
dst_release(dst_orig);
@@ -1322,7 +1325,7 @@
rt6_dst_from_metrics_check(rt);
if (rt->rt6i_flags & RTF_PCPU ||
- (unlikely(dst->flags & DST_NOCACHE) && rt->dst.from))
+ (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
return rt6_dst_from_check(rt, cookie);
else
return rt6_check(rt, cookie);
@@ -1355,8 +1358,8 @@
rt = (struct rt6_info *) skb_dst(skb);
if (rt) {
if (rt->rt6i_flags & RTF_CACHE) {
- dst_hold(&rt->dst);
- ip6_del_rt(rt);
+ if (dst_hold_safe(&rt->dst))
+ ip6_del_rt(rt);
} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
rt->rt6i_node->fn_sernum = -1;
}
@@ -1420,6 +1423,10 @@
* invalidate the sk->sk_dst_cache.
*/
ip6_ins_rt(nrt6);
+ /* Release the reference taken in
+ * ip6_rt_cache_alloc()
+ */
+ dst_release(&nrt6->dst);
}
}
}
@@ -1648,9 +1655,6 @@
return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
-static struct dst_entry *icmp6_dst_gc_list;
-static DEFINE_SPINLOCK(icmp6_dst_lock);
-
struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
struct flowi6 *fl6)
{
@@ -1671,19 +1675,16 @@
rt->dst.flags |= DST_HOST;
rt->dst.output = ip6_output;
- atomic_set(&rt->dst.__refcnt, 1);
rt->rt6i_gateway = fl6->daddr;
rt->rt6i_dst.addr = fl6->daddr;
rt->rt6i_dst.plen = 128;
rt->rt6i_idev = idev;
dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
- spin_lock_bh(&icmp6_dst_lock);
- rt->dst.next = icmp6_dst_gc_list;
- icmp6_dst_gc_list = &rt->dst;
- spin_unlock_bh(&icmp6_dst_lock);
-
- fib6_force_start_gc(net);
+ /* Add this dst into uncached_list so that rt6_ifdown() can
+ * do proper release of the net_device
+ */
+ rt6_uncached_list_add(rt);
dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
@@ -1691,48 +1692,6 @@
return dst;
}
-int icmp6_dst_gc(void)
-{
- struct dst_entry *dst, **pprev;
- int more = 0;
-
- spin_lock_bh(&icmp6_dst_lock);
- pprev = &icmp6_dst_gc_list;
-
- while ((dst = *pprev) != NULL) {
- if (!atomic_read(&dst->__refcnt)) {
- *pprev = dst->next;
- dst_free(dst);
- } else {
- pprev = &dst->next;
- ++more;
- }
- }
-
- spin_unlock_bh(&icmp6_dst_lock);
-
- return more;
-}
-
-static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
- void *arg)
-{
- struct dst_entry *dst, **pprev;
-
- spin_lock_bh(&icmp6_dst_lock);
- pprev = &icmp6_dst_gc_list;
- while ((dst = *pprev) != NULL) {
- struct rt6_info *rt = (struct rt6_info *) dst;
- if (func(rt, arg)) {
- *pprev = dst->next;
- dst_free(dst);
- } else {
- pprev = &dst->next;
- }
- }
- spin_unlock_bh(&icmp6_dst_lock);
-}
-
static int ip6_dst_gc(struct dst_ops *ops)
{
struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
@@ -1844,7 +1803,8 @@
return rt;
}
-static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
+static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct net *net = cfg->fc_nlinfo.nl_net;
struct rt6_info *rt = NULL;
@@ -1855,14 +1815,25 @@
int err = -EINVAL;
/* RTF_PCPU is an internal flag; can not be set by userspace */
- if (cfg->fc_flags & RTF_PCPU)
+ if (cfg->fc_flags & RTF_PCPU) {
+ NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
goto out;
+ }
- if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
+ if (cfg->fc_dst_len > 128) {
+ NL_SET_ERR_MSG(extack, "Invalid prefix length");
goto out;
+ }
+ if (cfg->fc_src_len > 128) {
+ NL_SET_ERR_MSG(extack, "Invalid source address length");
+ goto out;
+ }
#ifndef CONFIG_IPV6_SUBTREES
- if (cfg->fc_src_len)
+ if (cfg->fc_src_len) {
+ NL_SET_ERR_MSG(extack,
+ "Specifying source address requires IPV6_SUBTREES to be enabled");
goto out;
+ }
#endif
if (cfg->fc_ifindex) {
err = -ENODEV;
@@ -1926,7 +1897,7 @@
err = lwtunnel_build_state(cfg->fc_encap_type,
cfg->fc_encap, AF_INET6, cfg,
- &lwtstate);
+ &lwtstate, extack);
if (err)
goto out;
rt->dst.lwtstate = lwtstate_get(lwtstate);
@@ -2013,9 +1984,10 @@
err = -EINVAL;
if (ipv6_chk_addr_and_flags(net, gw_addr,
gwa_type & IPV6_ADDR_LINKLOCAL ?
- dev : NULL, 0, 0))
+ dev : NULL, 0, 0)) {
+ NL_SET_ERR_MSG(extack, "Invalid gateway address");
goto out;
-
+ }
rt->rt6i_gateway = *gw_addr;
if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
@@ -2031,8 +2003,11 @@
addressing
*/
if (!(gwa_type & (IPV6_ADDR_UNICAST |
- IPV6_ADDR_MAPPED)))
+ IPV6_ADDR_MAPPED))) {
+ NL_SET_ERR_MSG(extack,
+ "Invalid gateway address");
goto out;
+ }
if (cfg->fc_table) {
grt = ip6_nh_lookup_table(net, cfg, gw_addr);
@@ -2072,8 +2047,14 @@
goto out;
}
err = -EINVAL;
- if (!dev || (dev->flags & IFF_LOOPBACK))
+ if (!dev) {
+ NL_SET_ERR_MSG(extack, "Egress device not specified");
goto out;
+ } else if (dev->flags & IFF_LOOPBACK) {
+ NL_SET_ERR_MSG(extack,
+ "Egress device can not be loopback device for this route");
+ goto out;
+ }
}
err = -ENODEV;
@@ -2082,6 +2063,7 @@
if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
+ NL_SET_ERR_MSG(extack, "Invalid source address");
err = -EINVAL;
goto out;
}
@@ -2106,18 +2088,19 @@
if (idev)
in6_dev_put(idev);
if (rt)
- dst_free(&rt->dst);
+ dst_release_immediate(&rt->dst);
return ERR_PTR(err);
}
-int ip6_route_add(struct fib6_config *cfg)
+int ip6_route_add(struct fib6_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct mx6_config mxc = { .mx = NULL, };
struct rt6_info *rt;
int err;
- rt = ip6_route_info_create(cfg);
+ rt = ip6_route_info_create(cfg, extack);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
rt = NULL;
@@ -2128,14 +2111,14 @@
if (err)
goto out;
- err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc);
+ err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc, extack);
kfree(mxc.mx);
return err;
out:
if (rt)
- dst_free(&rt->dst);
+ dst_release_immediate(&rt->dst);
return err;
}
@@ -2146,8 +2129,7 @@
struct fib6_table *table;
struct net *net = dev_net(rt->dst.dev);
- if (rt == net->ipv6.ip6_null_entry ||
- rt->dst.flags & DST_NOCACHE) {
+ if (rt == net->ipv6.ip6_null_entry) {
err = -ENOENT;
goto out;
}
@@ -2222,7 +2204,8 @@
return err;
}
-static int ip6_route_del(struct fib6_config *cfg)
+static int ip6_route_del(struct fib6_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct fib6_table *table;
struct fib6_node *fn;
@@ -2230,8 +2213,10 @@
int err = -ESRCH;
table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
- if (!table)
+ if (!table) {
+ NL_SET_ERR_MSG(extack, "FIB table does not exist");
return err;
+ }
read_lock_bh(&table->tb6_lock);
@@ -2369,7 +2354,7 @@
nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
if (ip6_ins_rt(nrt))
- goto out;
+ goto out_release;
netevent.old = &rt->dst;
netevent.new = &nrt->dst;
@@ -2382,6 +2367,12 @@
ip6_del_rt(rt);
}
+out_release:
+ /* Release the reference taken in
+ * ip6_rt_cache_alloc()
+ */
+ dst_release(&nrt->dst);
+
out:
neigh_release(neigh);
}
@@ -2483,7 +2474,7 @@
if (!prefixlen)
cfg.fc_flags |= RTF_DEFAULT;
- ip6_route_add(&cfg);
+ ip6_route_add(&cfg, NULL);
return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
}
@@ -2529,7 +2520,7 @@
cfg.fc_gateway = *gwaddr;
- if (!ip6_route_add(&cfg)) {
+ if (!ip6_route_add(&cfg, NULL)) {
struct fib6_table *table;
table = fib6_get_table(dev_net(dev), cfg.fc_table);
@@ -2622,10 +2613,10 @@
rtnl_lock();
switch (cmd) {
case SIOCADDRT:
- err = ip6_route_add(&cfg);
+ err = ip6_route_add(&cfg, NULL);
break;
case SIOCDELRT:
- err = ip6_route_del(&cfg);
+ err = ip6_route_del(&cfg, NULL);
break;
default:
err = -EINVAL;
@@ -2729,9 +2720,6 @@
rt->rt6i_dst.plen = 128;
tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
rt->rt6i_table = fib6_get_table(net, tb_id);
- rt->dst.flags |= DST_NOCACHE;
-
- atomic_set(&rt->dst.__refcnt, 1);
return rt;
}
@@ -2819,7 +2807,6 @@
};
fib6_clean_all(net, fib6_ifdown, &adn);
- icmp6_clean_all(fib6_ifdown, &adn);
if (dev)
rt6_uncached_list_flush_dev(net, dev);
}
@@ -2904,7 +2891,8 @@
};
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct fib6_config *cfg)
+ struct fib6_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct rtmsg *rtm;
struct nlattr *tb[RTA_MAX+1];
@@ -2988,7 +2976,7 @@
cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
- cfg->fc_mp_len);
+ cfg->fc_mp_len, extack);
if (err < 0)
goto errout;
}
@@ -3007,7 +2995,7 @@
if (tb[RTA_ENCAP_TYPE]) {
cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
- err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
+ err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
if (err < 0)
goto errout;
}
@@ -3098,7 +3086,8 @@
inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
}
-static int ip6_route_multipath_add(struct fib6_config *cfg)
+static int ip6_route_multipath_add(struct fib6_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct rt6_info *rt_notif = NULL, *rt_last = NULL;
struct nl_info *info = &cfg->fc_nlinfo;
@@ -3146,7 +3135,7 @@
r_cfg.fc_encap_type = nla_get_u16(nla);
}
- rt = ip6_route_info_create(&r_cfg);
+ rt = ip6_route_info_create(&r_cfg, extack);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
rt = NULL;
@@ -3155,7 +3144,7 @@
err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
if (err) {
- dst_free(&rt->dst);
+ dst_release_immediate(&rt->dst);
goto cleanup;
}
@@ -3171,7 +3160,7 @@
err_nh = NULL;
list_for_each_entry(nh, &rt6_nh_list, next) {
rt_last = nh->rt6_info;
- err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc);
+ err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc, extack);
/* save reference to first route for notification */
if (!rt_notif && !err)
rt_notif = nh->rt6_info;
@@ -3213,13 +3202,13 @@
list_for_each_entry(nh, &rt6_nh_list, next) {
if (err_nh == nh)
break;
- ip6_route_del(&nh->r_cfg);
+ ip6_route_del(&nh->r_cfg, extack);
}
cleanup:
list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
if (nh->rt6_info)
- dst_free(&nh->rt6_info->dst);
+ dst_release_immediate(&nh->rt6_info->dst);
kfree(nh->mxc.mx);
list_del(&nh->next);
kfree(nh);
@@ -3228,7 +3217,8 @@
return err;
}
-static int ip6_route_multipath_del(struct fib6_config *cfg)
+static int ip6_route_multipath_del(struct fib6_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct fib6_config r_cfg;
struct rtnexthop *rtnh;
@@ -3255,7 +3245,7 @@
r_cfg.fc_flags |= RTF_GATEWAY;
}
}
- err = ip6_route_del(&r_cfg);
+ err = ip6_route_del(&r_cfg, extack);
if (err)
last_err = err;
@@ -3271,15 +3261,15 @@
struct fib6_config cfg;
int err;
- err = rtm_to_fib6_config(skb, nlh, &cfg);
+ err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
if (err < 0)
return err;
if (cfg.fc_mp)
- return ip6_route_multipath_del(&cfg);
+ return ip6_route_multipath_del(&cfg, extack);
else {
cfg.fc_delete_all_nh = 1;
- return ip6_route_del(&cfg);
+ return ip6_route_del(&cfg, extack);
}
}
@@ -3289,14 +3279,14 @@
struct fib6_config cfg;
int err;
- err = rtm_to_fib6_config(skb, nlh, &cfg);
+ err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
if (err < 0)
return err;
if (cfg.fc_mp)
- return ip6_route_multipath_add(&cfg);
+ return ip6_route_multipath_add(&cfg, extack);
else
- return ip6_route_add(&cfg);
+ return ip6_route_add(&cfg, extack);
}
static size_t rt6_nlmsg_size(struct rt6_info *rt)
@@ -3577,11 +3567,13 @@
{
struct net *net = sock_net(in_skb->sk);
struct nlattr *tb[RTA_MAX+1];
+ int err, iif = 0, oif = 0;
+ struct dst_entry *dst;
struct rt6_info *rt;
struct sk_buff *skb;
struct rtmsg *rtm;
struct flowi6 fl6;
- int err, iif = 0, oif = 0;
+ bool fibmatch;
err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
extack);
@@ -3592,6 +3584,7 @@
memset(&fl6, 0, sizeof(fl6));
rtm = nlmsg_data(nlh);
fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
+ fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
if (tb[RTA_SRC]) {
if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
@@ -3637,12 +3630,23 @@
if (!ipv6_addr_any(&fl6.saddr))
flags |= RT6_LOOKUP_F_HAS_SADDR;
- rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6,
- flags);
+ if (!fibmatch)
+ dst = ip6_route_input_lookup(net, dev, &fl6, flags);
} else {
fl6.flowi6_oif = oif;
- rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
+ if (!fibmatch)
+ dst = ip6_route_output(net, NULL, &fl6);
+ }
+
+ if (fibmatch)
+ dst = ip6_route_lookup(net, &fl6, 0);
+
+ rt = container_of(dst, struct rt6_info, dst);
+ if (rt->dst.error) {
+ err = rt->dst.error;
+ ip6_rt_put(rt);
+ goto errout;
}
if (rt == net->ipv6.ip6_null_entry) {
@@ -3659,10 +3663,14 @@
}
skb_dst_set(skb, &rt->dst);
-
- err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
- RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0);
+ if (fibmatch)
+ err = rt6_fill_node(net, skb, rt, NULL, NULL, iif,
+ RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
+ nlh->nlmsg_seq, 0);
+ else
+ err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
+ RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
+ nlh->nlmsg_seq, 0);
if (err < 0) {
kfree_skb(skb);
goto errout;
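Note: the route.c hunks above thread a struct netlink_ext_ack through ip6_route_add()/ip6_route_del() and their helpers so that validation failures return a human-readable message alongside the errno; the RTM_F_FIB_MATCH hunk likewise makes RTM_GETROUTE return the matched FIB entry (via ip6_route_lookup()) rather than the resolved dst. A minimal sketch of the extack pattern, with a hypothetical helper name not taken from this patch:

	static int example_route_check(struct fib6_config *cfg,
				       struct netlink_ext_ack *extack)
	{
		/* both the errno and the message reach the requester */
		if (!fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table)) {
			NL_SET_ERR_MSG(extack, "FIB table does not exist");
			return -ESRCH;
		}
		return 0;
	}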
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index 5f44ffe..15fba55 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -303,13 +303,9 @@
static int seg6_genl_dumphmac(struct sk_buff *skb, struct netlink_callback *cb)
{
struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];
- struct net *net = sock_net(skb->sk);
- struct seg6_pernet_data *sdata;
struct seg6_hmac_info *hinfo;
int ret;
- sdata = seg6_pernet(net);
-
ret = rhashtable_walk_start(iter);
if (ret && ret != -EAGAIN)
goto done;
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 6a49549..264d772 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -326,7 +326,8 @@
static int seg6_build_state(struct nlattr *nla,
unsigned int family, const void *cfg,
- struct lwtunnel_state **ts)
+ struct lwtunnel_state **ts,
+ struct netlink_ext_ack *extack)
{
struct nlattr *tb[SEG6_IPTUNNEL_MAX + 1];
struct seg6_iptunnel_encap *tuninfo;
@@ -336,7 +337,7 @@
int err;
err = nla_parse_nested(tb, SEG6_IPTUNNEL_MAX, nla,
- seg6_iptunnel_policy, NULL);
+ seg6_iptunnel_policy, extack);
if (err < 0)
return err;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 5abc369..2f7e99a 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -162,15 +162,16 @@
/* check for timestamp cookie support */
memset(&tcp_opt, 0, sizeof(tcp_opt));
- tcp_parse_options(skb, &tcp_opt, 0, NULL);
+ tcp_parse_options(sock_net(sk), skb, &tcp_opt, 0, NULL);
if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) {
- tsoff = secure_tcpv6_ts_off(ipv6_hdr(skb)->daddr.s6_addr32,
+ tsoff = secure_tcpv6_ts_off(sock_net(sk),
+ ipv6_hdr(skb)->daddr.s6_addr32,
ipv6_hdr(skb)->saddr.s6_addr32);
tcp_opt.rcv_tsecr -= tsoff;
}
- if (!cookie_timestamp_decode(&tcp_opt))
+ if (!cookie_timestamp_decode(sock_net(sk), &tcp_opt))
goto out;
ret = NULL;
@@ -211,7 +212,7 @@
ireq->wscale_ok = tcp_opt.wscale_ok;
ireq->tstamp_ok = tcp_opt.saw_tstamp;
req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
- treq->snt_synack.v64 = 0;
+ treq->snt_synack = 0;
treq->rcv_isn = ntohl(th->seq) - 1;
treq->snt_isn = cookie;
treq->ts_off = 0;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 4f4310a..68dc747 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -109,9 +109,9 @@
tcp_hdr(skb)->source);
}
-static u32 tcp_v6_init_ts_off(const struct sk_buff *skb)
+static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
- return secure_tcpv6_ts_off(ipv6_hdr(skb)->daddr.s6_addr32,
+ return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
ipv6_hdr(skb)->saddr.s6_addr32);
}
@@ -292,7 +292,8 @@
sk->sk_v6_daddr.s6_addr32,
inet->inet_sport,
inet->inet_dport);
- tp->tsoffset = secure_tcpv6_ts_off(np->saddr.s6_addr32,
+ tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
+ np->saddr.s6_addr32,
sk->sk_v6_daddr.s6_addr32);
}
@@ -514,11 +515,12 @@
return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}
-static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
- int optlen)
+static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
+ char __user *optval, int optlen)
{
struct tcp_md5sig cmd;
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
+ u8 prefixlen;
if (optlen < sizeof(cmd))
return -EINVAL;
@@ -529,12 +531,22 @@
if (sin6->sin6_family != AF_INET6)
return -EINVAL;
+ if (optname == TCP_MD5SIG_EXT &&
+ cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
+ prefixlen = cmd.tcpm_prefixlen;
+ if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
+ prefixlen > 32))
+ return -EINVAL;
+ } else {
+ prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
+ }
+
if (!cmd.tcpm_keylen) {
if (ipv6_addr_v4mapped(&sin6->sin6_addr))
return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
- AF_INET);
+ AF_INET, prefixlen);
return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
- AF_INET6);
+ AF_INET6, prefixlen);
}
if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
@@ -542,10 +554,12 @@
if (ipv6_addr_v4mapped(&sin6->sin6_addr))
return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
- AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
+ AF_INET, prefixlen, cmd.tcpm_key,
+ cmd.tcpm_keylen, GFP_KERNEL);
return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
- AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
+ AF_INET6, prefixlen, cmd.tcpm_key,
+ cmd.tcpm_keylen, GFP_KERNEL);
}
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
@@ -788,7 +802,7 @@
skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
- t1 = (struct tcphdr *) skb_push(buff, tot_len);
+ t1 = skb_push(buff, tot_len);
skb_reset_transport_header(buff);
/* Swap the send and the receive. */
@@ -949,7 +963,7 @@
tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
- tcp_time_stamp + tcptw->tw_ts_offset,
+ tcp_time_stamp_raw() + tcptw->tw_ts_offset,
tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
@@ -971,7 +985,7 @@
tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
tcp_rsk(req)->rcv_nxt,
req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
- tcp_time_stamp + tcp_rsk(req)->ts_off,
+ tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
req->ts_recent, sk->sk_bound_dev_if,
tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
0, 0);
@@ -1185,7 +1199,7 @@
* across. Shucks.
*/
tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
- AF_INET6, key->key, key->keylen,
+ AF_INET6, 128, key->key, key->keylen,
sk_gfp_mask(sk, GFP_ATOMIC));
}
#endif
@@ -1248,9 +1262,6 @@
if (skb->protocol == htons(ETH_P_IP))
return tcp_v4_do_rcv(sk, skb);
- if (tcp_filter(sk, skb))
- goto discard;
-
/*
* socket locking is here for SMP purposes as backlog rcv
* is currently called with bh processing disabled.
@@ -1909,6 +1920,7 @@
.unhash = inet_unhash,
.get_port = inet_csk_get_port,
.enter_memory_pressure = tcp_enter_memory_pressure,
+ .leave_memory_pressure = tcp_leave_memory_pressure,
.stream_memory_free = tcp_stream_memory_free,
.sockets_allocated = &tcp_sockets_allocated,
.memory_allocated = &tcp_memory_allocated,
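Note: tcp_v6_parse_md5_keys() above gains prefix-based MD5 keys. A userspace sketch of the new option (illustrative only; assumes the TCP_MD5SIG_EXT/TCP_MD5SIG_FLAG_PREFIX uapi added by this series, and a caller-supplied socket and secret):

	/* fd is a TCP socket, key[] the shared secret */
	static int set_md5_prefix_key(int fd, const unsigned char *key,
				      int keylen)
	{
		struct tcp_md5sig md5 = {};
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&md5.tcpm_addr;

		sin6->sin6_family = AF_INET6;
		inet_pton(AF_INET6, "2001:db8::", &sin6->sin6_addr);
		md5.tcpm_flags = TCP_MD5SIG_FLAG_PREFIX;
		md5.tcpm_prefixlen = 32;	/* <= 128; <= 32 for v4-mapped */
		md5.tcpm_keylen = keylen;
		memcpy(md5.tcpm_key, key, keylen);
		return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG_EXT,
				  &md5, sizeof(md5));
	}

With that, a single key covers every peer inside 2001:db8::/32.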
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 06ec39b..2b33847 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -455,7 +455,8 @@
return err;
csum_copy_err:
- if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) {
+ if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
+ udp_skb_destructor)) {
if (is_udp4) {
UDP_INC_STATS(sock_net(sk),
UDP_MIB_CSUMERRORS, is_udplite);
@@ -919,12 +920,11 @@
if (dst)
dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
if (dst) {
- if (dst->flags & DST_NOCACHE) {
- if (likely(atomic_inc_not_zero(&dst->__refcnt)))
- skb_dst_set(skb, dst);
- } else {
- skb_dst_set_noref(skb, dst);
- }
+ /* Set noref for now.
+ * Any place that wants to hold the dst has to call
+ * dst_hold_safe().
+ */
+ skb_dst_set_noref(skb, dst);
}
}
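Note: with the early-demux dst now always attached noref, a consumer that needs the dst beyond the RCU read-side section must take its own reference first. A sketch (the function name is hypothetical):

	static void example_cache_rx_dst(struct sock *sk, struct sk_buff *skb)
	{
		struct dst_entry *dst = skb_dst(skb);

		/* dst_hold_safe() fails once the refcount has hit zero */
		if (dst && dst_hold_safe(dst))
			sk->sk_rx_dst = dst;	/* socket now owns a ref */
	}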
diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c
index 1e205c3..57fd314 100644
--- a/net/ipv6/xfrm6_mode_beet.c
+++ b/net/ipv6/xfrm6_mode_beet.c
@@ -54,7 +54,7 @@
skb->mac_header = skb->network_header +
offsetof(struct ipv6hdr, nexthdr);
skb->transport_header = skb->network_header + sizeof(*top_iph);
- ph = (struct ip_beet_phdr *)__skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl-hdr_len);
+ ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdr_len);
xfrm6_beet_make_header(skb);
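Note: the cast removals in this and the following conversions depend on skb_put()/skb_push()/__skb_pull() returning void * in net-next, plus the new helpers that fold the common put+memcpy/memset pairs into one call. The signatures, as assumed from that series, are:

	void *skb_put(struct sk_buff *skb, unsigned int len);
	void *skb_put_zero(struct sk_buff *skb, unsigned int len);
	void *skb_put_data(struct sk_buff *skb, const void *data,
			   unsigned int len);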
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index f6061c4..ec157c3 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -690,7 +690,7 @@
}
/* Copy data */
- memcpy(skb_put(skb,size), buf + len, size);
+ skb_put_data(skb, buf + len, size);
count -= size;
len += size;
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index b936b12..debda3d 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -133,7 +133,7 @@
if (!tx_skb)
return;
- frame = (struct snrm_frame *) skb_put(tx_skb, 2);
+ frame = skb_put(tx_skb, 2);
/* Insert connection address field */
if (qos)
@@ -228,7 +228,7 @@
if (!tx_skb)
return;
- frame = (struct ua_frame *) skb_put(tx_skb, 10);
+ frame = skb_put(tx_skb, 10);
/* Build UA response */
frame->caddr = self->caddr;
@@ -268,7 +268,7 @@
if (!tx_skb)
return;
- frame = (struct dm_frame *)skb_put(tx_skb, 2);
+ frame = skb_put(tx_skb, 2);
if (self->state == LAP_NDM)
frame->caddr = CBROADCAST;
@@ -298,7 +298,7 @@
if (!tx_skb)
return;
- frame = (struct disc_frame *)skb_put(tx_skb, 2);
+ frame = skb_put(tx_skb, 2);
frame->caddr = self->caddr | CMD_FRAME;
frame->control = DISC_CMD | PF_BIT;
@@ -392,8 +392,7 @@
info[0] = discovery->data.charset;
len = IRDA_MIN(discovery->name_len, skb_tailroom(tx_skb));
- info = skb_put(tx_skb, len);
- memcpy(info, discovery->data.info, len);
+ skb_put_data(tx_skb, discovery->data.info, len);
}
irlap_queue_xmit(self, tx_skb);
}
@@ -588,7 +587,7 @@
if (!tx_skb)
return;
- frame = (struct rr_frame *)skb_put(tx_skb, 2);
+ frame = skb_put(tx_skb, 2);
frame->caddr = self->caddr;
frame->caddr |= (command) ? CMD_FRAME : 0;
@@ -613,7 +612,7 @@
if (!tx_skb)
return;
- frame = (struct rd_frame *)skb_put(tx_skb, 2);
+ frame = skb_put(tx_skb, 2);
frame->caddr = self->caddr;
frame->control = RD_RSP | PF_BIT;
@@ -1195,7 +1194,6 @@
{
struct sk_buff *tx_skb;
struct test_frame *frame;
- __u8 *info;
tx_skb = alloc_skb(cmd->len + sizeof(struct test_frame), GFP_ATOMIC);
if (!tx_skb)
@@ -1203,21 +1201,19 @@
/* Broadcast frames must include saddr and daddr fields */
if (caddr == CBROADCAST) {
- frame = (struct test_frame *)
- skb_put(tx_skb, sizeof(struct test_frame));
+ frame = skb_put(tx_skb, sizeof(struct test_frame));
/* Insert the swapped addresses */
frame->saddr = cpu_to_le32(self->saddr);
frame->daddr = cpu_to_le32(daddr);
} else
- frame = (struct test_frame *) skb_put(tx_skb, LAP_ADDR_HEADER + LAP_CTRL_HEADER);
+ frame = skb_put(tx_skb, LAP_ADDR_HEADER + LAP_CTRL_HEADER);
frame->caddr = caddr;
frame->control = TEST_RSP | PF_BIT;
/* Copy info */
- info = skb_put(tx_skb, cmd->len);
- memcpy(info, cmd->data, cmd->len);
+ skb_put_data(tx_skb, cmd->data, cmd->len);
/* Return to sender */
irlap_wait_min_turn_around(self, &self->qos_tx);
diff --git a/net/irda/irnet/irnet_irda.c b/net/irda/irnet/irnet_irda.c
index 7f17a80..e390bce 100644
--- a/net/irda/irnet/irnet_irda.c
+++ b/net/irda/irnet/irnet_irda.c
@@ -1065,7 +1065,7 @@
if(p[0] & 1)
{
/* protocol is compressed */
- skb_push(skb, 1)[0] = 0;
+ *(u8 *)skb_push(skb, 1) = 0;
}
else
if(skb->len < 2)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 84de7b6..2cf9d59 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -322,8 +322,7 @@
int err, confirm_recv = 0;
memset(skb->head, 0, ETH_HLEN);
- phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
- sizeof(struct af_iucv_trans_hdr));
+ phs_hdr = skb_push(skb, sizeof(struct af_iucv_trans_hdr));
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb_push(skb, ETH_HLEN);
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index deca20f..da49191 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1985,7 +1985,7 @@
return 0;
}
-static struct net_proto_family kcm_family_ops = {
+static const struct net_proto_family kcm_family_ops = {
.family = PF_KCM,
.create = kcm_create,
.owner = THIS_MODULE,
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 512dc43..daa4e90 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -349,7 +349,7 @@
err = EINVAL;
BUG_ON(err <= 0 || err >= 256);
- hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
+ hdr = skb_put(skb, sizeof(struct sadb_msg));
pfkey_hdr_dup(hdr, orig);
hdr->sadb_msg_errno = (uint8_t) err;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
@@ -810,12 +810,12 @@
return ERR_PTR(-ENOBUFS);
/* caller should fill header later */
- hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
+ hdr = skb_put(skb, sizeof(struct sadb_msg));
memset(hdr, 0, size); /* XXX do we need this ? */
hdr->sadb_msg_len = size / sizeof(uint64_t);
/* sa */
- sa = (struct sadb_sa *) skb_put(skb, sizeof(struct sadb_sa));
+ sa = skb_put(skb, sizeof(struct sadb_sa));
sa->sadb_sa_len = sizeof(struct sadb_sa)/sizeof(uint64_t);
sa->sadb_sa_exttype = SADB_EXT_SA;
sa->sadb_sa_spi = x->id.spi;
@@ -862,8 +862,7 @@
/* hard time */
if (hsc & 2) {
- lifetime = (struct sadb_lifetime *) skb_put(skb,
- sizeof(struct sadb_lifetime));
+ lifetime = skb_put(skb, sizeof(struct sadb_lifetime));
lifetime->sadb_lifetime_len =
sizeof(struct sadb_lifetime)/sizeof(uint64_t);
lifetime->sadb_lifetime_exttype = SADB_EXT_LIFETIME_HARD;
@@ -874,8 +873,7 @@
}
/* soft time */
if (hsc & 1) {
- lifetime = (struct sadb_lifetime *) skb_put(skb,
- sizeof(struct sadb_lifetime));
+ lifetime = skb_put(skb, sizeof(struct sadb_lifetime));
lifetime->sadb_lifetime_len =
sizeof(struct sadb_lifetime)/sizeof(uint64_t);
lifetime->sadb_lifetime_exttype = SADB_EXT_LIFETIME_SOFT;
@@ -885,8 +883,7 @@
lifetime->sadb_lifetime_usetime = x->lft.soft_use_expires_seconds;
}
/* current time */
- lifetime = (struct sadb_lifetime *) skb_put(skb,
- sizeof(struct sadb_lifetime));
+ lifetime = skb_put(skb, sizeof(struct sadb_lifetime));
lifetime->sadb_lifetime_len =
sizeof(struct sadb_lifetime)/sizeof(uint64_t);
lifetime->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT;
@@ -895,8 +892,7 @@
lifetime->sadb_lifetime_addtime = x->curlft.add_time;
lifetime->sadb_lifetime_usetime = x->curlft.use_time;
/* src address */
- addr = (struct sadb_address*) skb_put(skb,
- sizeof(struct sadb_address)+sockaddr_size);
+ addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
addr->sadb_address_len =
(sizeof(struct sadb_address)+sockaddr_size)/
sizeof(uint64_t);
@@ -915,8 +911,7 @@
BUG();
/* dst address */
- addr = (struct sadb_address*) skb_put(skb,
- sizeof(struct sadb_address)+sockaddr_size);
+ addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
addr->sadb_address_len =
(sizeof(struct sadb_address)+sockaddr_size)/
sizeof(uint64_t);
@@ -933,8 +928,8 @@
if (!xfrm_addr_equal(&x->sel.saddr, &x->props.saddr,
x->props.family)) {
- addr = (struct sadb_address*) skb_put(skb,
- sizeof(struct sadb_address)+sockaddr_size);
+ addr = skb_put(skb,
+ sizeof(struct sadb_address) + sockaddr_size);
addr->sadb_address_len =
(sizeof(struct sadb_address)+sockaddr_size)/
sizeof(uint64_t);
@@ -951,8 +946,7 @@
/* auth key */
if (add_keys && auth_key_size) {
- key = (struct sadb_key *) skb_put(skb,
- sizeof(struct sadb_key)+auth_key_size);
+ key = skb_put(skb, sizeof(struct sadb_key) + auth_key_size);
key->sadb_key_len = (sizeof(struct sadb_key) + auth_key_size) /
sizeof(uint64_t);
key->sadb_key_exttype = SADB_EXT_KEY_AUTH;
@@ -962,8 +956,7 @@
}
/* encrypt key */
if (add_keys && encrypt_key_size) {
- key = (struct sadb_key *) skb_put(skb,
- sizeof(struct sadb_key)+encrypt_key_size);
+ key = skb_put(skb, sizeof(struct sadb_key) + encrypt_key_size);
key->sadb_key_len = (sizeof(struct sadb_key) +
encrypt_key_size) / sizeof(uint64_t);
key->sadb_key_exttype = SADB_EXT_KEY_ENCRYPT;
@@ -974,7 +967,7 @@
}
/* sa */
- sa2 = (struct sadb_x_sa2 *) skb_put(skb, sizeof(struct sadb_x_sa2));
+ sa2 = skb_put(skb, sizeof(struct sadb_x_sa2));
sa2->sadb_x_sa2_len = sizeof(struct sadb_x_sa2)/sizeof(uint64_t);
sa2->sadb_x_sa2_exttype = SADB_X_EXT_SA2;
if ((mode = pfkey_mode_from_xfrm(x->props.mode)) < 0) {
@@ -992,7 +985,7 @@
struct sadb_x_nat_t_port *n_port;
/* type */
- n_type = (struct sadb_x_nat_t_type*) skb_put(skb, sizeof(*n_type));
+ n_type = skb_put(skb, sizeof(*n_type));
n_type->sadb_x_nat_t_type_len = sizeof(*n_type)/sizeof(uint64_t);
n_type->sadb_x_nat_t_type_exttype = SADB_X_EXT_NAT_T_TYPE;
n_type->sadb_x_nat_t_type_type = natt->encap_type;
@@ -1001,14 +994,14 @@
n_type->sadb_x_nat_t_type_reserved[2] = 0;
/* source port */
- n_port = (struct sadb_x_nat_t_port*) skb_put(skb, sizeof (*n_port));
+ n_port = skb_put(skb, sizeof(*n_port));
n_port->sadb_x_nat_t_port_len = sizeof(*n_port)/sizeof(uint64_t);
n_port->sadb_x_nat_t_port_exttype = SADB_X_EXT_NAT_T_SPORT;
n_port->sadb_x_nat_t_port_port = natt->encap_sport;
n_port->sadb_x_nat_t_port_reserved = 0;
/* dest port */
- n_port = (struct sadb_x_nat_t_port*) skb_put(skb, sizeof (*n_port));
+ n_port = skb_put(skb, sizeof(*n_port));
n_port->sadb_x_nat_t_port_len = sizeof(*n_port)/sizeof(uint64_t);
n_port->sadb_x_nat_t_port_exttype = SADB_X_EXT_NAT_T_DPORT;
n_port->sadb_x_nat_t_port_port = natt->encap_dport;
@@ -1017,8 +1010,8 @@
/* security context */
if (xfrm_ctx) {
- sec_ctx = (struct sadb_x_sec_ctx *) skb_put(skb,
- sizeof(struct sadb_x_sec_ctx) + ctx_size);
+ sec_ctx = skb_put(skb,
+ sizeof(struct sadb_x_sec_ctx) + ctx_size);
sec_ctx->sadb_x_sec_len =
(sizeof(struct sadb_x_sec_ctx) + ctx_size) / sizeof(uint64_t);
sec_ctx->sadb_x_sec_exttype = SADB_X_EXT_SEC_CTX;
@@ -1617,7 +1610,7 @@
if (!skb)
goto out_put_algs;
- hdr = (struct sadb_msg *) skb_put(skb, sizeof(*hdr));
+ hdr = skb_put(skb, sizeof(*hdr));
pfkey_hdr_dup(hdr, orig);
hdr->sadb_msg_errno = 0;
hdr->sadb_msg_len = len / sizeof(uint64_t);
@@ -1626,7 +1619,7 @@
struct sadb_supported *sp;
struct sadb_alg *ap;
- sp = (struct sadb_supported *) skb_put(skb, auth_len);
+ sp = skb_put(skb, auth_len);
ap = (struct sadb_alg *) (sp + 1);
sp->sadb_supported_len = auth_len / sizeof(uint64_t);
@@ -1647,7 +1640,7 @@
struct sadb_supported *sp;
struct sadb_alg *ap;
- sp = (struct sadb_supported *) skb_put(skb, enc_len);
+ sp = skb_put(skb, enc_len);
ap = (struct sadb_alg *) (sp + 1);
sp->sadb_supported_len = enc_len / sizeof(uint64_t);
@@ -1706,8 +1699,7 @@
if (!skb)
return -ENOBUFS;
- hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
- memcpy(hdr, ihdr, sizeof(struct sadb_msg));
+ hdr = skb_put_data(skb, ihdr, sizeof(struct sadb_msg));
hdr->sadb_msg_errno = (uint8_t) 0;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
@@ -1722,7 +1714,7 @@
skb = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
if (!skb)
return -ENOBUFS;
- hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
+ hdr = skb_put(skb, sizeof(struct sadb_msg));
hdr->sadb_msg_satype = pfkey_proto2satype(c->data.proto);
hdr->sadb_msg_type = SADB_FLUSH;
hdr->sadb_msg_seq = c->seq;
@@ -2047,12 +2039,11 @@
size = pfkey_xfrm_policy2msg_size(xp);
/* caller should fill header later */
- hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
+ hdr = skb_put(skb, sizeof(struct sadb_msg));
memset(hdr, 0, size); /* XXX do we need this ? */
/* src address */
- addr = (struct sadb_address*) skb_put(skb,
- sizeof(struct sadb_address)+sockaddr_size);
+ addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
addr->sadb_address_len =
(sizeof(struct sadb_address)+sockaddr_size)/
sizeof(uint64_t);
@@ -2067,8 +2058,7 @@
BUG();
/* dst address */
- addr = (struct sadb_address*) skb_put(skb,
- sizeof(struct sadb_address)+sockaddr_size);
+ addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
addr->sadb_address_len =
(sizeof(struct sadb_address)+sockaddr_size)/
sizeof(uint64_t);
@@ -2082,8 +2072,7 @@
xp->family);
/* hard time */
- lifetime = (struct sadb_lifetime *) skb_put(skb,
- sizeof(struct sadb_lifetime));
+ lifetime = skb_put(skb, sizeof(struct sadb_lifetime));
lifetime->sadb_lifetime_len =
sizeof(struct sadb_lifetime)/sizeof(uint64_t);
lifetime->sadb_lifetime_exttype = SADB_EXT_LIFETIME_HARD;
@@ -2092,8 +2081,7 @@
lifetime->sadb_lifetime_addtime = xp->lft.hard_add_expires_seconds;
lifetime->sadb_lifetime_usetime = xp->lft.hard_use_expires_seconds;
/* soft time */
- lifetime = (struct sadb_lifetime *) skb_put(skb,
- sizeof(struct sadb_lifetime));
+ lifetime = skb_put(skb, sizeof(struct sadb_lifetime));
lifetime->sadb_lifetime_len =
sizeof(struct sadb_lifetime)/sizeof(uint64_t);
lifetime->sadb_lifetime_exttype = SADB_EXT_LIFETIME_SOFT;
@@ -2102,8 +2090,7 @@
lifetime->sadb_lifetime_addtime = xp->lft.soft_add_expires_seconds;
lifetime->sadb_lifetime_usetime = xp->lft.soft_use_expires_seconds;
/* current time */
- lifetime = (struct sadb_lifetime *) skb_put(skb,
- sizeof(struct sadb_lifetime));
+ lifetime = skb_put(skb, sizeof(struct sadb_lifetime));
lifetime->sadb_lifetime_len =
sizeof(struct sadb_lifetime)/sizeof(uint64_t);
lifetime->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT;
@@ -2112,7 +2099,7 @@
lifetime->sadb_lifetime_addtime = xp->curlft.add_time;
lifetime->sadb_lifetime_usetime = xp->curlft.use_time;
- pol = (struct sadb_x_policy *) skb_put(skb, sizeof(struct sadb_x_policy));
+ pol = skb_put(skb, sizeof(struct sadb_x_policy));
pol->sadb_x_policy_len = sizeof(struct sadb_x_policy)/sizeof(uint64_t);
pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
pol->sadb_x_policy_type = IPSEC_POLICY_DISCARD;
@@ -2140,7 +2127,7 @@
} else {
size -= 2*socklen;
}
- rq = (void*)skb_put(skb, req_size);
+ rq = skb_put(skb, req_size);
pol->sadb_x_policy_len += req_size/8;
memset(rq, 0, sizeof(*rq));
rq->sadb_x_ipsecrequest_len = req_size;
@@ -2170,7 +2157,7 @@
if ((xfrm_ctx = xp->security)) {
int ctx_size = pfkey_xfrm_policy2sec_ctx_size(xp);
- sec_ctx = (struct sadb_x_sec_ctx *) skb_put(skb, ctx_size);
+ sec_ctx = skb_put(skb, ctx_size);
sec_ctx->sadb_x_sec_len = ctx_size / sizeof(uint64_t);
sec_ctx->sadb_x_sec_exttype = SADB_X_EXT_SEC_CTX;
sec_ctx->sadb_x_ctx_doi = xfrm_ctx->ctx_doi;
@@ -2734,7 +2721,7 @@
skb_out = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
if (!skb_out)
return -ENOBUFS;
- hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg));
+ hdr = skb_put(skb_out, sizeof(struct sadb_msg));
hdr->sadb_msg_type = SADB_X_SPDFLUSH;
hdr->sadb_msg_seq = c->seq;
hdr->sadb_msg_pid = c->portid;
@@ -2918,7 +2905,7 @@
struct sadb_prop *p;
int i;
- p = (struct sadb_prop*)skb_put(skb, sizeof(struct sadb_prop));
+ p = skb_put(skb, sizeof(struct sadb_prop));
p->sadb_prop_len = sizeof(struct sadb_prop)/8;
p->sadb_prop_exttype = SADB_EXT_PROPOSAL;
p->sadb_prop_replay = 32;
@@ -2934,8 +2921,7 @@
if (aalg_tmpl_set(t, aalg) && aalg->available) {
struct sadb_comb *c;
- c = (struct sadb_comb*)skb_put(skb, sizeof(struct sadb_comb));
- memset(c, 0, sizeof(*c));
+ c = skb_put_zero(skb, sizeof(struct sadb_comb));
p->sadb_prop_len += sizeof(struct sadb_comb)/8;
c->sadb_comb_auth = aalg->desc.sadb_alg_id;
c->sadb_comb_auth_minbits = aalg->desc.sadb_alg_minbits;
@@ -2953,7 +2939,7 @@
struct sadb_prop *p;
int i, k;
- p = (struct sadb_prop*)skb_put(skb, sizeof(struct sadb_prop));
+ p = skb_put(skb, sizeof(struct sadb_prop));
p->sadb_prop_len = sizeof(struct sadb_prop)/8;
p->sadb_prop_exttype = SADB_EXT_PROPOSAL;
p->sadb_prop_replay = 32;
@@ -2979,7 +2965,7 @@
continue;
if (!(aalg_tmpl_set(t, aalg) && aalg->available))
continue;
- c = (struct sadb_comb*)skb_put(skb, sizeof(struct sadb_comb));
+ c = skb_put(skb, sizeof(struct sadb_comb));
memset(c, 0, sizeof(*c));
p->sadb_prop_len += sizeof(struct sadb_comb)/8;
c->sadb_comb_auth = aalg->desc.sadb_alg_id;
@@ -3146,7 +3132,7 @@
if (skb == NULL)
return -ENOMEM;
- hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
+ hdr = skb_put(skb, sizeof(struct sadb_msg));
hdr->sadb_msg_version = PF_KEY_V2;
hdr->sadb_msg_type = SADB_ACQUIRE;
hdr->sadb_msg_satype = pfkey_proto2satype(x->id.proto);
@@ -3157,8 +3143,7 @@
hdr->sadb_msg_pid = 0;
/* src address */
- addr = (struct sadb_address*) skb_put(skb,
- sizeof(struct sadb_address)+sockaddr_size);
+ addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
addr->sadb_address_len =
(sizeof(struct sadb_address)+sockaddr_size)/
sizeof(uint64_t);
@@ -3173,8 +3158,7 @@
BUG();
/* dst address */
- addr = (struct sadb_address*) skb_put(skb,
- sizeof(struct sadb_address)+sockaddr_size);
+ addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
addr->sadb_address_len =
(sizeof(struct sadb_address)+sockaddr_size)/
sizeof(uint64_t);
@@ -3188,7 +3172,7 @@
if (!addr->sadb_address_prefixlen)
BUG();
- pol = (struct sadb_x_policy *) skb_put(skb, sizeof(struct sadb_x_policy));
+ pol = skb_put(skb, sizeof(struct sadb_x_policy));
pol->sadb_x_policy_len = sizeof(struct sadb_x_policy)/sizeof(uint64_t);
pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
@@ -3205,8 +3189,8 @@
/* security context */
if (xfrm_ctx) {
- sec_ctx = (struct sadb_x_sec_ctx *) skb_put(skb,
- sizeof(struct sadb_x_sec_ctx) + ctx_size);
+ sec_ctx = skb_put(skb,
+ sizeof(struct sadb_x_sec_ctx) + ctx_size);
sec_ctx->sadb_x_sec_len =
(sizeof(struct sadb_x_sec_ctx) + ctx_size) / sizeof(uint64_t);
sec_ctx->sadb_x_sec_exttype = SADB_X_EXT_SEC_CTX;
@@ -3348,7 +3332,7 @@
if (skb == NULL)
return -ENOMEM;
- hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
+ hdr = skb_put(skb, sizeof(struct sadb_msg));
hdr->sadb_msg_version = PF_KEY_V2;
hdr->sadb_msg_type = SADB_X_NAT_T_NEW_MAPPING;
hdr->sadb_msg_satype = satype;
@@ -3359,7 +3343,7 @@
hdr->sadb_msg_pid = 0;
/* SA */
- sa = (struct sadb_sa *) skb_put(skb, sizeof(struct sadb_sa));
+ sa = skb_put(skb, sizeof(struct sadb_sa));
sa->sadb_sa_len = sizeof(struct sadb_sa)/sizeof(uint64_t);
sa->sadb_sa_exttype = SADB_EXT_SA;
sa->sadb_sa_spi = x->id.spi;
@@ -3370,8 +3354,7 @@
sa->sadb_sa_flags = 0;
/* ADDRESS_SRC (old addr) */
- addr = (struct sadb_address*)
- skb_put(skb, sizeof(struct sadb_address)+sockaddr_size);
+ addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
addr->sadb_address_len =
(sizeof(struct sadb_address)+sockaddr_size)/
sizeof(uint64_t);
@@ -3386,15 +3369,14 @@
BUG();
/* NAT_T_SPORT (old port) */
- n_port = (struct sadb_x_nat_t_port*) skb_put(skb, sizeof (*n_port));
+ n_port = skb_put(skb, sizeof(*n_port));
n_port->sadb_x_nat_t_port_len = sizeof(*n_port)/sizeof(uint64_t);
n_port->sadb_x_nat_t_port_exttype = SADB_X_EXT_NAT_T_SPORT;
n_port->sadb_x_nat_t_port_port = natt->encap_sport;
n_port->sadb_x_nat_t_port_reserved = 0;
/* ADDRESS_DST (new addr) */
- addr = (struct sadb_address*)
- skb_put(skb, sizeof(struct sadb_address)+sockaddr_size);
+ addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
addr->sadb_address_len =
(sizeof(struct sadb_address)+sockaddr_size)/
sizeof(uint64_t);
@@ -3409,7 +3391,7 @@
BUG();
/* NAT_T_DPORT (new port) */
- n_port = (struct sadb_x_nat_t_port*) skb_put(skb, sizeof (*n_port));
+ n_port = skb_put(skb, sizeof(*n_port));
n_port->sadb_x_nat_t_port_len = sizeof(*n_port)/sizeof(uint64_t);
n_port->sadb_x_nat_t_port_exttype = SADB_X_EXT_NAT_T_DPORT;
n_port->sadb_x_nat_t_port_port = sport;
@@ -3423,7 +3405,7 @@
const struct xfrm_selector *sel)
{
struct sadb_address *addr;
- addr = (struct sadb_address *)skb_put(skb, sizeof(struct sadb_address) + sasize);
+ addr = skb_put(skb, sizeof(struct sadb_address) + sasize);
addr->sadb_address_len = (sizeof(struct sadb_address) + sasize)/8;
addr->sadb_address_exttype = type;
addr->sadb_address_proto = sel->proto;
@@ -3461,8 +3443,7 @@
size_req = (sizeof(struct sadb_x_kmaddress) +
pfkey_sockaddr_pair_size(family));
- kma = (struct sadb_x_kmaddress *)skb_put(skb, size_req);
- memset(kma, 0, size_req);
+ kma = skb_put_zero(skb, size_req);
kma->sadb_x_kmaddress_len = size_req / 8;
kma->sadb_x_kmaddress_exttype = SADB_X_EXT_KMADDRESS;
kma->sadb_x_kmaddress_reserved = k->reserved;
@@ -3488,8 +3469,7 @@
size_req = sizeof(struct sadb_x_ipsecrequest) +
pfkey_sockaddr_pair_size(family);
- rq = (struct sadb_x_ipsecrequest *)skb_put(skb, size_req);
- memset(rq, 0, size_req);
+ rq = skb_put_zero(skb, size_req);
rq->sadb_x_ipsecrequest_len = size_req;
rq->sadb_x_ipsecrequest_proto = proto;
rq->sadb_x_ipsecrequest_mode = mode;
@@ -3557,7 +3537,7 @@
if (skb == NULL)
return -ENOMEM;
- hdr = (struct sadb_msg *)skb_put(skb, sizeof(struct sadb_msg));
+ hdr = skb_put(skb, sizeof(struct sadb_msg));
hdr->sadb_msg_version = PF_KEY_V2;
hdr->sadb_msg_type = SADB_X_MIGRATE;
hdr->sadb_msg_satype = pfkey_proto2satype(m->proto);
@@ -3578,7 +3558,7 @@
set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_DST, sel);
/* policy information */
- pol = (struct sadb_x_policy *)skb_put(skb, sizeof(struct sadb_x_policy));
+ pol = skb_put(skb, sizeof(struct sadb_x_policy));
pol->sadb_x_policy_len = size_pol / 8;
pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 1b7a4da..8708cbe 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -213,8 +213,7 @@
return;
skb_reserve(skb, local->hw.extra_tx_headroom);
- mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
- memset(mgmt, 0, 24);
+ mgmt = skb_put_zero(skb, 24);
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
if (sdata->vif.type == NL80211_IFTYPE_AP ||
@@ -449,44 +448,21 @@
buf_size, true, false);
}
-void ieee80211_start_rx_ba_session_offl(struct ieee80211_vif *vif,
- const u8 *addr, u16 tid)
+void ieee80211_manage_rx_ba_offl(struct ieee80211_vif *vif,
+ const u8 *addr, unsigned int bit)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
- struct ieee80211_rx_agg *rx_agg;
- struct sk_buff *skb = dev_alloc_skb(0);
+ struct sta_info *sta;
- if (unlikely(!skb))
- return;
+ rcu_read_lock();
+ sta = sta_info_get_bss(sdata, addr);
+ if (!sta)
+ goto unlock;
- rx_agg = (struct ieee80211_rx_agg *) &skb->cb;
- memcpy(&rx_agg->addr, addr, ETH_ALEN);
- rx_agg->tid = tid;
-
- skb->pkt_type = IEEE80211_SDATA_QUEUE_RX_AGG_START;
- skb_queue_tail(&sdata->skb_queue, skb);
- ieee80211_queue_work(&local->hw, &sdata->work);
+ set_bit(bit, sta->ampdu_mlme.tid_rx_manage_offl);
+ ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+ unlock:
+ rcu_read_unlock();
}
-EXPORT_SYMBOL(ieee80211_start_rx_ba_session_offl);
-
-void ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif,
- const u8 *addr, u16 tid)
-{
- struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_rx_agg *rx_agg;
- struct sk_buff *skb = dev_alloc_skb(0);
-
- if (unlikely(!skb))
- return;
-
- rx_agg = (struct ieee80211_rx_agg *) &skb->cb;
- memcpy(&rx_agg->addr, addr, ETH_ALEN);
- rx_agg->tid = tid;
-
- skb->pkt_type = IEEE80211_SDATA_QUEUE_RX_AGG_STOP;
- skb_queue_tail(&sdata->skb_queue, skb);
- ieee80211_queue_work(&local->hw, &sdata->work);
-}
-EXPORT_SYMBOL(ieee80211_stop_rx_ba_session_offl);
+EXPORT_SYMBOL(ieee80211_manage_rx_ba_offl);
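Note: drivers keep the old start/stop entry points. Judging by the bit layout consumed in the ht.c hunk below (bits 0..NUM_TIDS-1 request a start, bits NUM_TIDS..2*NUM_TIDS-1 a stop), the expected inline wrappers look like:

	static inline void
	ieee80211_start_rx_ba_session_offl(struct ieee80211_vif *vif,
					   const u8 *addr, u16 tid)
	{
		ieee80211_manage_rx_ba_offl(vif, addr, tid);
	}

	static inline void
	ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif,
					  const u8 *addr, u16 tid)
	{
		ieee80211_manage_rx_ba_offl(vif, addr,
					    tid + IEEE80211_NUM_TIDS);
	}

(the mac80211.h side is not part of this excerpt, so the wrapper bodies above are an assumption).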
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index cf2392b2..cbd4876 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -76,8 +76,7 @@
return;
skb_reserve(skb, local->hw.extra_tx_headroom);
- mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
- memset(mgmt, 0, 24);
+ mgmt = skb_put_zero(skb, 24);
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
if (sdata->vif.type == NL80211_IFTYPE_AP ||
@@ -125,8 +124,7 @@
return;
skb_reserve(skb, local->hw.extra_tx_headroom);
- bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
- memset(bar, 0, sizeof(*bar));
+ bar = skb_put_zero(skb, sizeof(*bar));
bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
IEEE80211_STYPE_BACK_REQ);
memcpy(bar->ra, ra, ETH_ALEN);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 4a388fe..a354f19 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1107,7 +1107,7 @@
skb = dev_alloc_skb(sizeof(*msg));
if (!skb)
return;
- msg = (struct iapp_layer2_update *)skb_put(skb, sizeof(*msg));
+ msg = skb_put(skb, sizeof(*msg));
/* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID)
* Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */
@@ -1876,6 +1876,7 @@
ifmsh->user_mpm = setup->user_mpm;
ifmsh->mesh_auth_id = setup->auth_id;
ifmsh->security = IEEE80211_MESH_SEC_NONE;
+ ifmsh->userspace_handles_dfs = setup->userspace_handles_dfs;
if (setup->is_authenticated)
ifmsh->security |= IEEE80211_MESH_SEC_AUTHED;
if (setup->is_secure)
@@ -3413,7 +3414,7 @@
skb_reserve(skb, local->hw.extra_tx_headroom);
- nullfunc = (void *) skb_put(skb, size);
+ nullfunc = skb_put(skb, size);
nullfunc->frame_control = fc;
nullfunc->duration_id = 0;
memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 8f5fff8..c813207 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -330,8 +330,7 @@
return -ENOMEM;
skb_reserve(skb, local->hw.extra_tx_headroom);
- hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
- memset(hdr, 0, 24);
+ hdr = skb_put_zero(skb, 24);
fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
switch (sdata->vif.type) {
@@ -367,7 +366,7 @@
* The exact contents do not matter since the recipient is required
* to drop this because of the Michael MIC failure.
*/
- memset(skb_put(skb, 50), 0, 50);
+ skb_put_zero(skb, 50);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_TKIP_MIC_FAILURE;
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 4260182..b15412c 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -154,6 +154,12 @@
p += scnprintf(p,
bufsz+buf-p,
+ "target %uus interval %uus ecn %s\n",
+ codel_time_to_us(sta->cparams.target),
+ codel_time_to_us(sta->cparams.interval),
+ sta->cparams.ecn ? "yes" : "no");
+ p += scnprintf(p,
+ bufsz+buf-p,
"tid ac backlog-bytes backlog-packets new-flows drops marks overlimit collisions tx-bytes tx-packets\n");
for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 6ca5442..c92df49 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -331,6 +331,18 @@
sta, tid, WLAN_BACK_RECIPIENT,
WLAN_REASON_UNSPECIFIED, true);
+ if (test_and_clear_bit(tid,
+ sta->ampdu_mlme.tid_rx_manage_offl))
+ __ieee80211_start_rx_ba_session(sta, 0, 0, 0, 1, tid,
+ IEEE80211_MAX_AMPDU_BUF,
+ false, true);
+
+ if (test_and_clear_bit(tid + IEEE80211_NUM_TIDS,
+ sta->ampdu_mlme.tid_rx_manage_offl))
+ ___ieee80211_stop_rx_ba_session(
+ sta, tid, WLAN_BACK_RECIPIENT,
+ 0, false);
+
spin_lock_bh(&sta->lock);
tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
@@ -382,8 +394,7 @@
return;
skb_reserve(skb, local->hw.extra_tx_headroom);
- mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
- memset(mgmt, 0, 24);
+ mgmt = skb_put_zero(skb, 24);
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
if (sdata->vif.type == NL80211_IFTYPE_AP ||
@@ -448,7 +459,7 @@
return -ENOMEM;
skb_reserve(skb, local->hw.extra_tx_headroom);
- action_frame = (void *)skb_put(skb, 27);
+ action_frame = skb_put(skb, 27);
memcpy(action_frame->da, da, ETH_ALEN);
memcpy(action_frame->sa, sdata->dev->dev_addr, ETH_ALEN);
memcpy(action_frame->bssid, bssid, ETH_ALEN);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 364d4e1..e9c6aa3 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -808,7 +808,6 @@
}
memset(&params, 0, sizeof(params));
- memset(&csa_ie, 0, sizeof(csa_ie));
err = ieee80211_parse_ch_switch_ie(sdata, elems,
ifibss->chandef.chan->band,
sta_flags, ifibss->bssid, &csa_ie);
@@ -1570,7 +1569,7 @@
return;
skb_reserve(skb, local->tx_headroom);
- memcpy(skb_put(skb, presp->head_len), presp->head, presp->head_len);
+ skb_put_data(skb, presp->head, presp->head_len);
memcpy(((struct ieee80211_mgmt *) skb->data)->da, mgmt->sa, ETH_ALEN);
ibss_dbg(sdata, "Sending ProbeResp to %pM\n", mgmt->sa);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 5e002f6..2197c62 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -643,6 +643,8 @@
unsigned long wrkq_flags;
unsigned long mbss_changed;
+ bool userspace_handles_dfs;
+
u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN];
size_t mesh_id_len;
/* Active Path Selection Protocol Identifier */
@@ -1029,17 +1031,6 @@
return shift;
}
-struct ieee80211_rx_agg {
- u8 addr[ETH_ALEN];
- u16 tid;
-};
-
-enum sdata_queue_type {
- IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0,
- IEEE80211_SDATA_QUEUE_RX_AGG_START = 3,
- IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4,
-};
-
enum {
IEEE80211_RX_MSG = 1,
IEEE80211_TX_STATUS_MSG = 2,
@@ -1432,6 +1423,7 @@
u8 count;
u8 ttl;
u16 pre_value;
+ u16 reason_code;
};
/* Parsed Information Elements */
@@ -2057,6 +2049,8 @@
u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
const struct cfg80211_chan_def *chandef,
u16 prot_mode, bool rifs_mode);
+void ieee80211_ie_build_wide_bw_cs(u8 *pos,
+ const struct cfg80211_chan_def *chandef);
u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
u32 cap);
u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index f5f5015..9228ac7 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1237,7 +1237,6 @@
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct sta_info *sta;
- struct ieee80211_rx_agg *rx_agg;
if (!ieee80211_sdata_running(sdata))
return;
@@ -1252,28 +1251,8 @@
while ((skb = skb_dequeue(&sdata->skb_queue))) {
struct ieee80211_mgmt *mgmt = (void *)skb->data;
- if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
- rx_agg = (void *)&skb->cb;
- mutex_lock(&local->sta_mtx);
- sta = sta_info_get_bss(sdata, rx_agg->addr);
- if (sta)
- __ieee80211_start_rx_ba_session(sta,
- 0, 0, 0, 1, rx_agg->tid,
- IEEE80211_MAX_AMPDU_BUF,
- false, true);
- mutex_unlock(&local->sta_mtx);
- } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_STOP) {
- rx_agg = (void *)&skb->cb;
- mutex_lock(&local->sta_mtx);
- sta = sta_info_get_bss(sdata, rx_agg->addr);
- if (sta)
- __ieee80211_stop_rx_ba_session(sta,
- rx_agg->tid,
- WLAN_BACK_RECIPIENT, 0,
- false);
- mutex_unlock(&local->sta_mtx);
- } else if (ieee80211_is_action(mgmt->frame_control) &&
- mgmt->u.action.category == WLAN_CATEGORY_BACK) {
+ if (ieee80211_is_action(mgmt->frame_control) &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK) {
int len = skb->len;
mutex_lock(&local->sta_mtx);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 737e1f0..a550c70 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -345,7 +345,7 @@
data = ifmsh->ie + offset;
if (skb_tailroom(skb) < len)
return -ENOMEM;
- memcpy(skb_put(skb, len), data, len);
+ skb_put_data(skb, data, len);
}
return 0;
@@ -369,7 +369,7 @@
if (skb_tailroom(skb) < len)
return -ENOMEM;
- memcpy(skb_put(skb, len), data, len);
+ skb_put_data(skb, data, len);
return 0;
}
@@ -690,6 +690,9 @@
2 + sizeof(struct ieee80211_channel_sw_ie) +
/* Mesh Channel Switch Parameters */
2 + sizeof(struct ieee80211_mesh_chansw_params_ie) +
+ /* Channel Switch Wrapper + Wide Bandwidth CSA IE */
+ 2 + 2 + sizeof(struct ieee80211_wide_bw_chansw_ie) +
+ 2 + sizeof(struct ieee80211_sec_chan_offs_ie) +
2 + 8 + /* supported rates */
2 + 3; /* DS params */
tail_len = 2 + (IEEE80211_MAX_SUPP_RATES - 8) +
@@ -716,8 +719,7 @@
bcn->head = ((u8 *) bcn) + sizeof(*bcn);
/* fill in the head */
- mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
- memset(mgmt, 0, hdr_len);
+ mgmt = skb_put_zero(skb, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_BEACON);
eth_broadcast_addr(mgmt->da);
@@ -736,8 +738,12 @@
rcu_read_lock();
csa = rcu_dereference(ifmsh->csa);
if (csa) {
- pos = skb_put(skb, 13);
- memset(pos, 0, 13);
+ enum nl80211_channel_type ct;
+ struct cfg80211_chan_def *chandef;
+ int ie_len = 2 + sizeof(struct ieee80211_channel_sw_ie) +
+ 2 + sizeof(struct ieee80211_mesh_chansw_params_ie);
+
+ pos = skb_put_zero(skb, ie_len);
*pos++ = WLAN_EID_CHANNEL_SWITCH;
*pos++ = 3;
*pos++ = 0x0;
@@ -760,6 +766,37 @@
pos += 2;
put_unaligned_le16(ifmsh->pre_value, pos);
pos += 2;
+
+ switch (csa->settings.chandef.width) {
+ case NL80211_CHAN_WIDTH_40:
+ ie_len = 2 + sizeof(struct ieee80211_sec_chan_offs_ie);
+ pos = skb_put_zero(skb, ie_len);
+
+ *pos++ = WLAN_EID_SECONDARY_CHANNEL_OFFSET; /* EID */
+ *pos++ = 1; /* len */
+ ct = cfg80211_get_chandef_type(&csa->settings.chandef);
+ if (ct == NL80211_CHAN_HT40PLUS)
+ *pos++ = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ else
+ *pos++ = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ /* Channel Switch Wrapper + Wide Bandwidth CSA IE */
+ ie_len = 2 + 2 +
+ sizeof(struct ieee80211_wide_bw_chansw_ie);
+ pos = skb_put_zero(skb, ie_len);
+
+ *pos++ = WLAN_EID_CHANNEL_SWITCH_WRAPPER; /* EID */
+ *pos++ = 5; /* len */
+ /* put sub IE */
+ chandef = &csa->settings.chandef;
+ ieee80211_ie_build_wide_bw_cs(pos, chandef);
+ break;
+ default:
+ break;
+ }
}
rcu_read_unlock();
@@ -916,6 +953,21 @@
ieee80211_configure_filter(local);
}
+static void ieee80211_mesh_csa_mark_radar(struct ieee80211_sub_if_data *sdata)
+{
+ int err;
+
+ /* if the current channel is a DFS channel, mark the channel as
+ * unavailable.
+ */
+ err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
+ &sdata->vif.bss_conf.chandef,
+ NL80211_IFTYPE_MESH_POINT);
+ if (err > 0)
+ cfg80211_radar_event(sdata->local->hw.wiphy,
+ &sdata->vif.bss_conf.chandef, GFP_ATOMIC);
+}
+
static bool
ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems, bool beacon)
@@ -933,19 +985,20 @@
if (!sband)
return false;
- sta_flags = IEEE80211_STA_DISABLE_VHT;
+ sta_flags = 0;
switch (sdata->vif.bss_conf.chandef.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
sta_flags |= IEEE80211_STA_DISABLE_HT;
case NL80211_CHAN_WIDTH_20:
sta_flags |= IEEE80211_STA_DISABLE_40MHZ;
+ case NL80211_CHAN_WIDTH_40:
+ sta_flags |= IEEE80211_STA_DISABLE_VHT;
break;
default:
break;
}
memset(&params, 0, sizeof(params));
- memset(&csa_ie, 0, sizeof(csa_ie));
err = ieee80211_parse_ch_switch_ie(sdata, elems, sband->band,
sta_flags, sdata->vif.addr,
&csa_ie);
@@ -954,11 +1007,19 @@
if (err)
return false;
+ /* Mark the channel unavailable if the reason for the switch is
+ * regulatory.
+ */
+ if (csa_ie.reason_code == WLAN_REASON_MESH_CHAN_REGULATORY)
+ ieee80211_mesh_csa_mark_radar(sdata);
+
params.chandef = csa_ie.chandef;
params.count = csa_ie.count;
if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, &params.chandef,
- IEEE80211_CHAN_DISABLED)) {
+ IEEE80211_CHAN_DISABLED) ||
+ !cfg80211_reg_can_beacon(sdata->local->hw.wiphy, &params.chandef,
+ NL80211_IFTYPE_MESH_POINT)) {
sdata_info(sdata,
"mesh STA %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), aborting\n",
sdata->vif.addr,
@@ -974,9 +1035,16 @@
NL80211_IFTYPE_MESH_POINT);
if (err < 0)
return false;
- if (err > 0)
- /* TODO: DFS not (yet) supported */
+ if (err > 0 && !ifmsh->userspace_handles_dfs) {
+ sdata_info(sdata,
+ "mesh STA %pM switches to channel requiring DFS (%d MHz, width:%d, CF1/2: %d/%d MHz), aborting\n",
+ sdata->vif.addr,
+ params.chandef.chan->center_freq,
+ params.chandef.width,
+ params.chandef.center_freq1,
+ params.chandef.center_freq2);
return false;
+ }
params.radar_required = err;
@@ -1057,8 +1125,8 @@
goto out;
skb_reserve(presp, local->tx_headroom);
- memcpy(skb_put(presp, bcn->head_len), bcn->head, bcn->head_len);
- memcpy(skb_put(presp, bcn->tail_len), bcn->tail, bcn->tail_len);
+ skb_put_data(presp, bcn->head, bcn->head_len);
+ skb_put_data(presp, bcn->tail, bcn->tail_len);
hdr = (struct ieee80211_mgmt *) presp->data;
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_PROBE_RESP);
@@ -1197,7 +1265,7 @@
if (!skb)
return -ENOMEM;
skb_reserve(skb, local->tx_headroom);
- mgmt_fwd = (struct ieee80211_mgmt *) skb_put(skb, len);
+ mgmt_fwd = skb_put(skb, len);
/* offset_ttl is based on whether the secondary channel
* offset is available or not. Subtract 1 from the mesh TTL
@@ -1233,7 +1301,7 @@
pos = mgmt->u.action.u.chan_switch.variable;
baselen = offsetof(struct ieee80211_mgmt,
u.action.u.chan_switch.variable);
- ieee802_11_parse_elems(pos, len - baselen, false, &elems);
+ ieee802_11_parse_elems(pos, len - baselen, true, &elems);
ifmsh->chsw_ttl = elems.mesh_chansw_params_ie->mesh_ttl;
if (!--ifmsh->chsw_ttl)
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 4005edd..d8bbd0d 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -120,8 +120,7 @@
if (!skb)
return -1;
skb_reserve(skb, local->tx_headroom);
- mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
- memset(mgmt, 0, hdr_len);
+ mgmt = skb_put_zero(skb, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
@@ -257,8 +256,7 @@
if (!skb)
return -1;
skb_reserve(skb, local->tx_headroom + sdata->encrypt_headroom);
- mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
- memset(mgmt, 0, hdr_len);
+ mgmt = skb_put_zero(skb, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 1131cd5..f69c6c3 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -242,8 +242,7 @@
return err;
info = IEEE80211_SKB_CB(skb);
skb_reserve(skb, local->tx_headroom);
- mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
- memset(mgmt, 0, hdr_len);
+ mgmt = skb_put_zero(skb, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
memcpy(mgmt->da, da, ETH_ALEN);
@@ -264,8 +263,7 @@
band = sband->band;
/* capability info */
- pos = skb_put(skb, 2);
- memset(pos, 0, 2);
+ pos = skb_put_zero(skb, 2);
if (action == WLAN_SP_MESH_PEERING_CONFIRM) {
/* AID */
pos = skb_put(skb, 2);
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
index 90a268a..d8cd914 100644
--- a/net/mac80211/mesh_ps.c
+++ b/net/mac80211/mesh_ps.c
@@ -30,7 +30,7 @@
return NULL;
skb_reserve(skb, local->hw.extra_tx_headroom);
- nullfunc = (struct ieee80211_hdr *) skb_put(skb, size);
+ nullfunc = skb_put(skb, size);
fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
ieee80211_fill_mesh_addresses(nullfunc, &fc, sta->sta.addr,
sdata->vif.addr);
@@ -39,7 +39,7 @@
nullfunc->seq_ctrl = 0;
/* no address resolution for this frame -> set addr 1 immediately */
memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
- memset(skb_put(skb, 2), 0, 2); /* append QoS control field */
+ skb_put_zero(skb, 2); /* append QoS control field */
ieee80211_mps_set_frame_flags(sdata, sta, nullfunc);
return skb;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index cc8e6ea..b588e59 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -674,8 +674,7 @@
if (ifmgd->flags & IEEE80211_STA_ENABLE_RRM)
capab |= WLAN_CAPABILITY_RADIO_MEASURE;
- mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
- memset(mgmt, 0, 24);
+ mgmt = skb_put_zero(skb, 24);
memcpy(mgmt->da, assoc_data->bss->bssid, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, assoc_data->bss->bssid, ETH_ALEN);
@@ -797,8 +796,7 @@
after_ric,
ARRAY_SIZE(after_ric),
offset);
- pos = skb_put(skb, noffset - offset);
- memcpy(pos, assoc_data->ie + offset, noffset - offset);
+ skb_put_data(skb, assoc_data->ie + offset, noffset - offset);
offset = noffset;
}
@@ -835,8 +833,7 @@
noffset = ieee80211_ie_split(assoc_data->ie, assoc_data->ie_len,
before_vht, ARRAY_SIZE(before_vht),
offset);
- pos = skb_put(skb, noffset - offset);
- memcpy(pos, assoc_data->ie + offset, noffset - offset);
+ skb_put_data(skb, assoc_data->ie + offset, noffset - offset);
offset = noffset;
}
@@ -849,8 +846,7 @@
noffset = ieee80211_ie_split_vendor(assoc_data->ie,
assoc_data->ie_len,
offset);
- pos = skb_put(skb, noffset - offset);
- memcpy(pos, assoc_data->ie + offset, noffset - offset);
+ skb_put_data(skb, assoc_data->ie + offset, noffset - offset);
offset = noffset;
}
@@ -869,8 +865,7 @@
/* add any remaining custom (i.e. vendor specific here) IEs */
if (assoc_data->ie_len) {
noffset = assoc_data->ie_len;
- pos = skb_put(skb, noffset - offset);
- memcpy(pos, assoc_data->ie + offset, noffset - offset);
+ skb_put_data(skb, assoc_data->ie + offset, noffset - offset);
}
if (assoc_data->fils_kek_len &&
@@ -949,8 +944,7 @@
skb_reserve(skb, local->hw.extra_tx_headroom);
- nullfunc = (struct ieee80211_hdr *) skb_put(skb, 30);
- memset(nullfunc, 0, 30);
+ nullfunc = skb_put_zero(skb, 30);
fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
nullfunc->frame_control = fc;
@@ -1122,7 +1116,6 @@
return;
current_band = cbss->channel->band;
- memset(&csa_ie, 0, sizeof(csa_ie));
res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band,
ifmgd->flags,
ifmgd->associated->bssid, &csa_ie);
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index eede5c6..f8e7a8b 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -885,8 +885,7 @@
}
skb_reserve(skb, local->hw.extra_tx_headroom);
- data = skb_put(skb, params->len);
- memcpy(data, params->buf, params->len);
+ data = skb_put_data(skb, params->buf, params->len);
/* Update CSA counters */
if (sdata->vif.csa_active &&
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index ea1f431..76f303f 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -943,6 +943,8 @@
drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
+ ieee80211_sta_set_expected_throughput(pubsta, sta_get_expected_throughput(sta));
+
return 0;
}
EXPORT_SYMBOL(rate_control_set_rates);
@@ -991,4 +993,3 @@
local->rate_ctrl = NULL;
rate_control_free(local, ref);
}
-
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 3674fe3..70e9d2c 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -237,7 +237,6 @@
if (!skb)
return;
- skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
skb_queue_tail(&sdata->skb_queue, skb);
ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}
@@ -274,7 +273,7 @@
if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
mpdulen += FCS_LEN;
- rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
+ rthdr = skb_push(skb, rtap_len);
memset(rthdr, 0, rtap_len - rtap.len - rtap.pad);
it_present = &rthdr->it_present;
@@ -1217,7 +1216,6 @@
/* if this mpdu is fragmented - terminate rx aggregation session */
sc = le16_to_cpu(hdr->seq_ctrl);
if (sc & IEEE80211_SCTL_FRAG) {
- skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
skb_queue_tail(&rx->sdata->skb_queue, skb);
ieee80211_queue_work(&local->hw, &rx->sdata->work);
return;
@@ -2100,7 +2098,7 @@
}
}
while ((skb = __skb_dequeue(&entry->skb_list))) {
- memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
+ skb_put_data(rx->skb, skb->data, skb->len);
dev_kfree_skb(skb);
}
@@ -2762,8 +2760,7 @@
return;
skb_reserve(skb, local->hw.extra_tx_headroom);
- resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
- memset(resp, 0, 24);
+ resp = skb_put_zero(skb, 24);
memcpy(resp->da, mgmt->sa, ETH_ALEN);
memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
@@ -3104,7 +3101,6 @@
return RX_QUEUED;
queue:
- rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
skb_queue_tail(&sdata->skb_queue, rx->skb);
ieee80211_queue_work(&local->hw, &sdata->work);
if (rx->sta)
@@ -3250,7 +3246,6 @@
}
/* queue up frame and kick off work to process it */
- rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
skb_queue_tail(&sdata->skb_queue, rx->skb);
ieee80211_queue_work(&rx->local->hw, &sdata->work);
if (rx->sta)
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 0782e48..ee01817 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -36,6 +36,8 @@
const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie;
int secondary_channel_offset = -1;
+ memset(csa_ie, 0, sizeof(*csa_ie));
+
sec_chan_offs = elems->sec_chan_offs;
wide_bw_chansw_ie = elems->wide_bw_chansw_ie;
@@ -76,6 +78,11 @@
csa_ie->mode = elems->mesh_chansw_params_ie->mesh_flags;
csa_ie->pre_value = le16_to_cpu(
elems->mesh_chansw_params_ie->mesh_pre_value);
+
+ if (elems->mesh_chansw_params_ie->mesh_flags &
+ WLAN_EID_CHAN_SWITCH_PARAM_REASON)
+ csa_ie->reason_code = le16_to_cpu(
+ elems->mesh_chansw_params_ie->mesh_reason);
}
new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band);
@@ -186,8 +193,7 @@
return;
skb_reserve(skb, local->hw.extra_tx_headroom);
- msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
- memset(msr_report, 0, 24);
+ msr_report = skb_put_zero(skb, 24);
memcpy(msr_report->da, da, ETH_ALEN);
memcpy(msr_report->sa, sdata->vif.addr, ETH_ALEN);
memcpy(msr_report->bssid, bssid, ETH_ALEN);
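Note: zeroing *csa_ie at the top of ieee80211_parse_ch_switch_ie() is what allows the per-caller memset(&csa_ie, ...) lines to be dropped from the ibss.c, mesh.c and mlme.c hunks above, and it also guarantees the new reason_code field reads as zero whenever the mesh channel-switch IE carries no reason.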
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 403e3cc..6961501 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -20,6 +20,7 @@
#include <linux/timer.h>
#include <linux/rtnetlink.h>
+#include <net/codel.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
@@ -425,6 +426,11 @@
sta->sta.max_rc_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA;
+ sta->cparams.ce_threshold = CODEL_DISABLED_THRESHOLD;
+ sta->cparams.target = MS2TIME(20);
+ sta->cparams.interval = MS2TIME(100);
+ sta->cparams.ecn = true;
+
sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
return sta;
@@ -1306,7 +1312,7 @@
skb_reserve(skb, local->hw.extra_tx_headroom);
- nullfunc = (void *) skb_put(skb, size);
+ nullfunc = skb_put(skb, size);
nullfunc->frame_control = fc;
nullfunc->duration_id = 0;
memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
@@ -2310,3 +2316,27 @@
return stats->last_rx;
return sta->status_stats.last_ack;
}
+
+static void sta_update_codel_params(struct sta_info *sta, u32 thr)
+{
+ if (!sta->sdata->local->ops->wake_tx_queue)
+ return;
+
+ if (thr && thr < STA_SLOW_THRESHOLD * sta->local->num_sta) {
+ sta->cparams.target = MS2TIME(50);
+ sta->cparams.interval = MS2TIME(300);
+ sta->cparams.ecn = false;
+ } else {
+ sta->cparams.target = MS2TIME(20);
+ sta->cparams.interval = MS2TIME(100);
+ sta->cparams.ecn = true;
+ }
+}
+
+void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta,
+ u32 thr)
+{
+ struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+
+ sta_update_codel_params(sta, thr);
+}
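With this, each station carries its own CoDel state: allocation seeds the
defaults (target 20 ms, interval 100 ms, ECN enabled), and
ieee80211_sta_set_expected_throughput() relaxes them to 50 ms / 300 ms without
ECN once the reported throughput drops below STA_SLOW_THRESHOLD scaled by the
station count, so slow stations are not starved by aggressive drops. The
fq_codel dequeue path in tx.c, further down in this merge, then selects
sta->cparams instead of local->cparams for per-station queues. A hedged usage
sketch from a driver's side - the driver function name is hypothetical, and
the kbps unit follows the "6000 == 6 Mbps" comment on STA_SLOW_THRESHOLD:

/* Hypothetical driver snippet: once rate control has settled, report the
 * station's expected throughput (kbps) so mac80211 can pick lenient CoDel
 * parameters for slow stations. */
static void mydrv_rate_update(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta, u32 tput_kbps)
{
	/* Below STA_SLOW_THRESHOLD * num_sta this switches the station to
	 * target 50 ms / interval 300 ms with ECN disabled. */
	ieee80211_sta_set_expected_throughput(sta, tput_kbps);
}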
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index ea0747d..3acbdfa 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -233,6 +233,8 @@
* RX timer expired until the work for it runs
* @tid_rx_stop_requested: bitmap indicating which BA sessions per TID the
* driver requested to close until the work for it runs
+ * @tid_rx_manage_offl: bitmap indicating which BA sessions were requested
+ * to be treated as started/stopped due to offloading
* @agg_session_valid: bitmap indicating which TID has a rx BA session open on
* @unexpected_agg: bitmap indicating which TID already sent a delBA due to
* unexpected aggregation related frames outside a session
@@ -250,6 +252,7 @@
u8 tid_rx_token[IEEE80211_NUM_TIDS];
unsigned long tid_rx_timer_expired[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
unsigned long tid_rx_stop_requested[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
+ unsigned long tid_rx_manage_offl[BITS_TO_LONGS(2 * IEEE80211_NUM_TIDS)];
unsigned long agg_session_valid[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
unsigned long unexpected_agg[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
/* tx */
@@ -396,6 +399,14 @@
};
/**
+ * The bandwidth threshold below which the per-station CoDel parameters will be
+ * scaled to be more lenient (to prevent starvation of slow stations). This
+ * value will be scaled by the number of active stations when it is being
+ * applied.
+ */
+#define STA_SLOW_THRESHOLD 6000 /* 6 Mbps */
+
+/**
* struct sta_info - STA information
*
* This structure collects information about a station that
@@ -448,6 +459,7 @@
* @known_smps_mode: the smps_mode the client thinks we are in. Relevant for
* AP only.
* @cipher_scheme: optional cipher scheme for this station
+ * @cparams: CoDel parameters for this station.
* @reserved_tid: reserved TID (if any, otherwise IEEE80211_TID_UNRESERVED)
* @fast_tx: TX fastpath information
* @fast_rx: RX fastpath information
@@ -551,6 +563,8 @@
enum ieee80211_smps_mode known_smps_mode;
const struct ieee80211_cipher_scheme *cipher_scheme;
+ struct codel_params cparams;
+
u8 reserved_tid;
struct cfg80211_chan_def tdls_chandef;
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index be47ac5..da7427a 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -288,7 +288,7 @@
unsigned char *pos;
u16 txflags;
- rthdr = (struct ieee80211_radiotap_header *) skb_push(skb, rtap_len);
+ rthdr = skb_push(skb, rtap_len);
memset(rthdr, 0, rtap_len);
rthdr->it_len = cpu_to_le16(rtap_len);
@@ -546,6 +546,8 @@
skb->wifi_acked_valid = 1;
skb->wifi_acked = acked;
}
+
+ ieee80211_led_tx(local);
}
/*
@@ -823,8 +825,6 @@
}
}
- ieee80211_led_tx(local);
-
/* SNMP counters
* Fragments are passed to low-level drivers as separate skbs, so these
* are actually fragments, not frames. Update frame counters only for
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index f20dcf1..91093d4 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -49,7 +49,7 @@
!ifmgd->tdls_wider_bw_prohibited;
struct ieee80211_supported_band *sband = ieee80211_get_sband(sdata);
bool vht = sband && sband->vht_cap.vht_supported;
- u8 *pos = (void *)skb_put(skb, 10);
+ u8 *pos = skb_put(skb, 10);
*pos++ = WLAN_EID_EXT_CAPABILITY;
*pos++ = 8; /* len */
@@ -168,7 +168,7 @@
static void ieee80211_tdls_add_bss_coex_ie(struct sk_buff *skb)
{
- u8 *pos = (void *)skb_put(skb, 3);
+ u8 *pos = skb_put(skb, 3);
*pos++ = WLAN_EID_BSS_COEX_2040;
*pos++ = 1; /* len */
@@ -209,7 +209,7 @@
rsp_addr = sdata->vif.addr;
}
- lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
+ lnkid = skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
lnkid->ie_type = WLAN_EID_LINK_ID;
lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2;
@@ -223,7 +223,7 @@
ieee80211_tdls_add_aid(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- u8 *pos = (void *)skb_put(skb, 4);
+ u8 *pos = skb_put(skb, 4);
*pos++ = WLAN_EID_AID;
*pos++ = 2; /* len */
@@ -271,8 +271,7 @@
struct ieee80211_tx_queue_params *txq;
int i;
- wmm = (void *)skb_put(skb, sizeof(*wmm));
- memset(wmm, 0, sizeof(*wmm));
+ wmm = skb_put_zero(skb, sizeof(*wmm));
wmm->element_id = WLAN_EID_VENDOR_SPECIFIC;
wmm->len = sizeof(*wmm) - 2;
@@ -389,8 +388,7 @@
before_ext_cap,
ARRAY_SIZE(before_ext_cap),
offset);
- pos = skb_put(skb, noffset - offset);
- memcpy(pos, extra_ies + offset, noffset - offset);
+ skb_put_data(skb, extra_ies + offset, noffset - offset);
offset = noffset;
}
@@ -419,8 +417,7 @@
before_ht_cap,
ARRAY_SIZE(before_ht_cap),
offset);
- pos = skb_put(skb, noffset - offset);
- memcpy(pos, extra_ies + offset, noffset - offset);
+ skb_put_data(skb, extra_ies + offset, noffset - offset);
offset = noffset;
}
@@ -491,8 +488,7 @@
before_vht_cap,
ARRAY_SIZE(before_vht_cap),
offset);
- pos = skb_put(skb, noffset - offset);
- memcpy(pos, extra_ies + offset, noffset - offset);
+ skb_put_data(skb, extra_ies + offset, noffset - offset);
offset = noffset;
}
@@ -533,8 +529,7 @@
/* add any remaining IEs */
if (extra_ies_len) {
noffset = extra_ies_len;
- pos = skb_put(skb, noffset - offset);
- memcpy(pos, extra_ies + offset, noffset - offset);
+ skb_put_data(skb, extra_ies + offset, noffset - offset);
}
}
@@ -576,8 +571,7 @@
before_qos,
ARRAY_SIZE(before_qos),
offset);
- pos = skb_put(skb, noffset - offset);
- memcpy(pos, extra_ies + offset, noffset - offset);
+ skb_put_data(skb, extra_ies + offset, noffset - offset);
offset = noffset;
}
@@ -597,8 +591,7 @@
before_ht_op,
ARRAY_SIZE(before_ht_op),
offset);
- pos = skb_put(skb, noffset - offset);
- memcpy(pos, extra_ies + offset, noffset - offset);
+ skb_put_data(skb, extra_ies + offset, noffset - offset);
offset = noffset;
}
@@ -639,8 +632,7 @@
/* add any remaining IEs */
if (extra_ies_len) {
noffset = extra_ies_len;
- pos = skb_put(skb, noffset - offset);
- memcpy(pos, extra_ies + offset, noffset - offset);
+ skb_put_data(skb, extra_ies + offset, noffset - offset);
}
}
@@ -653,7 +645,6 @@
{
struct ieee80211_tdls_data *tf;
size_t offset = 0, noffset;
- u8 *pos;
if (WARN_ON_ONCE(!chandef))
return;
@@ -671,8 +662,7 @@
before_lnkie,
ARRAY_SIZE(before_lnkie),
offset);
- pos = skb_put(skb, noffset - offset);
- memcpy(pos, extra_ies + offset, noffset - offset);
+ skb_put_data(skb, extra_ies + offset, noffset - offset);
offset = noffset;
}
@@ -681,8 +671,7 @@
/* add any remaining IEs */
if (extra_ies_len) {
noffset = extra_ies_len;
- pos = skb_put(skb, noffset - offset);
- memcpy(pos, extra_ies + offset, noffset - offset);
+ skb_put_data(skb, extra_ies + offset, noffset - offset);
}
}
@@ -697,7 +686,7 @@
ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
if (extra_ies_len)
- memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
+ skb_put_data(skb, extra_ies, extra_ies_len);
}
static void ieee80211_tdls_add_ies(struct ieee80211_sub_if_data *sdata,
@@ -727,8 +716,7 @@
case WLAN_TDLS_TEARDOWN:
case WLAN_TDLS_DISCOVERY_REQUEST:
if (extra_ies_len)
- memcpy(skb_put(skb, extra_ies_len), extra_ies,
- extra_ies_len);
+ skb_put_data(skb, extra_ies, extra_ies_len);
if (status_code == 0 || action_code == WLAN_TDLS_TEARDOWN)
ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
break;
@@ -756,7 +744,7 @@
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_tdls_data *tf;
- tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
+ tf = skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
memcpy(tf->da, peer, ETH_ALEN);
memcpy(tf->sa, sdata->vif.addr, ETH_ALEN);
@@ -838,8 +826,7 @@
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_mgmt *mgmt;
- mgmt = (void *)skb_put(skb, 24);
- memset(mgmt, 0, 24);
+ mgmt = skb_put_zero(skb, 24);
memcpy(mgmt->da, peer, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 0d645bc..3d9ac17 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -92,16 +92,19 @@
__field(u16, ssn) \
__field(u8, buf_size) \
__field(bool, amsdu) \
- __field(u16, timeout)
+ __field(u16, timeout) \
+ __field(u16, action)
#define AMPDU_ACTION_ASSIGN STA_NAMED_ASSIGN(params->sta); \
__entry->tid = params->tid; \
__entry->ssn = params->ssn; \
__entry->buf_size = params->buf_size; \
__entry->amsdu = params->amsdu; \
- __entry->timeout = params->timeout;
-#define AMPDU_ACTION_PR_FMT STA_PR_FMT " tid %d, ssn %d, buf_size %u, amsdu %d, timeout %d"
+ __entry->timeout = params->timeout; \
+ __entry->action = params->action;
+#define AMPDU_ACTION_PR_FMT STA_PR_FMT " tid %d, ssn %d, buf_size %u, amsdu %d, timeout %d action %d"
#define AMPDU_ACTION_PR_ARG STA_PR_ARG, __entry->tid, __entry->ssn, \
- __entry->buf_size, __entry->amsdu, __entry->timeout
+ __entry->buf_size, __entry->amsdu, __entry->timeout, \
+ __entry->action
/*
* Tracing for driver callbacks.
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 04b22f8..8858f4f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -903,8 +903,8 @@
tmp->dev = skb->dev;
/* copy header and data */
- memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen);
- memcpy(skb_put(tmp, fraglen), skb->data + pos, fraglen);
+ skb_put_data(tmp, skb->data, hdrlen);
+ skb_put_data(tmp, skb->data + pos, fraglen);
pos += fraglen;
}
@@ -1340,9 +1340,16 @@
local = container_of(fq, struct ieee80211_local, fq);
txqi = container_of(tin, struct txq_info, tin);
- cparams = &local->cparams;
cstats = &txqi->cstats;
+ if (txqi->txq.sta) {
+ struct sta_info *sta = container_of(txqi->txq.sta,
+ struct sta_info, sta);
+ cparams = &sta->cparams;
+ } else {
+ cparams = &local->cparams;
+ }
+
if (flow == &txqi->def_flow)
cvars = &txqi->def_cvars;
else
@@ -2701,7 +2708,7 @@
if (ieee80211_is_data_qos(fc)) {
__le16 *qos_control;
- qos_control = (__le16 *) skb_push(skb, 2);
+ qos_control = skb_push(skb, 2);
memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2);
/*
* Maybe we could actually set some fields here, for now just
@@ -3037,7 +3044,7 @@
if (padding) {
*subframe_len += padding;
- memset(skb_put(skb, padding), 0, padding);
+ skb_put_zero(skb, padding);
}
return true;
@@ -3340,7 +3347,7 @@
}
memcpy(&eth, skb->data, ETH_HLEN - 2);
- hdr = (void *)skb_push(skb, extra_head);
+ hdr = skb_push(skb, extra_head);
memcpy(skb->data, fast_tx->hdr, fast_tx->hdr_len);
memcpy(skb->data + fast_tx->da_offs, eth.h_dest, ETH_ALEN);
memcpy(skb->data + fast_tx->sa_offs, eth.h_source, ETH_ALEN);
@@ -3867,7 +3874,7 @@
ps->dtim_count--;
}
- tim = pos = (u8 *) skb_put(skb, 6);
+ tim = pos = skb_put(skb, 6);
*pos++ = WLAN_EID_TIM;
*pos++ = 4;
*pos++ = ps->dtim_count;
@@ -4125,8 +4132,7 @@
goto out;
skb_reserve(skb, local->tx_headroom);
- memcpy(skb_put(skb, beacon->head_len), beacon->head,
- beacon->head_len);
+ skb_put_data(skb, beacon->head, beacon->head_len);
ieee80211_beacon_add_tim(sdata, &ap->ps, skb,
is_template);
@@ -4140,8 +4146,8 @@
}
if (beacon->tail)
- memcpy(skb_put(skb, beacon->tail_len),
- beacon->tail, beacon->tail_len);
+ skb_put_data(skb, beacon->tail,
+ beacon->tail_len);
} else
goto out;
} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
@@ -4164,8 +4170,7 @@
if (!skb)
goto out;
skb_reserve(skb, local->tx_headroom);
- memcpy(skb_put(skb, beacon->head_len), beacon->head,
- beacon->head_len);
+ skb_put_data(skb, beacon->head, beacon->head_len);
hdr = (struct ieee80211_hdr *) skb->data;
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
@@ -4200,8 +4205,7 @@
if (!skb)
goto out;
skb_reserve(skb, local->tx_headroom);
- memcpy(skb_put(skb, beacon->head_len), beacon->head,
- beacon->head_len);
+ skb_put_data(skb, beacon->head, beacon->head_len);
ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb, is_template);
if (offs) {
@@ -4209,8 +4213,7 @@
offs->tim_length = skb->len - beacon->head_len;
}
- memcpy(skb_put(skb, beacon->tail_len), beacon->tail,
- beacon->tail_len);
+ skb_put_data(skb, beacon->tail, beacon->tail_len);
} else {
WARN_ON(1);
goto out;
@@ -4330,7 +4333,7 @@
if (!skb)
goto out;
- memcpy(skb_put(skb, presp->len), presp->data, presp->len);
+ skb_put_data(skb, presp->data, presp->len);
hdr = (struct ieee80211_hdr *) skb->data;
memset(hdr->addr1, 0, sizeof(hdr->addr1));
@@ -4363,8 +4366,7 @@
skb_reserve(skb, local->hw.extra_tx_headroom);
- pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll));
- memset(pspoll, 0, sizeof(*pspoll));
+ pspoll = skb_put_zero(skb, sizeof(*pspoll));
pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
IEEE80211_STYPE_PSPOLL);
pspoll->aid = cpu_to_le16(ifmgd->aid);
@@ -4401,9 +4403,7 @@
skb_reserve(skb, local->hw.extra_tx_headroom);
- nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb,
- sizeof(*nullfunc));
- memset(nullfunc, 0, sizeof(*nullfunc));
+ nullfunc = skb_put_zero(skb, sizeof(*nullfunc));
nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_NULLFUNC |
IEEE80211_FCTL_TODS);
@@ -4435,8 +4435,7 @@
skb_reserve(skb, local->hw.extra_tx_headroom);
- hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
- memset(hdr, 0, sizeof(*hdr));
+ hdr = skb_put_zero(skb, sizeof(*hdr));
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_PROBE_REQ);
eth_broadcast_addr(hdr->addr1);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index ac9ac6c..259698d 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1242,8 +1242,7 @@
skb_reserve(skb, local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN);
- mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
- memset(mgmt, 0, 24 + 6);
+ mgmt = skb_put_zero(skb, 24 + 6);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_AUTH);
memcpy(mgmt->da, da, ETH_ALEN);
@@ -1253,7 +1252,7 @@
mgmt->u.auth.auth_transaction = cpu_to_le16(transaction);
mgmt->u.auth.status_code = cpu_to_le16(status);
if (extra)
- memcpy(skb_put(skb, extra_len), extra, extra_len);
+ skb_put_data(skb, extra, extra_len);
if (auth_alg == WLAN_AUTH_SHARED_KEY && transaction == 3) {
mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
@@ -1293,8 +1292,7 @@
skb_reserve(skb, local->hw.extra_tx_headroom);
/* copy in frame */
- memcpy(skb_put(skb, IEEE80211_DEAUTH_FRAME_LEN),
- mgmt, IEEE80211_DEAUTH_FRAME_LEN);
+ skb_put_data(skb, mgmt, IEEE80211_DEAUTH_FRAME_LEN);
if (sdata->vif.type != NL80211_IFTYPE_STATION ||
!(sdata->u.mgd.flags & IEEE80211_STA_MFP_ENABLED))
@@ -2414,6 +2412,35 @@
return pos + sizeof(struct ieee80211_ht_operation);
}
+void ieee80211_ie_build_wide_bw_cs(u8 *pos,
+ const struct cfg80211_chan_def *chandef)
+{
+ *pos++ = WLAN_EID_WIDE_BW_CHANNEL_SWITCH; /* EID */
+ *pos++ = 3; /* IE length */
+ /* New channel width */
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_80:
+ *pos++ = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ *pos++ = IEEE80211_VHT_CHANWIDTH_160MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ *pos++ = IEEE80211_VHT_CHANWIDTH_80P80MHZ;
+ break;
+ default:
+ *pos++ = IEEE80211_VHT_CHANWIDTH_USE_HT;
+ }
+
+ /* new center frequency segment 0 */
+ *pos++ = ieee80211_frequency_to_channel(chandef->center_freq1);
+ /* new center frequency segment 1 */
+ if (chandef->center_freq2)
+ *pos++ = ieee80211_frequency_to_channel(chandef->center_freq2);
+ else
+ *pos++ = 0;
+}
+
u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
const struct cfg80211_chan_def *chandef)
{
@@ -2964,13 +2991,13 @@
skb = dev_alloc_skb(local->tx_headroom + hdr_len +
5 + /* channel switch announcement element */
3 + /* secondary channel offset element */
+ 5 + /* wide bandwidth channel switch announcement */
8); /* mesh channel switch parameters element */
if (!skb)
return -ENOMEM;
skb_reserve(skb, local->tx_headroom);
- mgmt = (struct ieee80211_mgmt *)skb_put(skb, hdr_len);
- memset(mgmt, 0, hdr_len);
+ mgmt = skb_put_zero(skb, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
@@ -3022,6 +3049,13 @@
pos += 2;
}
+ if (csa_settings->chandef.width == NL80211_CHAN_WIDTH_80 ||
+ csa_settings->chandef.width == NL80211_CHAN_WIDTH_80P80 ||
+ csa_settings->chandef.width == NL80211_CHAN_WIDTH_160) {
+ skb_put(skb, 5);
+ ieee80211_ie_build_wide_bw_cs(pos, &csa_settings->chandef);
+ }
+
ieee80211_tx_skb(sdata, skb);
return 0;
}
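ieee80211_ie_build_wide_bw_cs() emits the fixed 5-byte Wide Bandwidth Channel
Switch element (element ID, length, then a 3-byte payload: new channel width,
center frequency segment 0, segment 1), and the mesh CSA frame above grows by
those 5 bytes whenever the target chandef is 80, 80+80 or 160 MHz wide. A
worked example of the bytes produced for an 80 MHz chandef with
center_freq1 = 5210 MHz (channel 42) and no second segment:

/* Resulting element bytes for the 80 MHz example above: */
u8 ie[5] = {
	WLAN_EID_WIDE_BW_CHANNEL_SWITCH,	/* EID 194 */
	3,					/* payload length */
	IEEE80211_VHT_CHANWIDTH_80MHZ,		/* new channel width */
	42,					/* center frequency segment 0 */
	0,					/* no segment 1 */
};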
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index cc19614..0d722ea 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -949,7 +949,7 @@
if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
return TX_DROP;
- mmie = (struct ieee80211_mmie *) skb_put(skb, sizeof(*mmie));
+ mmie = skb_put(skb, sizeof(*mmie));
mmie->element_id = WLAN_EID_MMIE;
mmie->length = sizeof(*mmie) - 2;
mmie->key_id = cpu_to_le16(key->conf.keyidx);
@@ -993,7 +993,7 @@
if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
return TX_DROP;
- mmie = (struct ieee80211_mmie_16 *)skb_put(skb, sizeof(*mmie));
+ mmie = skb_put(skb, sizeof(*mmie));
mmie->element_id = WLAN_EID_MMIE;
mmie->length = sizeof(*mmie) - 2;
mmie->key_id = cpu_to_le16(key->conf.keyidx);
@@ -1138,7 +1138,7 @@
if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
return TX_DROP;
- mmie = (struct ieee80211_mmie_16 *)skb_put(skb, sizeof(*mmie));
+ mmie = skb_put(skb, sizeof(*mmie));
mmie->element_id = WLAN_EID_MMIE;
mmie->length = sizeof(*mmie) - 2;
mmie->key_id = cpu_to_le16(key->conf.keyidx);
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 7b05fd1..b51582d 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -684,6 +684,54 @@
return err;
}
+static int nla_get_via(const struct nlattr *nla, u8 *via_alen, u8 *via_table,
+ u8 via_addr[], struct netlink_ext_ack *extack)
+{
+ struct rtvia *via = nla_data(nla);
+ int err = -EINVAL;
+ int alen;
+
+ if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr)) {
+ NL_SET_ERR_MSG_ATTR(extack, nla,
+ "Invalid attribute length for RTA_VIA");
+ goto errout;
+ }
+ alen = nla_len(nla) -
+ offsetof(struct rtvia, rtvia_addr);
+ if (alen > MAX_VIA_ALEN) {
+ NL_SET_ERR_MSG_ATTR(extack, nla,
+ "Invalid address length for RTA_VIA");
+ goto errout;
+ }
+
+ /* Validate the address family */
+ switch (via->rtvia_family) {
+ case AF_PACKET:
+ *via_table = NEIGH_LINK_TABLE;
+ break;
+ case AF_INET:
+ *via_table = NEIGH_ARP_TABLE;
+ if (alen != 4)
+ goto errout;
+ break;
+ case AF_INET6:
+ *via_table = NEIGH_ND_TABLE;
+ if (alen != 16)
+ goto errout;
+ break;
+ default:
+ /* Unsupported address family */
+ goto errout;
+ }
+
+ memcpy(via_addr, via->rtvia_addr, alen);
+ *via_alen = alen;
+ err = 0;
+
+errout:
+ return err;
+}
+
static int mpls_nh_build_from_cfg(struct mpls_route_config *cfg,
struct mpls_route *rt)
{
@@ -695,8 +743,6 @@
if (!nh)
return -ENOMEM;
- err = -EINVAL;
-
nh->nh_labels = cfg->rc_output_labels;
for (i = 0; i < nh->nh_labels; i++)
nh->nh_label[i] = cfg->rc_output_label[i];
@@ -720,7 +766,8 @@
static int mpls_nh_build(struct net *net, struct mpls_route *rt,
struct mpls_nh *nh, int oif, struct nlattr *via,
- struct nlattr *newdst, u8 max_labels)
+ struct nlattr *newdst, u8 max_labels,
+ struct netlink_ext_ack *extack)
{
int err = -ENOMEM;
@@ -728,15 +775,15 @@
goto errout;
if (newdst) {
- err = nla_get_labels(newdst, max_labels,
- &nh->nh_labels, nh->nh_label);
+ err = nla_get_labels(newdst, max_labels, &nh->nh_labels,
+ nh->nh_label, extack);
if (err)
goto errout;
}
if (via) {
err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table,
- __mpls_nh_via(rt, nh));
+ __mpls_nh_via(rt, nh), extack);
if (err)
goto errout;
} else {
@@ -782,7 +829,8 @@
nla = nla_find(attrs, attrlen, RTA_NEWDST);
if (nla &&
- nla_get_labels(nla, MAX_NEW_LABELS, &n_labels, NULL) != 0)
+ nla_get_labels(nla, MAX_NEW_LABELS, &n_labels,
+ NULL, NULL) != 0)
return 0;
*max_labels = max_t(u8, *max_labels, n_labels);
@@ -802,7 +850,8 @@
}
static int mpls_nh_build_multi(struct mpls_route_config *cfg,
- struct mpls_route *rt, u8 max_labels)
+ struct mpls_route *rt, u8 max_labels,
+ struct netlink_ext_ack *extack)
{
struct rtnexthop *rtnh = cfg->rc_mp;
struct nlattr *nla_via, *nla_newdst;
@@ -836,7 +885,7 @@
err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh,
rtnh->rtnh_ifindex, nla_via, nla_newdst,
- max_labels);
+ max_labels, extack);
if (err)
goto errout;
@@ -855,7 +904,28 @@
return err;
}
-static int mpls_route_add(struct mpls_route_config *cfg)
+static bool mpls_label_ok(struct net *net, unsigned int index,
+ struct netlink_ext_ack *extack)
+{
+ /* Reserved labels may not be set */
+ if (index < MPLS_LABEL_FIRST_UNRESERVED) {
+ NL_SET_ERR_MSG(extack,
+ "Invalid label - must be MPLS_LABEL_FIRST_UNRESERVED or higher");
+ return false;
+ }
+
+ /* The full 20 bit range may not be supported. */
+ if (index >= net->mpls.platform_labels) {
+ NL_SET_ERR_MSG(extack,
+ "Label >= configured maximum in platform_labels");
+ return false;
+ }
+
+ return true;
+}
+
+static int mpls_route_add(struct mpls_route_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct mpls_route __rcu **platform_label;
struct net *net = cfg->rc_nlinfo.nl_net;
@@ -874,18 +944,15 @@
index = find_free_label(net);
}
- /* Reserved labels may not be set */
- if (index < MPLS_LABEL_FIRST_UNRESERVED)
- goto errout;
-
- /* The full 20 bit range may not be supported. */
- if (index >= net->mpls.platform_labels)
+ if (!mpls_label_ok(net, index, extack))
goto errout;
/* Append makes no sense with mpls */
err = -EOPNOTSUPP;
- if (cfg->rc_nlflags & NLM_F_APPEND)
+ if (cfg->rc_nlflags & NLM_F_APPEND) {
+ NL_SET_ERR_MSG(extack, "MPLS does not support route append");
goto errout;
+ }
err = -EEXIST;
platform_label = rtnl_dereference(net->mpls.platform_label);
@@ -912,8 +979,10 @@
nhs = 1;
}
- if (nhs == 0)
+ if (nhs == 0) {
+ NL_SET_ERR_MSG(extack, "Route does not contain a nexthop");
goto errout;
+ }
err = -ENOMEM;
rt = mpls_rt_alloc(nhs, max_via_alen, max_labels);
@@ -927,7 +996,7 @@
rt->rt_ttl_propagate = cfg->rc_ttl_propagate;
if (cfg->rc_mp)
- err = mpls_nh_build_multi(cfg, rt, max_labels);
+ err = mpls_nh_build_multi(cfg, rt, max_labels, extack);
else
err = mpls_nh_build_from_cfg(cfg, rt);
if (err)
@@ -943,7 +1012,8 @@
return err;
}
-static int mpls_route_del(struct mpls_route_config *cfg)
+static int mpls_route_del(struct mpls_route_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct net *net = cfg->rc_nlinfo.nl_net;
unsigned index;
@@ -951,12 +1021,7 @@
index = cfg->rc_label;
- /* Reserved labels may not be removed */
- if (index < MPLS_LABEL_FIRST_UNRESERVED)
- goto errout;
-
- /* The full 20 bit range may not be supported */
- if (index >= net->mpls.platform_labels)
+ if (!mpls_label_ok(net, index, extack))
goto errout;
mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);
@@ -1541,8 +1606,8 @@
}
EXPORT_SYMBOL_GPL(nla_put_labels);
-int nla_get_labels(const struct nlattr *nla,
- u8 max_labels, u8 *labels, u32 label[])
+int nla_get_labels(const struct nlattr *nla, u8 max_labels, u8 *labels,
+ u32 label[], struct netlink_ext_ack *extack)
{
unsigned len = nla_len(nla);
struct mpls_shim_hdr *nla_label;
@@ -1553,13 +1618,18 @@
/* len needs to be an even multiple of 4 (the label size). Number
* of labels is a u8 so check for overflow.
*/
- if (len & 3 || len / 4 > 255)
+ if (len & 3 || len / 4 > 255) {
+ NL_SET_ERR_MSG_ATTR(extack, nla,
+ "Invalid length for labels attribute");
return -EINVAL;
+ }
/* Limit the number of new labels allowed */
nla_labels = len/4;
- if (nla_labels > max_labels)
+ if (nla_labels > max_labels) {
+ NL_SET_ERR_MSG(extack, "Too many labels");
return -EINVAL;
+ }
/* when label == NULL, caller wants number of labels */
if (!label)
@@ -1574,8 +1644,29 @@
/* Ensure the bottom of stack flag is properly set
* and ttl and tc are both clear.
*/
- if ((dec.bos != bos) || dec.ttl || dec.tc)
+ if (dec.ttl) {
+ NL_SET_ERR_MSG_ATTR(extack, nla,
+ "TTL in label must be 0");
return -EINVAL;
+ }
+
+ if (dec.tc) {
+ NL_SET_ERR_MSG_ATTR(extack, nla,
+ "Traffic class in label must be 0");
+ return -EINVAL;
+ }
+
+ if (dec.bos != bos) {
+ NL_SET_BAD_ATTR(extack, nla);
+ if (bos) {
+ NL_SET_ERR_MSG(extack,
+ "BOS bit must be set in first label");
+ } else {
+ NL_SET_ERR_MSG(extack,
+ "BOS bit can only be set in first label");
+ }
+ return -EINVAL;
+ }
switch (dec.label) {
case MPLS_LABEL_IMPLNULL:
@@ -1583,6 +1674,8 @@
* assign and distribute, but which never
* actually appears in the encapsulation.
*/
+ NL_SET_ERR_MSG_ATTR(extack, nla,
+ "Implicit NULL Label (3) can not be used in encapsulation");
return -EINVAL;
}
@@ -1594,50 +1687,10 @@
}
EXPORT_SYMBOL_GPL(nla_get_labels);
-int nla_get_via(const struct nlattr *nla, u8 *via_alen,
- u8 *via_table, u8 via_addr[])
-{
- struct rtvia *via = nla_data(nla);
- int err = -EINVAL;
- int alen;
-
- if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr))
- goto errout;
- alen = nla_len(nla) -
- offsetof(struct rtvia, rtvia_addr);
- if (alen > MAX_VIA_ALEN)
- goto errout;
-
- /* Validate the address family */
- switch (via->rtvia_family) {
- case AF_PACKET:
- *via_table = NEIGH_LINK_TABLE;
- break;
- case AF_INET:
- *via_table = NEIGH_ARP_TABLE;
- if (alen != 4)
- goto errout;
- break;
- case AF_INET6:
- *via_table = NEIGH_ND_TABLE;
- if (alen != 16)
- goto errout;
- break;
- default:
- /* Unsupported address family */
- goto errout;
- }
-
- memcpy(via_addr, via->rtvia_addr, alen);
- *via_alen = alen;
- err = 0;
-
-errout:
- return err;
-}
-
-static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct mpls_route_config *cfg)
+static int rtm_to_route_config(struct sk_buff *skb,
+ struct nlmsghdr *nlh,
+ struct mpls_route_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct rtmsg *rtm;
struct nlattr *tb[RTA_MAX+1];
@@ -1645,35 +1698,54 @@
int err;
err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_mpls_policy,
- NULL);
+ extack);
if (err < 0)
goto errout;
err = -EINVAL;
rtm = nlmsg_data(nlh);
- if (rtm->rtm_family != AF_MPLS)
+ if (rtm->rtm_family != AF_MPLS) {
+ NL_SET_ERR_MSG(extack, "Invalid address family in rtmsg");
goto errout;
- if (rtm->rtm_dst_len != 20)
+ }
+ if (rtm->rtm_dst_len != 20) {
+ NL_SET_ERR_MSG(extack, "rtm_dst_len must be 20 for MPLS");
goto errout;
- if (rtm->rtm_src_len != 0)
+ }
+ if (rtm->rtm_src_len != 0) {
+ NL_SET_ERR_MSG(extack, "rtm_src_len must be 0 for MPLS");
goto errout;
- if (rtm->rtm_tos != 0)
+ }
+ if (rtm->rtm_tos != 0) {
+ NL_SET_ERR_MSG(extack, "rtm_tos must be 0 for MPLS");
goto errout;
- if (rtm->rtm_table != RT_TABLE_MAIN)
+ }
+ if (rtm->rtm_table != RT_TABLE_MAIN) {
+ NL_SET_ERR_MSG(extack,
+ "MPLS only supports the main route table");
goto errout;
+ }
/* Any value is acceptable for rtm_protocol */
/* As mpls uses destination specific addresses
* (or source specific address in the case of multicast)
* all addresses have universal scope.
*/
- if (rtm->rtm_scope != RT_SCOPE_UNIVERSE)
+ if (rtm->rtm_scope != RT_SCOPE_UNIVERSE) {
+ NL_SET_ERR_MSG(extack,
+ "Invalid route scope - MPLS only supports UNIVERSE");
goto errout;
- if (rtm->rtm_type != RTN_UNICAST)
+ }
+ if (rtm->rtm_type != RTN_UNICAST) {
+ NL_SET_ERR_MSG(extack,
+ "Invalid route type - MPLS only supports UNICAST");
goto errout;
- if (rtm->rtm_flags != 0)
+ }
+ if (rtm->rtm_flags != 0) {
+ NL_SET_ERR_MSG(extack, "rtm_flags must be 0 for MPLS");
goto errout;
+ }
cfg->rc_label = LABEL_NOT_SPECIFIED;
cfg->rc_protocol = rtm->rtm_protocol;
@@ -1696,26 +1768,26 @@
case RTA_NEWDST:
if (nla_get_labels(nla, MAX_NEW_LABELS,
&cfg->rc_output_labels,
- cfg->rc_output_label))
+ cfg->rc_output_label, extack))
goto errout;
break;
case RTA_DST:
{
u8 label_count;
if (nla_get_labels(nla, 1, &label_count,
- &cfg->rc_label))
+ &cfg->rc_label, extack))
goto errout;
- /* Reserved labels may not be set */
- if (cfg->rc_label < MPLS_LABEL_FIRST_UNRESERVED)
+ if (!mpls_label_ok(cfg->rc_nlinfo.nl_net,
+ cfg->rc_label, extack))
goto errout;
-
break;
}
case RTA_VIA:
{
if (nla_get_via(nla, &cfg->rc_via_alen,
- &cfg->rc_via_table, cfg->rc_via))
+ &cfg->rc_via_table, cfg->rc_via,
+ extack))
goto errout;
break;
}
@@ -1729,14 +1801,18 @@
{
u8 ttl_propagate = nla_get_u8(nla);
- if (ttl_propagate > 1)
+ if (ttl_propagate > 1) {
+ NL_SET_ERR_MSG_ATTR(extack, nla,
+ "RTA_TTL_PROPAGATE can only be 0 or 1");
goto errout;
+ }
cfg->rc_ttl_propagate = ttl_propagate ?
MPLS_TTL_PROP_ENABLED :
MPLS_TTL_PROP_DISABLED;
break;
}
default:
+ NL_SET_ERR_MSG_ATTR(extack, nla, "Unknown attribute");
/* Unsupported attribute */
goto errout;
}
@@ -1757,11 +1833,11 @@
if (!cfg)
return -ENOMEM;
- err = rtm_to_route_config(skb, nlh, cfg);
+ err = rtm_to_route_config(skb, nlh, cfg, extack);
if (err < 0)
goto out;
- err = mpls_route_del(cfg);
+ err = mpls_route_del(cfg, extack);
out:
kfree(cfg);
@@ -1779,11 +1855,11 @@
if (!cfg)
return -ENOMEM;
- err = rtm_to_route_config(skb, nlh, cfg);
+ err = rtm_to_route_config(skb, nlh, cfg, extack);
if (err < 0)
goto out;
- err = mpls_route_add(cfg);
+ err = mpls_route_add(cfg, extack);
out:
kfree(cfg);
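The MPLS changes above thread a struct netlink_ext_ack through route parsing
and validation, so a rejected RTM_NEWROUTE/RTM_DELROUTE now returns a
human-readable reason - and, via NL_SET_ERR_MSG_ATTR and NL_SET_BAD_ATTR, a
pointer to the offending attribute - instead of a bare -EINVAL; nla_get_via()
also moves up the file so the nexthop code can pass the extack along. A
minimal sketch of the pattern - the validator and message are made up, the
macros are the real ones from include/linux/netlink.h:

/* Illustrative validator showing the extack reporting pattern. */
static int example_validate(const struct nlattr *nla,
			    struct netlink_ext_ack *extack)
{
	if (nla_len(nla) != sizeof(u32)) {
		/* Records the message and points at the bad attribute */
		NL_SET_ERR_MSG_ATTR(extack, nla, "Expected a 32-bit value");
		return -EINVAL;
	}
	return 0;
}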
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index 4db6a59..cf65aec 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -203,9 +203,7 @@
int nla_put_labels(struct sk_buff *skb, int attrtype, u8 labels,
const u32 label[]);
int nla_get_labels(const struct nlattr *nla, u8 max_labels, u8 *labels,
- u32 label[]);
-int nla_get_via(const struct nlattr *nla, u8 *via_alen, u8 *via_table,
- u8 via[]);
+ u32 label[], struct netlink_ext_ack *extack);
bool mpls_output_possible(const struct net_device *dev);
unsigned int mpls_dev_mtu(const struct net_device *dev);
bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu);
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index 369c7a2..6e558a4 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -159,7 +159,8 @@
static int mpls_build_state(struct nlattr *nla,
unsigned int family, const void *cfg,
- struct lwtunnel_state **ts)
+ struct lwtunnel_state **ts,
+ struct netlink_ext_ack *extack)
{
struct mpls_iptunnel_encap *tun_encap_info;
struct nlattr *tb[MPLS_IPTUNNEL_MAX + 1];
@@ -168,17 +169,18 @@
int ret;
ret = nla_parse_nested(tb, MPLS_IPTUNNEL_MAX, nla,
- mpls_iptunnel_policy, NULL);
+ mpls_iptunnel_policy, extack);
if (ret < 0)
return ret;
- if (!tb[MPLS_IPTUNNEL_DST])
+ if (!tb[MPLS_IPTUNNEL_DST]) {
+ NL_SET_ERR_MSG(extack, "MPLS_IPTUNNEL_DST attribute is missing");
return -EINVAL;
-
+ }
/* determine number of labels */
- if (nla_get_labels(tb[MPLS_IPTUNNEL_DST],
- MAX_NEW_LABELS, &n_labels, NULL))
+ if (nla_get_labels(tb[MPLS_IPTUNNEL_DST], MAX_NEW_LABELS,
+ &n_labels, NULL, extack))
return -EINVAL;
newts = lwtunnel_state_alloc(sizeof(*tun_encap_info) +
@@ -188,7 +190,8 @@
tun_encap_info = mpls_lwtunnel_encap(newts);
ret = nla_get_labels(tb[MPLS_IPTUNNEL_DST], n_labels,
- &tun_encap_info->labels, tun_encap_info->label);
+ &tun_encap_info->labels, tun_encap_info->label,
+ extack);
if (ret)
goto errout;
diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c
index db7083b..5e03ed1 100644
--- a/net/ncsi/ncsi-cmd.c
+++ b/net/ncsi/ncsi-cmd.c
@@ -66,8 +66,7 @@
{
struct ncsi_cmd_pkt *cmd;
- cmd = (struct ncsi_cmd_pkt *)skb_put(skb, sizeof(*cmd));
- memset(cmd, 0, sizeof(*cmd));
+ cmd = skb_put_zero(skb, sizeof(*cmd));
ncsi_cmd_build_header(&cmd->cmd.common, nca);
return 0;
@@ -78,8 +77,7 @@
{
struct ncsi_cmd_sp_pkt *cmd;
- cmd = (struct ncsi_cmd_sp_pkt *)skb_put(skb, sizeof(*cmd));
- memset(cmd, 0, sizeof(*cmd));
+ cmd = skb_put_zero(skb, sizeof(*cmd));
cmd->hw_arbitration = nca->bytes[0];
ncsi_cmd_build_header(&cmd->cmd.common, nca);
@@ -91,8 +89,7 @@
{
struct ncsi_cmd_dc_pkt *cmd;
- cmd = (struct ncsi_cmd_dc_pkt *)skb_put(skb, sizeof(*cmd));
- memset(cmd, 0, sizeof(*cmd));
+ cmd = skb_put_zero(skb, sizeof(*cmd));
cmd->ald = nca->bytes[0];
ncsi_cmd_build_header(&cmd->cmd.common, nca);
@@ -104,8 +101,7 @@
{
struct ncsi_cmd_rc_pkt *cmd;
- cmd = (struct ncsi_cmd_rc_pkt *)skb_put(skb, sizeof(*cmd));
- memset(cmd, 0, sizeof(*cmd));
+ cmd = skb_put_zero(skb, sizeof(*cmd));
ncsi_cmd_build_header(&cmd->cmd.common, nca);
return 0;
@@ -116,8 +112,7 @@
{
struct ncsi_cmd_ae_pkt *cmd;
- cmd = (struct ncsi_cmd_ae_pkt *)skb_put(skb, sizeof(*cmd));
- memset(cmd, 0, sizeof(*cmd));
+ cmd = skb_put_zero(skb, sizeof(*cmd));
cmd->mc_id = nca->bytes[0];
cmd->mode = htonl(nca->dwords[1]);
ncsi_cmd_build_header(&cmd->cmd.common, nca);
@@ -130,8 +125,7 @@
{
struct ncsi_cmd_sl_pkt *cmd;
- cmd = (struct ncsi_cmd_sl_pkt *)skb_put(skb, sizeof(*cmd));
- memset(cmd, 0, sizeof(*cmd));
+ cmd = skb_put_zero(skb, sizeof(*cmd));
cmd->mode = htonl(nca->dwords[0]);
cmd->oem_mode = htonl(nca->dwords[1]);
ncsi_cmd_build_header(&cmd->cmd.common, nca);
@@ -144,8 +138,7 @@
{
struct ncsi_cmd_svf_pkt *cmd;
- cmd = (struct ncsi_cmd_svf_pkt *)skb_put(skb, sizeof(*cmd));
- memset(cmd, 0, sizeof(*cmd));
+ cmd = skb_put_zero(skb, sizeof(*cmd));
cmd->vlan = htons(nca->words[0]);
cmd->index = nca->bytes[2];
cmd->enable = nca->bytes[3];
@@ -159,8 +152,7 @@
{
struct ncsi_cmd_ev_pkt *cmd;
- cmd = (struct ncsi_cmd_ev_pkt *)skb_put(skb, sizeof(*cmd));
- memset(cmd, 0, sizeof(*cmd));
+ cmd = skb_put_zero(skb, sizeof(*cmd));
cmd->mode = nca->bytes[0];
ncsi_cmd_build_header(&cmd->cmd.common, nca);
@@ -173,8 +165,7 @@
struct ncsi_cmd_sma_pkt *cmd;
int i;
- cmd = (struct ncsi_cmd_sma_pkt *)skb_put(skb, sizeof(*cmd));
- memset(cmd, 0, sizeof(*cmd));
+ cmd = skb_put_zero(skb, sizeof(*cmd));
for (i = 0; i < 6; i++)
cmd->mac[i] = nca->bytes[i];
cmd->index = nca->bytes[6];
@@ -189,8 +180,7 @@
{
struct ncsi_cmd_ebf_pkt *cmd;
- cmd = (struct ncsi_cmd_ebf_pkt *)skb_put(skb, sizeof(*cmd));
- memset(cmd, 0, sizeof(*cmd));
+ cmd = skb_put_zero(skb, sizeof(*cmd));
cmd->mode = htonl(nca->dwords[0]);
ncsi_cmd_build_header(&cmd->cmd.common, nca);
@@ -202,8 +192,7 @@
{
struct ncsi_cmd_egmf_pkt *cmd;
- cmd = (struct ncsi_cmd_egmf_pkt *)skb_put(skb, sizeof(*cmd));
- memset(cmd, 0, sizeof(*cmd));
+ cmd = skb_put_zero(skb, sizeof(*cmd));
cmd->mode = htonl(nca->dwords[0]);
ncsi_cmd_build_header(&cmd->cmd.common, nca);
@@ -215,8 +204,7 @@
{
struct ncsi_cmd_snfc_pkt *cmd;
- cmd = (struct ncsi_cmd_snfc_pkt *)skb_put(skb, sizeof(*cmd));
- memset(cmd, 0, sizeof(*cmd));
+ cmd = skb_put_zero(skb, sizeof(*cmd));
cmd->mode = nca->bytes[0];
ncsi_cmd_build_header(&cmd->cmd.common, nca);
@@ -343,7 +331,7 @@
}
/* Fill the ethernet header */
- eh = (struct ethhdr *)skb_push(nr->cmd, sizeof(*eh));
+ eh = skb_push(nr->cmd, sizeof(*eh));
eh->h_proto = htons(ETH_P_NCSI);
eth_broadcast_addr(eh->h_dest);
eth_broadcast_addr(eh->h_source);
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index a504e87..49bd8bb 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -152,7 +152,7 @@
struct synproxy_options *opts)
{
opts->tsecr = opts->tsval;
- opts->tsval = tcp_time_stamp & ~0x3f;
+ opts->tsval = tcp_time_stamp_raw() & ~0x3f;
if (opts->options & XT_SYNPROXY_OPT_WSCALE) {
opts->tsval |= opts->wscale;
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index da97049..94ec0d0 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -590,7 +590,7 @@
if (skb_tailroom(inst->skb) < nla_total_size(data_len))
goto nla_put_failure;
- nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
+ nla = skb_put(inst->skb, nla_total_size(data_len));
nla->nla_type = NFULA_PAYLOAD;
nla->nla_len = size;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 8a0f218..1b17a1b 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -589,7 +589,7 @@
if (skb_tailroom(skb) < sizeof(*nla) + hlen)
goto nla_put_failure;
- nla = (struct nlattr *)skb_put(skb, sizeof(*nla));
+ nla = skb_put(skb, sizeof(*nla));
nla->nla_type = NFQA_PAYLOAD;
nla->nla_len = nla_attr_size(data_len);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 7586d44..a88745e 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -170,7 +170,7 @@
NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
NETLINK_CB(new).creds = NETLINK_CB(skb).creds;
- memcpy(skb_put(new, len), skb->data, len);
+ skb_put_data(new, skb->data, len);
return new;
}
@@ -2104,7 +2104,7 @@
struct nlmsghdr *nlh;
int size = nlmsg_msg_size(len);
- nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
+ nlh = skb_put(skb, NLMSG_ALIGN(size));
nlh->nlmsg_type = type;
nlh->nlmsg_len = size;
nlh->nlmsg_flags = flags;
diff --git a/net/nfc/af_nfc.c b/net/nfc/af_nfc.c
index 54e40fa..d3e594e 100644
--- a/net/nfc/af_nfc.c
+++ b/net/nfc/af_nfc.c
@@ -48,7 +48,7 @@
return rc;
}
-static struct net_proto_family nfc_sock_family_ops = {
+static const struct net_proto_family nfc_sock_family_ops = {
.owner = THIS_MODULE,
.family = PF_NFC,
.create = nfc_sock_create,
diff --git a/net/nfc/digital_core.c b/net/nfc/digital_core.c
index 0fd5518..ebeace7 100644
--- a/net/nfc/digital_core.c
+++ b/net/nfc/digital_core.c
@@ -74,8 +74,8 @@
if (msb_first)
crc = __fswab16(crc);
- *skb_put(skb, 1) = crc & 0xFF;
- *skb_put(skb, 1) = (crc >> 8) & 0xFF;
+ skb_put_u8(skb, crc & 0xFF);
+ skb_put_u8(skb, (crc >> 8) & 0xFF);
}
int digital_skb_check_crc(struct sk_buff *skb, crc_func_t crc_func,
diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c
index f864ce1..74ccc2d 100644
--- a/net/nfc/digital_dep.c
+++ b/net/nfc/digital_dep.c
@@ -185,7 +185,7 @@
skb->data[0] = skb->len;
if (ddev->curr_rf_tech == NFC_DIGITAL_RF_TECH_106A)
- *skb_push(skb, sizeof(u8)) = DIGITAL_NFC_DEP_NFCA_SOD_SB;
+ *(u8 *)skb_push(skb, sizeof(u8)) = DIGITAL_NFC_DEP_NFCA_SOD_SB;
}
static int digital_skb_pull_dep_sod(struct nfc_digital_dev *ddev,
@@ -226,8 +226,7 @@
return ERR_PTR(-ENOMEM);
}
- memcpy(skb_put(new_skb, ddev->remote_payload_max), skb->data,
- ddev->remote_payload_max);
+ skb_put_data(new_skb, skb->data, ddev->remote_payload_max);
skb_pull(skb, ddev->remote_payload_max);
ddev->chaining_skb = skb;
@@ -277,8 +276,7 @@
ddev->chaining_skb = new_skb;
}
- memcpy(skb_put(ddev->chaining_skb, resp->len), resp->data,
- resp->len);
+ skb_put_data(ddev->chaining_skb, resp->data, resp->len);
kfree_skb(resp);
resp = NULL;
@@ -525,7 +523,7 @@
if (gb_len) {
atr_req->pp |= DIGITAL_GB_BIT;
- memcpy(skb_put(skb, gb_len), gb, gb_len);
+ skb_put_data(skb, gb, gb_len);
}
digital_skb_push_dep_sod(ddev, skb);
@@ -656,7 +654,7 @@
if (!skb)
return -ENOMEM;
- *skb_put(skb, 1) = rtox;
+ skb_put_u8(skb, rtox);
skb_push(skb, sizeof(struct digital_dep_req_res));
@@ -1012,8 +1010,7 @@
if (ddev->did) {
dep_res->pfb |= DIGITAL_NFC_DEP_PFB_DID_BIT;
- memcpy(skb_put(skb, sizeof(ddev->did)), &ddev->did,
- sizeof(ddev->did));
+ skb_put_data(skb, &ddev->did, sizeof(ddev->did));
}
ddev->curr_nfc_dep_pni =
@@ -1057,8 +1054,7 @@
if (ddev->did) {
dep_res->pfb |= DIGITAL_NFC_DEP_PFB_DID_BIT;
- memcpy(skb_put(skb, sizeof(ddev->did)), &ddev->did,
- sizeof(ddev->did));
+ skb_put_data(skb, &ddev->did, sizeof(ddev->did));
}
digital_skb_push_dep_sod(ddev, skb);
@@ -1325,8 +1321,7 @@
if (ddev->did) {
dep_res->pfb |= DIGITAL_NFC_DEP_PFB_DID_BIT;
- memcpy(skb_put(skb, sizeof(ddev->did)), &ddev->did,
- sizeof(ddev->did));
+ skb_put_data(skb, &ddev->did, sizeof(ddev->did));
}
ddev->curr_nfc_dep_pni =
diff --git a/net/nfc/digital_technology.c b/net/nfc/digital_technology.c
index d9080de..3cc3448 100644
--- a/net/nfc/digital_technology.c
+++ b/net/nfc/digital_technology.c
@@ -266,8 +266,8 @@
if (!skb)
return -ENOMEM;
- *skb_put(skb, 1) = DIGITAL_RATS_BYTE1;
- *skb_put(skb, 1) = DIGITAL_RATS_PARAM;
+ skb_put_u8(skb, DIGITAL_RATS_BYTE1);
+ skb_put_u8(skb, DIGITAL_RATS_PARAM);
rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_ats,
target);
@@ -470,8 +470,8 @@
else
sel_cmd = DIGITAL_CMD_SEL_REQ_CL3;
- *skb_put(skb, sizeof(u8)) = sel_cmd;
- *skb_put(skb, sizeof(u8)) = DIGITAL_SDD_REQ_SEL_PAR;
+ skb_put_u8(skb, sel_cmd);
+ skb_put_u8(skb, DIGITAL_SDD_REQ_SEL_PAR);
return digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sdd_res,
target);
@@ -541,7 +541,7 @@
if (!skb)
return -ENOMEM;
- *skb_put(skb, sizeof(u8)) = DIGITAL_CMD_SENS_REQ;
+ skb_put_u8(skb, DIGITAL_CMD_SENS_REQ);
rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sens_res, NULL);
if (rc)
@@ -625,8 +625,7 @@
if (!skb)
return -ENOMEM;
- attrib_req = (struct digital_attrib_req *)skb_put(skb,
- sizeof(*attrib_req));
+ attrib_req = skb_put(skb, sizeof(*attrib_req));
attrib_req->cmd = DIGITAL_CMD_ATTRIB_REQ;
memcpy(attrib_req->nfcid0, sensb_res->nfcid0,
@@ -730,8 +729,7 @@
if (!skb)
return -ENOMEM;
- sensb_req = (struct digital_sensb_req *)skb_put(skb,
- sizeof(*sensb_req));
+ sensb_req = skb_put(skb, sizeof(*sensb_req));
sensb_req->cmd = DIGITAL_CMD_SENSB_REQ;
sensb_req->afi = 0x00; /* All families and sub-families */
@@ -830,7 +828,7 @@
sensf_req->rc = 0;
sensf_req->tsn = 0;
- *skb_push(skb, 1) = size + 1;
+ *(u8 *)skb_push(skb, 1) = size + 1;
if (!DIGITAL_DRV_CAPS_IN_CRC(ddev))
digital_skb_add_crc_f(skb);
@@ -939,7 +937,7 @@
if (!skb)
return -ENOMEM;
- *skb_put(skb, 1) = DIGITAL_SEL_RES_NFC_DEP;
+ skb_put_u8(skb, DIGITAL_SEL_RES_NFC_DEP);
if (!DIGITAL_DRV_CAPS_TG_CRC(ddev))
digital_skb_add_crc_a(skb);
@@ -1163,7 +1161,7 @@
break;
}
- *skb_push(skb, sizeof(u8)) = size + 1;
+ *(u8 *)skb_push(skb, sizeof(u8)) = size + 1;
if (!DIGITAL_DRV_CAPS_TG_CRC(ddev))
digital_skb_add_crc_f(skb);
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index 2b0f0ac..b740fef 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -727,7 +727,7 @@
break;
}
- *skb_push(skb, 1) = 0; /* CTR, see spec:10.2.2.1 */
+ *(u8 *)skb_push(skb, 1) = 0; /* CTR, see spec:10.2.2.1 */
hdev->async_cb_type = HCI_CB_TYPE_TRANSCEIVE;
hdev->async_cb = cb;
@@ -874,13 +874,13 @@
return;
}
- *skb_put(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN) = pipe;
+ skb_put_u8(hcp_skb, pipe);
skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
msg_len = frag_skb->len - NFC_HCI_HCP_PACKET_HEADER_LEN;
- memcpy(skb_put(hcp_skb, msg_len),
- frag_skb->data + NFC_HCI_HCP_PACKET_HEADER_LEN,
- msg_len);
+ skb_put_data(hcp_skb,
+ frag_skb->data + NFC_HCI_HCP_PACKET_HEADER_LEN,
+ msg_len);
}
skb_queue_purge(&hdev->rx_hcp_frags);
diff --git a/net/nfc/hci/llc_shdlc.c b/net/nfc/hci/llc_shdlc.c
index 401c7e2..17e59a0 100644
--- a/net/nfc/hci/llc_shdlc.c
+++ b/net/nfc/hci/llc_shdlc.c
@@ -160,7 +160,7 @@
if (skb == NULL)
return -ENOMEM;
- *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_S | (sframe_type << 3) | nr;
+ *(u8 *)skb_push(skb, 1) = SHDLC_CONTROL_HEAD_S | (sframe_type << 3) | nr;
r = shdlc->xmit_to_drv(shdlc->hdev, skb);
@@ -178,7 +178,7 @@
pr_debug("uframe_modifier=%d\n", uframe_modifier);
- *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_U | uframe_modifier;
+ *(u8 *)skb_push(skb, 1) = SHDLC_CONTROL_HEAD_U | uframe_modifier;
r = shdlc->xmit_to_drv(shdlc->hdev, skb);
@@ -382,8 +382,8 @@
if (skb == NULL)
return -ENOMEM;
- *skb_put(skb, 1) = SHDLC_MAX_WINDOW;
- *skb_put(skb, 1) = SHDLC_SREJ_SUPPORT ? 1 : 0;
+ skb_put_u8(skb, SHDLC_MAX_WINDOW);
+ skb_put_u8(skb, SHDLC_SREJ_SUPPORT ? 1 : 0);
return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_RSET);
}
@@ -551,8 +551,8 @@
skb = skb_dequeue(&shdlc->send_q);
- *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_I | (shdlc->ns << 3) |
- shdlc->nr;
+ *(u8 *)skb_push(skb, 1) = SHDLC_CONTROL_HEAD_I | (shdlc->ns << 3) |
+ shdlc->nr;
pr_debug("Sending I-Frame %d, waiting to rcv %d\n", shdlc->ns,
shdlc->nr);
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
index c5959ce..367d8c0 100644
--- a/net/nfc/llcp_commands.c
+++ b/net/nfc/llcp_commands.c
@@ -298,7 +298,7 @@
pr_debug("header 0x%x 0x%x\n", header[0], header[1]);
- memcpy(skb_put(pdu, LLCP_HEADER_SIZE), header, LLCP_HEADER_SIZE);
+ skb_put_data(pdu, header, LLCP_HEADER_SIZE);
return pdu;
}
@@ -311,7 +311,7 @@
if (tlv == NULL)
return NULL;
- memcpy(skb_put(pdu, tlv_length), tlv, tlv_length);
+ skb_put_data(pdu, tlv, tlv_length);
return pdu;
}
@@ -549,7 +549,7 @@
return PTR_ERR(skb);
hlist_for_each_entry_safe(sdp, n, tlv_list, node) {
- memcpy(skb_put(skb, sdp->tlv_len), sdp->tlv, sdp->tlv_len);
+ skb_put_data(skb, sdp->tlv, sdp->tlv_len);
hlist_del(&sdp->node);
@@ -581,8 +581,7 @@
hlist_for_each_entry_safe(sdreq, n, tlv_list, node) {
pr_debug("tid %d for %s\n", sdreq->tid, sdreq->uri);
- memcpy(skb_put(skb, sdreq->tlv_len), sdreq->tlv,
- sdreq->tlv_len);
+ skb_put_data(skb, sdreq->tlv, sdreq->tlv_len);
hlist_del(&sdreq->node);
@@ -622,7 +621,7 @@
skb = llcp_add_header(skb, dsap, ssap, LLCP_PDU_DM);
- memcpy(skb_put(skb, 1), &reason, 1);
+ skb_put_data(skb, &reason, 1);
skb_queue_head(&local->tx_queue, skb);
@@ -693,7 +692,7 @@
skb_put(pdu, LLCP_SEQUENCE_SIZE);
if (likely(frag_len > 0))
- memcpy(skb_put(pdu, frag_len), msg_ptr, frag_len);
+ skb_put_data(pdu, msg_ptr, frag_len);
skb_queue_tail(&sock->tx_queue, pdu);
@@ -759,7 +758,7 @@
pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI);
if (likely(frag_len > 0))
- memcpy(skb_put(pdu, frag_len), msg_ptr, frag_len);
+ skb_put_data(pdu, msg_ptr, frag_len);
/* No need to check for the peer RW for UI frames */
skb_queue_tail(&local->tx_queue, pdu);
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
index e69786c..02eef5c 100644
--- a/net/nfc/llcp_core.c
+++ b/net/nfc/llcp_core.c
@@ -1390,7 +1390,7 @@
return;
}
- memcpy(skb_put(new_skb, pdu_len), skb->data, pdu_len);
+ skb_put_data(new_skb, skb->data, pdu_len);
nfc_llcp_rx_skb(local, new_skb);
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 61fff42..a3dac34 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -462,7 +462,7 @@
return -ENOMEM;
skb_reserve(skb, NCI_DATA_HDR_SIZE);
- memcpy(skb_put(skb, data_len), data, data_len);
+ skb_put_data(skb, data, data_len);
loopback_data.conn_id = conn_id;
loopback_data.data = skb;
@@ -1341,7 +1341,7 @@
return -ENOMEM;
}
- hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
+ hdr = skb_put(skb, NCI_CTRL_HDR_SIZE);
hdr->gid = nci_opcode_gid(opcode);
hdr->oid = nci_opcode_oid(opcode);
hdr->plen = plen;
@@ -1350,7 +1350,7 @@
nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);
if (plen)
- memcpy(skb_put(skb, plen), payload, plen);
+ skb_put_data(skb, payload, plen);
skb_queue_tail(&ndev->cmd_q, skb);
queue_work(ndev->cmd_wq, &ndev->cmd_work);
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
index dbd2425..908f25e 100644
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -81,7 +81,7 @@
struct nci_data_hdr *hdr;
int plen = skb->len;
- hdr = (struct nci_data_hdr *) skb_push(skb, NCI_DATA_HDR_SIZE);
+ hdr = skb_push(skb, NCI_DATA_HDR_SIZE);
hdr->conn_id = conn_id;
hdr->rfu = 0;
hdr->plen = plen;
@@ -138,7 +138,7 @@
skb_reserve(skb_frag, NCI_DATA_HDR_SIZE);
/* first, copy the data */
- memcpy(skb_put(skb_frag, frag_len), data, frag_len);
+ skb_put_data(skb_frag, data, frag_len);
/* second, set the header */
nci_push_data_hdr(ndev, conn_id, skb_frag,
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
index a0ab26d..ddfc52a 100644
--- a/net/nfc/nci/hci.c
+++ b/net/nfc/nci/hci.c
@@ -170,7 +170,7 @@
return -ENOMEM;
skb_reserve(skb, NCI_DATA_HDR_SIZE + 2);
- *skb_push(skb, 1) = data_type;
+ *(u8 *)skb_push(skb, 1) = data_type;
do {
len = conn_info->max_pkt_payload_len;
@@ -184,10 +184,10 @@
len = conn_info->max_pkt_payload_len - skb->len - 1;
}
- *skb_push(skb, 1) = cb;
+ *(u8 *)skb_push(skb, 1) = cb;
if (len > 0)
- memcpy(skb_put(skb, len), data + i, len);
+ skb_put_data(skb, data + i, len);
r = nci_send_data(ndev, conn_info->conn_id, skb);
if (r < 0)
@@ -472,12 +472,13 @@
return;
}
- *skb_put(hcp_skb, NCI_HCI_HCP_PACKET_HEADER_LEN) = pipe;
+ skb_put_u8(hcp_skb, pipe);
skb_queue_walk(&ndev->hci_dev->rx_hcp_frags, frag_skb) {
msg_len = frag_skb->len - NCI_HCI_HCP_PACKET_HEADER_LEN;
- memcpy(skb_put(hcp_skb, msg_len), frag_skb->data +
- NCI_HCI_HCP_PACKET_HEADER_LEN, msg_len);
+ skb_put_data(hcp_skb,
+ frag_skb->data + NCI_HCI_HCP_PACKET_HEADER_LEN,
+ msg_len);
}
skb_queue_purge(&ndev->hci_dev->rx_hcp_frags);
diff --git a/net/nfc/nci/spi.c b/net/nfc/nci/spi.c
index d904cd2..452f4c1 100644
--- a/net/nfc/nci/spi.c
+++ b/net/nfc/nci/spi.c
@@ -86,8 +86,8 @@
u16 crc;
crc = crc_ccitt(CRC_INIT, skb->data, skb->len);
- *skb_put(skb, 1) = crc >> 8;
- *skb_put(skb, 1) = crc & 0xFF;
+ skb_put_u8(skb, crc >> 8);
+ skb_put_u8(skb, crc & 0xFF);
}
if (write_handshake_completion) {
@@ -172,8 +172,8 @@
hdr[3] = 0;
crc = crc_ccitt(CRC_INIT, skb->data, skb->len);
- *skb_put(skb, 1) = crc >> 8;
- *skb_put(skb, 1) = crc & 0xFF;
+ skb_put_u8(skb, crc >> 8);
+ skb_put_u8(skb, crc & 0xFF);
ret = __nci_spi_send(nspi, skb, 0);
@@ -238,8 +238,8 @@
goto receive_error;
if (nspi->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
- *skb_push(skb, 1) = resp_hdr[1];
- *skb_push(skb, 1) = resp_hdr[0];
+ *(u8 *)skb_push(skb, 1) = resp_hdr[1];
+ *(u8 *)skb_push(skb, 1) = resp_hdr[0];
}
return skb;
diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c
index c468eab..8d104c1 100644
--- a/net/nfc/nci/uart.c
+++ b/net/nfc/nci/uart.c
@@ -355,7 +355,7 @@
/* Eat byte after byte till full packet header is received */
if (nu->rx_skb->len < NCI_CTRL_HDR_SIZE) {
- *skb_put(nu->rx_skb, 1) = *data++;
+ skb_put_u8(nu->rx_skb, *data++);
--count;
continue;
}
@@ -371,7 +371,7 @@
chunk_len = nu->rx_packet_len - nu->rx_skb->len;
if (count < chunk_len)
chunk_len = count;
- memcpy(skb_put(nu->rx_skb, chunk_len), data, chunk_len);
+ skb_put_data(nu->rx_skb, data, chunk_len);
data += chunk_len;
count -= chunk_len;
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index e386e6c..e2188de 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -142,7 +142,7 @@
static int rawsock_add_header(struct sk_buff *skb)
{
- *skb_push(skb, NFC_HEADER_SIZE) = 0;
+ *(u8 *)skb_push(skb, NFC_HEADER_SIZE) = 0;
return 0;
}
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 7b17da9..d772e9a 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -413,7 +413,7 @@
size_t plen = NLA_ALIGN(skb->len) - skb->len;
if (plen > 0)
- memset(skb_put(skb, plen), 0, plen);
+ skb_put_zero(skb, plen);
}
}
@@ -453,7 +453,7 @@
/* Complete checksum if needed */
if (skb->ip_summed == CHECKSUM_PARTIAL &&
- (err = skb_checksum_help(skb)))
+ (err = skb_csum_hwoffload_help(skb, 0)))
goto out;
/* Older versions of OVS user space enforce alignment of the last
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 869acb3..7e6301b 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -40,14 +40,14 @@
if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
return -EMSGSIZE;
- if (vxlan->flags & VXLAN_F_GBP) {
+ if (vxlan->cfg.flags & VXLAN_F_GBP) {
struct nlattr *exts;
exts = nla_nest_start(skb, OVS_TUNNEL_ATTR_EXTENSION);
if (!exts)
return -EMSGSIZE;
- if (vxlan->flags & VXLAN_F_GBP &&
+ if (vxlan->cfg.flags & VXLAN_F_GBP &&
nla_put_flag(skb, OVS_VXLAN_EXT_GBP))
return -EMSGSIZE;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e3eeed1..f9349a4 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -188,7 +188,6 @@
#define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x)))
struct packet_sock;
-static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev);
@@ -196,8 +195,7 @@
struct packet_ring_buffer *rb,
int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
-static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
- struct tpacket_block_desc *);
+static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
@@ -721,7 +719,7 @@
/* Case 1. Queue was frozen because user-space was
* lagging behind.
*/
- if (prb_curr_blk_in_use(pkc, pbd)) {
+ if (prb_curr_blk_in_use(pbd)) {
/*
* Ok, user-space is still behind.
* So just refresh the timer.
@@ -972,8 +970,7 @@
}
}
-static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
- struct tpacket_block_desc *pbd)
+static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
{
return TP_STATUS_USER & BLOCK_STATUS(pbd);
}
@@ -1064,7 +1061,7 @@
* Check if that last block which caused the queue to freeze,
* is still in_use by user-space.
*/
- if (prb_curr_blk_in_use(pkc, pbd)) {
+ if (prb_curr_blk_in_use(pbd)) {
/* Can't record this packet */
return NULL;
} else {
diff --git a/net/psample/psample.c b/net/psample/psample.c
index 8aa58a9..3a6ad0f 100644
--- a/net/psample/psample.c
+++ b/net/psample/psample.c
@@ -264,7 +264,7 @@
int nla_len = nla_total_size(data_len);
struct nlattr *nla;
- nla = (struct nlattr *)skb_put(nl_skb, nla_len);
+ nla = skb_put(nl_skb, nla_len);
nla->nla_type = PSAMPLE_ATTR_DATA;
nla->nla_len = nla_attr_size(data_len);
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index a9a8c7d..5586609 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -111,6 +111,9 @@
struct list_head item;
};
+static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb);
+static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb);
+
/* Release node resources and free the node.
*
* Do not call directly, use qrtr_node_release. To be used with
@@ -236,7 +239,7 @@
return -ENOMEM;
skb_reset_transport_header(skb);
- memcpy(skb_put(skb, len), data, len);
+ skb_put_data(skb, data, len);
skb_queue_tail(&node->rx_queue, skb);
schedule_work(&node->work);
@@ -245,23 +248,20 @@
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_post);
-/* Allocate and construct a resume-tx packet. */
-static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
- u32 dst_node, u32 port)
+static struct sk_buff *qrtr_alloc_ctrl_packet(u32 type, size_t pkt_len,
+ u32 src_node, u32 dst_node)
{
- const int pkt_len = 20;
struct qrtr_hdr *hdr;
struct sk_buff *skb;
- __le32 *buf;
skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
if (!skb)
return NULL;
skb_reset_transport_header(skb);
- hdr = (struct qrtr_hdr *)skb_put(skb, QRTR_HDR_SIZE);
+ hdr = skb_put(skb, QRTR_HDR_SIZE);
hdr->version = cpu_to_le32(QRTR_PROTO_VER);
- hdr->type = cpu_to_le32(QRTR_TYPE_RESUME_TX);
+ hdr->type = cpu_to_le32(type);
hdr->src_node_id = cpu_to_le32(src_node);
hdr->src_port_id = cpu_to_le32(QRTR_PORT_CTRL);
hdr->confirm_rx = cpu_to_le32(0);
@@ -269,8 +269,23 @@
hdr->dst_node_id = cpu_to_le32(dst_node);
hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
- buf = (__le32 *)skb_put(skb, pkt_len);
- memset(buf, 0, pkt_len);
+ return skb;
+}
+
+/* Allocate and construct a resume-tx packet. */
+static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
+ u32 dst_node, u32 port)
+{
+ const int pkt_len = 20;
+ struct sk_buff *skb;
+ __le32 *buf;
+
+ skb = qrtr_alloc_ctrl_packet(QRTR_TYPE_RESUME_TX, pkt_len,
+ src_node, dst_node);
+ if (!skb)
+ return NULL;
+
+ buf = skb_put_zero(skb, pkt_len);
buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX);
buf[1] = cpu_to_le32(src_node);
buf[2] = cpu_to_le32(port);
@@ -278,6 +293,43 @@
return skb;
}
+/* Allocate and construct a BYE message to signal remote termination */
+static struct sk_buff *qrtr_alloc_local_bye(u32 src_node)
+{
+ const int pkt_len = 20;
+ struct sk_buff *skb;
+ __le32 *buf;
+
+ skb = qrtr_alloc_ctrl_packet(QRTR_TYPE_BYE, pkt_len,
+ src_node, qrtr_local_nid);
+ if (!skb)
+ return NULL;
+
+ buf = skb_put_zero(skb, pkt_len);
+ buf[0] = cpu_to_le32(QRTR_TYPE_BYE);
+
+ return skb;
+}
+
+static struct sk_buff *qrtr_alloc_del_client(struct sockaddr_qrtr *sq)
+{
+ const int pkt_len = 20;
+ struct sk_buff *skb;
+ __le32 *buf;
+
+ skb = qrtr_alloc_ctrl_packet(QRTR_TYPE_DEL_CLIENT, pkt_len,
+ sq->sq_node, QRTR_NODE_BCAST);
+ if (!skb)
+ return NULL;
+
+ buf = skb_put_zero(skb, pkt_len);
+ buf[0] = cpu_to_le32(QRTR_TYPE_DEL_CLIENT);
+ buf[1] = cpu_to_le32(sq->sq_node);
+ buf[2] = cpu_to_le32(sq->sq_port);
+
+ return skb;
+}
+
static struct qrtr_sock *qrtr_port_lookup(int port);
static void qrtr_port_put(struct qrtr_sock *ipc);
@@ -369,11 +421,17 @@
void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
{
struct qrtr_node *node = ep->node;
+ struct sk_buff *skb;
mutex_lock(&node->ep_lock);
node->ep = NULL;
mutex_unlock(&node->ep_lock);
+ /* Notify the local controller about the event */
+ skb = qrtr_alloc_local_bye(node->nid);
+ if (skb)
+ qrtr_local_enqueue(NULL, skb);
+
qrtr_node_release(node);
ep->node = NULL;
}
@@ -408,8 +466,15 @@
/* Remove port assignment. */
static void qrtr_port_remove(struct qrtr_sock *ipc)
{
+ struct sk_buff *skb;
int port = ipc->us.sq_port;
+ skb = qrtr_alloc_del_client(&ipc->us);
+ if (skb) {
+ skb_set_owner_w(skb, &ipc->sk);
+ qrtr_bcast_enqueue(NULL, skb);
+ }
+
if (port == QRTR_PORT_CTRL)
port = 0;
@@ -462,6 +527,26 @@
return 0;
}
+/* Reset all non-control ports */
+static void qrtr_reset_ports(void)
+{
+ struct qrtr_sock *ipc;
+ int id;
+
+ mutex_lock(&qrtr_port_lock);
+ idr_for_each_entry(&qrtr_ports, ipc, id) {
+ /* Don't reset control port */
+ if (id == 0)
+ continue;
+
+ sock_hold(&ipc->sk);
+ ipc->sk.sk_err = ENETRESET;
+ wake_up_interruptible(sk_sleep(&ipc->sk));
+ sock_put(&ipc->sk);
+ }
+ mutex_unlock(&qrtr_port_lock);
+}
+
/* Bind socket to address.
*
* Socket should be locked upon call.
@@ -490,6 +575,10 @@
sock_reset_flag(sk, SOCK_ZAPPED);
+ /* Notify all open ports about the new controller */
+ if (port == QRTR_PORT_CTRL)
+ qrtr_reset_ports();
+
return 0;
}
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 6a5ebde..382443b 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -124,11 +124,6 @@
cp->cp_conn = conn;
atomic_set(&cp->cp_state, RDS_CONN_DOWN);
cp->cp_send_gen = 0;
- /* cp_outgoing is per-path. So we can only set it here
- * for the single-path transports.
- */
- if (!conn->c_trans->t_mp_capable)
- cp->cp_outgoing = (is_outgoing ? 1 : 0);
cp->cp_reconnect_jiffies = 0;
INIT_DELAYED_WORK(&cp->cp_send_w, rds_send_worker);
INIT_DELAYED_WORK(&cp->cp_recv_w, rds_recv_worker);
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 82d38cc..d6a04a0 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -92,6 +92,8 @@
#define RDS_MPATH_HASH(rs, n) (jhash_1word((rs)->rs_bound_port, \
(rs)->rs_hash_initval) & ((n) - 1))
+#define IS_CANONICAL(laddr, faddr) (htonl(laddr) < htonl(faddr))
+
/* Per mpath connection state */
struct rds_conn_path {
struct rds_connection *cp_conn;
@@ -125,8 +127,6 @@
unsigned int cp_unacked_packets;
unsigned int cp_unacked_bytes;
- unsigned int cp_outgoing:1,
- cp_pad_to_32:31;
unsigned int cp_index;
};
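IS_CANONICAL() replaces the raw __be32 comparisons below with one every host orders the same way. A standalone illustration of why the raw compare was unsafe, with addresses chosen to expose the mismatch on little-endian hosts:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t a = inet_addr("10.0.0.2");     /* __be32 values, as RDS */
        uint32_t b = inet_addr("192.168.0.1");  /* stores c_laddr/c_faddr */

        /* Raw compare: a little-endian host picks 192.168.0.1 here... */
        printf("raw:       %s\n", a < b ? "10.0.0.2" : "192.168.0.1");
        /* ...the canonical compare picks 10.0.0.2 on every host. */
        printf("canonical: %s\n",
               htonl(a) < htonl(b) ? "10.0.0.2" : "192.168.0.1");
        return 0;
    }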
diff --git a/net/rds/recv.c b/net/rds/recv.c
index c70c32c..49493db 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -215,10 +215,10 @@
switch (type) {
case RDS_EXTHDR_NPATHS:
conn->c_npaths = min_t(int, RDS_MPATH_WORKERS,
- buffer.rds_npaths);
+ be16_to_cpu(buffer.rds_npaths));
break;
case RDS_EXTHDR_GEN_NUM:
- new_peer_gen_num = buffer.rds_gen_num;
+ new_peer_gen_num = be32_to_cpu(buffer.rds_gen_num);
break;
default:
pr_warn_ratelimited("ignoring unknown exthdr type "
@@ -254,7 +254,8 @@
int i;
struct rds_conn_path *cp;
- if (conn->c_npaths > 1 && conn->c_laddr < conn->c_faddr) {
+ if (conn->c_npaths > 1 &&
+ IS_CANONICAL(conn->c_laddr, conn->c_faddr)) {
for (i = 1; i < conn->c_npaths; i++) {
cp = &conn->c_path[i];
rds_conn_path_connect_if_down(cp);
@@ -339,14 +340,15 @@
rds_stats_inc(s_recv_ping);
rds_send_pong(cp, inc->i_hdr.h_sport);
/* if this is a handshake ping, start multipath if necessary */
- if (RDS_HS_PROBE(inc->i_hdr.h_sport, inc->i_hdr.h_dport)) {
+ if (RDS_HS_PROBE(be16_to_cpu(inc->i_hdr.h_sport),
+ be16_to_cpu(inc->i_hdr.h_dport))) {
rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
rds_start_mprds(cp->cp_conn);
}
goto out;
}
- if (inc->i_hdr.h_dport == RDS_FLAG_PROBE_PORT &&
+ if (be16_to_cpu(inc->i_hdr.h_dport) == RDS_FLAG_PROBE_PORT &&
inc->i_hdr.h_sport == 0) {
rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
/* if this is a handshake pong, start multipath if necessary */
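The be16/be32 conversions above make the handshake extension headers explicitly big-endian on the wire instead of host-order. A minimal demonstration of the failure mode they close, assuming an NPATHS value of 8:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint16_t wire = htons(8);   /* npaths as now sent: big-endian */
        uint16_t raw;

        memcpy(&raw, &wire, sizeof(raw));
        printf("raw read:  %u\n", raw);          /* 2048 on little-endian */
        printf("converted: %u\n", ntohs(wire));  /* 8 on every host */
        return 0;
    }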
diff --git a/net/rds/send.c b/net/rds/send.c
index 5cc6403..3652a50 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1246,15 +1246,17 @@
rm->m_inc.i_hdr.h_flags |= h_flags;
cp->cp_next_tx_seq++;
- if (RDS_HS_PROBE(sport, dport) && cp->cp_conn->c_trans->t_mp_capable) {
- u16 npaths = RDS_MPATH_WORKERS;
+ if (RDS_HS_PROBE(be16_to_cpu(sport), be16_to_cpu(dport)) &&
+ cp->cp_conn->c_trans->t_mp_capable) {
+ u16 npaths = cpu_to_be16(RDS_MPATH_WORKERS);
+ u32 my_gen_num = cpu_to_be32(cp->cp_conn->c_my_gen_num);
rds_message_add_extension(&rm->m_inc.i_hdr,
RDS_EXTHDR_NPATHS, &npaths,
sizeof(npaths));
rds_message_add_extension(&rm->m_inc.i_hdr,
RDS_EXTHDR_GEN_NUM,
- &cp->cp_conn->c_my_gen_num,
+ &my_gen_num,
sizeof(u32));
}
spin_unlock_irqrestore(&cp->cp_lock, flags);
@@ -1293,5 +1295,6 @@
}
conn->c_ping_triggered = 1;
spin_unlock_irqrestore(&cp->cp_lock, flags);
- rds_send_probe(&conn->c_path[0], RDS_FLAG_PROBE_PORT, 0, 0);
+ rds_send_probe(&conn->c_path[0], cpu_to_be16(RDS_FLAG_PROBE_PORT),
+ 0, 0);
}
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index d6839d9..5a62a08 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -66,7 +66,7 @@
* RDS connection as RDS_CONN_UP until the reconnect,
* to avoid RDS datagram loss.
*/
- if (cp->cp_conn->c_laddr > cp->cp_conn->c_faddr &&
+ if (!IS_CANONICAL(cp->cp_conn->c_laddr, cp->cp_conn->c_faddr) &&
rds_conn_path_transition(cp, RDS_CONN_CONNECTING,
RDS_CONN_ERROR)) {
rds_conn_path_drop(cp);
@@ -135,7 +135,6 @@
ret = sock->ops->connect(sock, (struct sockaddr *)&dest, sizeof(dest),
O_NONBLOCK);
- cp->cp_outgoing = 1;
rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret);
if (ret == -EINPROGRESS)
ret = 0;
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 5076788..df291ac 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -83,7 +83,7 @@
struct rds_tcp_connection *rds_tcp_accept_one_path(struct rds_connection *conn)
{
int i;
- bool peer_is_smaller = (conn->c_faddr < conn->c_laddr);
+ bool peer_is_smaller = IS_CANONICAL(conn->c_faddr, conn->c_laddr);
int npaths = max_t(int, 1, conn->c_npaths);
/* for mprds, all paths MUST be initiated by the peer
@@ -112,6 +112,17 @@
return NULL;
}
+static void rds_tcp_set_linger(struct socket *sock)
+{
+ struct linger no_linger = {
+ .l_onoff = 1,
+ .l_linger = 0,
+ };
+
+ kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
+ (char *)&no_linger, sizeof(no_linger));
+}
+
int rds_tcp_accept_one(struct socket *sock)
{
struct socket *new_sock = NULL;
@@ -171,21 +182,10 @@
if (conn_state != RDS_CONN_CONNECTING && conn_state != RDS_CONN_ERROR)
goto rst_nsk;
if (rs_tcp->t_sock) {
- /* Need to resolve a duelling SYN between peers.
- * We have an outstanding SYN to this peer, which may
- * potentially have transitioned to the RDS_CONN_UP state,
- * so we must quiesce any send threads before resetting
- * c_transport_data.
- */
- if (ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr) ||
- !cp->cp_outgoing) {
- goto rst_nsk;
- } else {
- rds_tcp_reset_callbacks(new_sock, cp);
- cp->cp_outgoing = 0;
- /* rds_connect_path_complete() marks RDS_CONN_UP */
- rds_connect_path_complete(cp, RDS_CONN_RESETTING);
- }
+ /* Duelling SYN has been handled by rds_tcp_accept_one_path() above */
+ rds_tcp_reset_callbacks(new_sock, cp);
+ /* rds_connect_path_complete() marks RDS_CONN_UP */
+ rds_connect_path_complete(cp, RDS_CONN_RESETTING);
} else {
rds_tcp_set_callbacks(new_sock, cp);
rds_connect_path_complete(cp, RDS_CONN_CONNECTING);
@@ -194,7 +194,13 @@
ret = 0;
goto out;
rst_nsk:
- /* reset the newly returned accept sock and bail */
+ /* reset the newly returned accept sock and bail.
+ * It is safe to set linger on new_sock because the RDS connection
+ * has not been brought up on new_sock, so no RDS-level data could
+ * be pending on it. By setting linger, we achieve the side-effect
+ * of avoiding TIME_WAIT state on new_sock.
+ */
+ rds_tcp_set_linger(new_sock);
kernel_sock_shutdown(new_sock, SHUT_RDWR);
ret = 0;
out:
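For context, this is the standard sockets-API behaviour rds_tcp_set_linger() relies on: a zero linger timeout turns the close into an abortive RST, so the rejected socket skips TIME_WAIT. A userspace sketch of the same sequence:

    #include <sys/socket.h>
    #include <unistd.h>

    /* Userspace analogue of rds_tcp_set_linger() + kernel_sock_shutdown():
     * with l_onoff=1/l_linger=0, releasing the socket sends an RST rather
     * than a FIN, so the discarded socket never sits in TIME_WAIT.
     */
    static void abortive_close(int fd)
    {
        struct linger no_linger = {
            .l_onoff  = 1,
            .l_linger = 0,
        };

        setsockopt(fd, SOL_SOCKET, SO_LINGER, &no_linger, sizeof(no_linger));
        shutdown(fd, SHUT_RDWR);
        close(fd);
    }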
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 3e447d05..2852bc1 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -127,7 +127,7 @@
/* let peer with smaller addr initiate reconnect, to avoid duels */
if (conn->c_trans->t_type == RDS_TRANS_TCP &&
- conn->c_laddr > conn->c_faddr)
+ !IS_CANONICAL(conn->c_laddr, conn->c_faddr))
return;
set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
@@ -156,7 +156,8 @@
struct rds_connection *conn = cp->cp_conn;
int ret;
- if (cp->cp_index > 0 && cp->cp_conn->c_laddr > cp->cp_conn->c_faddr)
+ if (cp->cp_index > 0 &&
+ !IS_CANONICAL(cp->cp_conn->c_laddr, cp->cp_conn->c_faddr))
return;
clear_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
ret = rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 76c01cb..41bd496 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -81,8 +81,7 @@
rfkill->type = (unsigned)id->driver_data;
- return acpi_dev_add_driver_gpios(ACPI_COMPANION(dev),
- acpi_rfkill_default_gpios);
+ return devm_acpi_dev_add_driver_gpios(dev, acpi_rfkill_default_gpios);
}
static int rfkill_gpio_probe(struct platform_device *pdev)
@@ -154,8 +153,6 @@
rfkill_unregister(rfkill->rfkill_dev);
rfkill_destroy(rfkill->rfkill_dev);
- acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
-
return 0;
}
diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile
index b9da4d6..9c68d2f8 100644
--- a/net/rxrpc/Makefile
+++ b/net/rxrpc/Makefile
@@ -19,6 +19,7 @@
local_event.o \
local_object.o \
misc.o \
+ net_ns.o \
output.o \
peer_event.o \
peer_object.o \
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 7fb59c3..58ae0db 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -38,9 +38,6 @@
static struct proto rxrpc_proto;
static const struct proto_ops rxrpc_rpc_ops;
-/* local epoch for detecting local-end reset */
-u32 rxrpc_epoch;
-
/* current debugging ID */
atomic_t rxrpc_debug_id;
@@ -134,9 +131,8 @@
static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr;
- struct sock *sk = sock->sk;
struct rxrpc_local *local;
- struct rxrpc_sock *rx = rxrpc_sk(sk);
+ struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
u16 service_id = srx->srx_service;
int ret;
@@ -148,33 +144,50 @@
lock_sock(&rx->sk);
- if (rx->sk.sk_state != RXRPC_UNBOUND) {
+ switch (rx->sk.sk_state) {
+ case RXRPC_UNBOUND:
+ rx->srx = *srx;
+ local = rxrpc_lookup_local(sock_net(&rx->sk), &rx->srx);
+ if (IS_ERR(local)) {
+ ret = PTR_ERR(local);
+ goto error_unlock;
+ }
+
+ if (service_id) {
+ write_lock(&local->services_lock);
+ if (rcu_access_pointer(local->service))
+ goto service_in_use;
+ rx->local = local;
+ rcu_assign_pointer(local->service, rx);
+ write_unlock(&local->services_lock);
+
+ rx->sk.sk_state = RXRPC_SERVER_BOUND;
+ } else {
+ rx->local = local;
+ rx->sk.sk_state = RXRPC_CLIENT_BOUND;
+ }
+ break;
+
+ case RXRPC_SERVER_BOUND:
+ ret = -EINVAL;
+ if (service_id == 0)
+ goto error_unlock;
+ ret = -EADDRINUSE;
+ if (service_id == rx->srx.srx_service)
+ goto error_unlock;
+ ret = -EINVAL;
+ srx->srx_service = rx->srx.srx_service;
+ if (memcmp(srx, &rx->srx, sizeof(*srx)) != 0)
+ goto error_unlock;
+ rx->second_service = service_id;
+ rx->sk.sk_state = RXRPC_SERVER_BOUND2;
+ break;
+
+ default:
ret = -EINVAL;
goto error_unlock;
}
- memcpy(&rx->srx, srx, sizeof(rx->srx));
-
- local = rxrpc_lookup_local(&rx->srx);
- if (IS_ERR(local)) {
- ret = PTR_ERR(local);
- goto error_unlock;
- }
-
- if (service_id) {
- write_lock(&local->services_lock);
- if (rcu_access_pointer(local->service))
- goto service_in_use;
- rx->local = local;
- rcu_assign_pointer(local->service, rx);
- write_unlock(&local->services_lock);
-
- rx->sk.sk_state = RXRPC_SERVER_BOUND;
- } else {
- rx->local = local;
- rx->sk.sk_state = RXRPC_CLIENT_BOUND;
- }
-
release_sock(&rx->sk);
_leave(" = 0");
return 0;
@@ -209,6 +222,7 @@
ret = -EADDRNOTAVAIL;
break;
case RXRPC_SERVER_BOUND:
+ case RXRPC_SERVER_BOUND2:
ASSERT(rx->local != NULL);
max = READ_ONCE(rxrpc_max_backlog);
ret = -EINVAL;
@@ -248,6 +262,7 @@
* @srx: The address of the peer to contact
* @key: The security context to use (defaults to socket setting)
* @user_call_ID: The ID to use
+ * @tx_total_len: Total length of data to transmit during the call (or -1)
* @gfp: The allocation constraints
* @notify_rx: Where to send notifications instead of socket queue
*
@@ -262,6 +277,7 @@
struct sockaddr_rxrpc *srx,
struct key *key,
unsigned long user_call_ID,
+ s64 tx_total_len,
gfp_t gfp,
rxrpc_notify_rx_t notify_rx)
{
@@ -289,7 +305,8 @@
cp.security_level = 0;
cp.exclusive = false;
cp.service_id = srx->srx_service;
- call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, gfp);
+ call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len,
+ gfp);
/* The socket has been unlocked. */
if (!IS_ERR(call))
call->notify_rx = notify_rx;
@@ -434,7 +451,7 @@
ret = -EAFNOSUPPORT;
goto error_unlock;
}
- local = rxrpc_lookup_local(&rx->srx);
+ local = rxrpc_lookup_local(sock_net(sock->sk), &rx->srx);
if (IS_ERR(local)) {
ret = PTR_ERR(local);
goto error_unlock;
@@ -476,6 +493,7 @@
{
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
unsigned int min_sec_level;
+ u16 service_upgrade[2];
int ret;
_enter(",%d,%d,,%d", level, optname, optlen);
@@ -532,6 +550,28 @@
rx->min_sec_level = min_sec_level;
goto success;
+ case RXRPC_UPGRADEABLE_SERVICE:
+ ret = -EINVAL;
+ if (optlen != sizeof(service_upgrade) ||
+ rx->service_upgrade.from != 0)
+ goto error;
+ ret = -EISCONN;
+ if (rx->sk.sk_state != RXRPC_SERVER_BOUND2)
+ goto error;
+ ret = -EFAULT;
+ if (copy_from_user(service_upgrade, optval,
+ sizeof(service_upgrade)) != 0)
+ goto error;
+ ret = -EINVAL;
+ if ((service_upgrade[0] != rx->srx.srx_service ||
+ service_upgrade[1] != rx->second_service) &&
+ (service_upgrade[0] != rx->second_service ||
+ service_upgrade[1] != rx->srx.srx_service))
+ goto error;
+ rx->service_upgrade.from = service_upgrade[0];
+ rx->service_upgrade.to = service_upgrade[1];
+ goto success;
+
default:
break;
}
@@ -545,6 +585,34 @@
}
/*
+ * Get socket options.
+ */
+static int rxrpc_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *_optlen)
+{
+ int optlen;
+
+ if (level != SOL_RXRPC)
+ return -EOPNOTSUPP;
+
+ if (get_user(optlen, _optlen))
+ return -EFAULT;
+
+ switch (optname) {
+ case RXRPC_SUPPORTED_CMSG:
+ if (optlen < sizeof(int))
+ return -ETOOSMALL;
+ if (put_user(RXRPC__SUPPORTED - 1, (int __user *)optval) ||
+ put_user(sizeof(int), _optlen))
+ return -EFAULT;
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/*
* permit an RxRPC socket to be polled
*/
static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
@@ -582,9 +650,6 @@
_enter("%p,%d", sock, protocol);
- if (!net_eq(net, &init_net))
- return -EAFNOSUPPORT;
-
/* we support transport protocol UDP/UDP6 only */
if (protocol != PF_INET &&
IS_ENABLED(CONFIG_AF_RXRPC_IPV6) && protocol != PF_INET6)
@@ -750,7 +815,7 @@
.listen = rxrpc_listen,
.shutdown = rxrpc_shutdown,
.setsockopt = rxrpc_setsockopt,
- .getsockopt = sock_no_getsockopt,
+ .getsockopt = rxrpc_getsockopt,
.sendmsg = rxrpc_sendmsg,
.recvmsg = rxrpc_recvmsg,
.mmap = sock_no_mmap,
@@ -780,8 +845,6 @@
BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb));
- get_random_bytes(&rxrpc_epoch, sizeof(rxrpc_epoch));
- rxrpc_epoch |= RXRPC_RANDOM_EPOCH;
get_random_bytes(&tmp, sizeof(tmp));
tmp &= 0x3fffffff;
if (tmp == 0)
@@ -809,6 +872,10 @@
goto error_security;
}
+ ret = register_pernet_subsys(&rxrpc_net_ops);
+ if (ret)
+ goto error_pernet;
+
ret = proto_register(&rxrpc_proto, 1);
if (ret < 0) {
pr_crit("Cannot register protocol\n");
@@ -839,11 +906,6 @@
goto error_sysctls;
}
-#ifdef CONFIG_PROC_FS
- proc_create("rxrpc_calls", 0, init_net.proc_net, &rxrpc_call_seq_fops);
- proc_create("rxrpc_conns", 0, init_net.proc_net,
- &rxrpc_connection_seq_fops);
-#endif
return 0;
error_sysctls:
@@ -855,6 +917,8 @@
error_sock:
proto_unregister(&rxrpc_proto);
error_proto:
+ unregister_pernet_subsys(&rxrpc_net_ops);
+error_pernet:
rxrpc_exit_security();
error_security:
destroy_workqueue(rxrpc_workqueue);
@@ -875,14 +939,16 @@
unregister_key_type(&key_type_rxrpc);
sock_unregister(PF_RXRPC);
proto_unregister(&rxrpc_proto);
- rxrpc_destroy_all_calls();
- rxrpc_destroy_all_connections();
+ unregister_pernet_subsys(&rxrpc_net_ops);
ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0);
ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);
- rxrpc_destroy_all_locals();
- remove_proc_entry("rxrpc_conns", init_net.proc_net);
- remove_proc_entry("rxrpc_calls", init_net.proc_net);
+ /* Make sure the local and peer records pinned by any dying connections
+ * are released.
+ */
+ rcu_barrier();
+ rxrpc_destroy_client_conn_ids();
+
destroy_workqueue(rxrpc_workqueue);
rxrpc_exit_security();
kmem_cache_destroy(rxrpc_call_jar);
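Taken together, the bind() state machine and the two socket options above let one AF_RXRPC server socket carry two service IDs and nominate one as an upgrade of the other. A hedged userspace sketch, error handling omitted; it assumes the AF_RXRPC definitions (sockaddr_rxrpc, SOL_RXRPC, RXRPC_UPGRADEABLE_SERVICE, RXRPC_SUPPORTED_CMSG) are visible to userspace, and fd/srx come from the caller:

    #include <linux/rxrpc.h>
    #include <sys/socket.h>

    void bind_upgradeable(int fd, struct sockaddr_rxrpc *srx)
    {
        unsigned short ids[2] = { 1, 2 };   /* illustrative service IDs */
        int cmsgs;
        socklen_t len = sizeof(cmsgs);

        srx->srx_service = ids[0];
        bind(fd, (struct sockaddr *)srx, sizeof(*srx));
        srx->srx_service = ids[1];          /* -> RXRPC_SERVER_BOUND2 */
        bind(fd, (struct sockaddr *)srx, sizeof(*srx));

        /* Nominate service 1 -> 2 as an upgrade pair (u16[2], optlen 4) */
        setsockopt(fd, SOL_RXRPC, RXRPC_UPGRADEABLE_SERVICE,
                   ids, sizeof(ids));

        /* New getsockopt: how many sendmsg() cmsg types the kernel knows */
        getsockopt(fd, SOL_RXRPC, RXRPC_SUPPORTED_CMSG, &cmsgs, &len);
    }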
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 7486926..69b9733 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -11,6 +11,8 @@
#include <linux/atomic.h>
#include <linux/seqlock.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <rxrpc/packet.h>
@@ -59,12 +61,44 @@
RXRPC_CLIENT_UNBOUND, /* Unbound socket used as client */
RXRPC_CLIENT_BOUND, /* client local address bound */
RXRPC_SERVER_BOUND, /* server local address bound */
+ RXRPC_SERVER_BOUND2, /* second server local address bound */
RXRPC_SERVER_LISTENING, /* server listening for connections */
RXRPC_SERVER_LISTEN_DISABLED, /* server listening disabled */
RXRPC_CLOSE, /* socket is being closed */
};
/*
+ * Per-network namespace data.
+ */
+struct rxrpc_net {
+ struct proc_dir_entry *proc_net; /* Subdir in /proc/net */
+ u32 epoch; /* Local epoch for detecting local-end reset */
+ struct list_head calls; /* List of calls active in this namespace */
+ rwlock_t call_lock; /* Lock for ->calls */
+
+ struct list_head conn_proc_list; /* List of conns in this namespace for proc */
+ struct list_head service_conns; /* Service conns in this namespace */
+ rwlock_t conn_lock; /* Lock for ->conn_proc_list, ->service_conns */
+ struct delayed_work service_conn_reaper;
+
+ unsigned int nr_client_conns;
+ unsigned int nr_active_client_conns;
+ bool kill_all_client_conns;
+ spinlock_t client_conn_cache_lock; /* Lock for ->*_client_conns */
+ spinlock_t client_conn_discard_lock; /* Prevent multiple discarders */
+ struct list_head waiting_client_conns;
+ struct list_head active_client_conns;
+ struct list_head idle_client_conns;
+ struct delayed_work client_conn_reaper;
+
+ struct list_head local_endpoints;
+ struct mutex local_mutex; /* Lock for ->local_endpoints */
+
+ spinlock_t peer_hash_lock; /* Lock for ->peer_hash */
+ DECLARE_HASHTABLE(peer_hash, 10);
+};
+
+/*
* Service backlog preallocation.
*
* This contains circular buffers of preallocated peers, connections and calls
@@ -109,8 +143,14 @@
u32 min_sec_level; /* minimum security level */
#define RXRPC_SECURITY_MAX RXRPC_SECURITY_ENCRYPT
bool exclusive; /* Exclusive connection for a client socket */
+ u16 second_service; /* Additional service bound to the endpoint */
+ struct {
+ /* Service upgrade information */
+ u16 from; /* Service ID to upgrade (if not 0) */
+ u16 to; /* service ID to upgrade to */
+ } service_upgrade;
sa_family_t family; /* Protocol family created with */
- struct sockaddr_rxrpc srx; /* local address */
+ struct sockaddr_rxrpc srx; /* Primary Service/local addresses */
struct sockaddr_rxrpc connect_srx; /* Default client address from connect() */
};
@@ -211,6 +251,7 @@
struct rxrpc_local {
struct rcu_head rcu;
atomic_t usage;
+ struct rxrpc_net *rxnet; /* The network ns in which this resides */
struct list_head link;
struct socket *socket; /* my UDP socket */
struct work_struct processor;
@@ -259,6 +300,8 @@
u64 rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* Determined RTT cache */
u8 rtt_cursor; /* next entry at which to insert */
u8 rtt_usage; /* amount of cache actually used */
+
+ u8 cong_cwnd; /* Congestion window size */
};
/*
@@ -279,6 +322,7 @@
struct rxrpc_peer *peer; /* Remote endpoint */
struct key *key; /* Security details */
bool exclusive; /* T if conn is exclusive */
+ bool upgrade; /* T if service ID can be upgraded */
u16 service_id; /* Service ID for this connection */
u32 security_level; /* Security level selected */
};
@@ -293,6 +337,7 @@
RXRPC_CONN_EXPOSED, /* Conn has extra ref for exposure */
RXRPC_CONN_DONT_REUSE, /* Don't reuse this connection */
RXRPC_CONN_COUNTED, /* Counted by rxrpc_nr_client_conns */
+ RXRPC_CONN_PROBING_FOR_UPGRADE, /* Probing for service upgrade */
};
/*
@@ -309,6 +354,7 @@
RXRPC_CONN_CLIENT_INACTIVE, /* Conn is not yet listed */
RXRPC_CONN_CLIENT_WAITING, /* Conn is on wait list, waiting for capacity */
RXRPC_CONN_CLIENT_ACTIVE, /* Conn is on active list, doing calls */
+ RXRPC_CONN_CLIENT_UPGRADE, /* Conn is on active list, probing for upgrade */
RXRPC_CONN_CLIENT_CULLED, /* Conn is culled and delisted, doing calls */
RXRPC_CONN_CLIENT_IDLE, /* Conn is on idle list, doing mostly nothing */
RXRPC_CONN__NR_CACHE_STATES
@@ -352,7 +398,6 @@
u32 call_counter; /* Call ID counter */
u32 last_call; /* ID of last call */
u8 last_type; /* Type of last packet */
- u16 last_service_id;
union {
u32 last_seq;
u32 last_abort;
@@ -383,6 +428,7 @@
atomic_t serial; /* packet serial number counter */
unsigned int hi_serial; /* highest serial number received */
u32 security_nonce; /* response re-use preventer */
+ u16 service_id; /* Service ID, possibly upgraded */
u8 size_align; /* data size alignment (for security) */
u8 security_size; /* security header size */
u8 security_ix; /* security type */
@@ -484,6 +530,7 @@
struct rb_node sock_node; /* Node in rx->calls */
struct sk_buff *tx_pending; /* Tx socket buffer being filled */
wait_queue_head_t waitq; /* Wait queue for channel or Tx */
+ s64 tx_total_len; /* Total length left to be transmitted (or -1) */
__be32 crypto_buf[2]; /* Temporary packet crypto buffer */
unsigned long user_call_ID; /* user-defined call ID */
unsigned long flags;
@@ -601,7 +648,6 @@
* af_rxrpc.c
*/
extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
-extern u32 rxrpc_epoch;
extern atomic_t rxrpc_debug_id;
extern struct workqueue_struct *rxrpc_workqueue;
@@ -634,15 +680,13 @@
extern const char *const rxrpc_call_completions[];
extern unsigned int rxrpc_max_call_lifetime;
extern struct kmem_cache *rxrpc_call_jar;
-extern struct list_head rxrpc_calls;
-extern rwlock_t rxrpc_call_lock;
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
struct rxrpc_call *rxrpc_alloc_call(gfp_t);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
struct rxrpc_conn_parameters *,
struct sockaddr_rxrpc *,
- unsigned long, gfp_t);
+ unsigned long, s64, gfp_t);
void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
struct sk_buff *);
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
@@ -653,7 +697,7 @@
void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_cleanup_call(struct rxrpc_call *);
-void __exit rxrpc_destroy_all_calls(void);
+void rxrpc_destroy_all_calls(struct rxrpc_net *);
static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
{
@@ -773,7 +817,8 @@
void rxrpc_expose_client_call(struct rxrpc_call *);
void rxrpc_disconnect_client_call(struct rxrpc_call *);
void rxrpc_put_client_conn(struct rxrpc_connection *);
-void __exit rxrpc_destroy_all_client_connections(void);
+void rxrpc_discard_expired_client_conns(struct work_struct *);
+void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
/*
* conn_event.c
@@ -784,9 +829,6 @@
* conn_object.c
*/
extern unsigned int rxrpc_connection_expiry;
-extern struct list_head rxrpc_connections;
-extern struct list_head rxrpc_connection_proc_list;
-extern rwlock_t rxrpc_connection_lock;
int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
@@ -800,7 +842,8 @@
void rxrpc_get_connection(struct rxrpc_connection *);
struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *);
void rxrpc_put_service_conn(struct rxrpc_connection *);
-void __exit rxrpc_destroy_all_connections(void);
+void rxrpc_service_connection_reaper(struct work_struct *);
+void rxrpc_destroy_all_connections(struct rxrpc_net *);
static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
{
@@ -828,8 +871,9 @@
*/
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
struct sk_buff *);
-struct rxrpc_connection *rxrpc_prealloc_service_connection(gfp_t);
-void rxrpc_new_incoming_connection(struct rxrpc_connection *, struct sk_buff *);
+struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t);
+void rxrpc_new_incoming_connection(struct rxrpc_sock *,
+ struct rxrpc_connection *, struct sk_buff *);
void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
/*
@@ -861,9 +905,9 @@
/*
* local_object.c
*/
-struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *);
+struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *);
void __rxrpc_put_local(struct rxrpc_local *);
-void __exit rxrpc_destroy_all_locals(void);
+void rxrpc_destroy_all_locals(struct rxrpc_net *);
static inline void rxrpc_get_local(struct rxrpc_local *local)
{
@@ -902,6 +946,17 @@
extern const s8 rxrpc_ack_priority[];
/*
+ * net_ns.c
+ */
+extern unsigned int rxrpc_net_id;
+extern struct pernet_operations rxrpc_net_ops;
+
+static inline struct rxrpc_net *rxrpc_net(struct net *net)
+{
+ return net_generic(net, rxrpc_net_id);
+}
+
+/*
* output.c
*/
int rxrpc_send_ack_packet(struct rxrpc_call *, bool);
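net_ns.c is added by the Makefile hunk earlier but its contents are not part of this merge; the following is a sketch of the pernet pattern it presumably follows, given the rxrpc_net_id/rxrpc_net_ops declarations and the register_pernet_subsys() call above. The init/exit bodies are abbreviated guesses, not the actual file:

    #include <linux/random.h>
    #include "ar-internal.h"

    unsigned int rxrpc_net_id;

    /* One struct rxrpc_net is allocated per namespace by the pernet core
     * (via .size/.id); init seeds the fields declared in ar-internal.h
     * above, exit tears down whatever the namespace still holds.
     */
    static __net_init int rxrpc_init_net(struct net *net)
    {
        struct rxrpc_net *rxnet = rxrpc_net(net);

        get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch));
        rxnet->epoch |= RXRPC_RANDOM_EPOCH;
        INIT_LIST_HEAD(&rxnet->calls);
        rwlock_init(&rxnet->call_lock);
        /* ... conn lists/locks, reaper work items, local_endpoints,
         * peer_hash ...
         */
        return 0;
    }

    static __net_exit void rxrpc_exit_net(struct net *net)
    {
        struct rxrpc_net *rxnet = rxrpc_net(net);

        rxrpc_destroy_all_calls(rxnet);
        rxrpc_destroy_all_connections(rxnet);
        rxrpc_destroy_all_locals(rxnet);
    }

    struct pernet_operations rxrpc_net_ops = {
        .init = rxrpc_init_net,
        .exit = rxrpc_exit_net,
        .id   = &rxrpc_net_id,
        .size = sizeof(struct rxrpc_net),
    };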
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 1752fcf..dd30d74 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -38,6 +38,7 @@
{
const void *here = __builtin_return_address(0);
struct rxrpc_call *call;
+ struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
int max, tmp;
unsigned int size = RXRPC_BACKLOG_MAX;
unsigned int head, tail, call_head, call_tail;
@@ -79,7 +80,7 @@
if (CIRC_CNT(head, tail, size) < max) {
struct rxrpc_connection *conn;
- conn = rxrpc_prealloc_service_connection(gfp);
+ conn = rxrpc_prealloc_service_connection(rxnet, gfp);
if (!conn)
return -ENOMEM;
b->conn_backlog[head] = conn;
@@ -136,9 +137,9 @@
write_unlock(&rx->call_lock);
- write_lock(&rxrpc_call_lock);
- list_add_tail(&call->link, &rxrpc_calls);
- write_unlock(&rxrpc_call_lock);
+ write_lock(&rxnet->call_lock);
+ list_add_tail(&call->link, &rxnet->calls);
+ write_unlock(&rxnet->call_lock);
b->call_backlog[call_head] = call;
smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
@@ -185,6 +186,7 @@
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
struct rxrpc_backlog *b = rx->backlog;
+ struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
unsigned int size = RXRPC_BACKLOG_MAX, head, tail;
if (!b)
@@ -209,10 +211,10 @@
tail = b->conn_backlog_tail;
while (CIRC_CNT(head, tail, size) > 0) {
struct rxrpc_connection *conn = b->conn_backlog[tail];
- write_lock(&rxrpc_connection_lock);
+ write_lock(&rxnet->conn_lock);
list_del(&conn->link);
list_del(&conn->proc_link);
- write_unlock(&rxrpc_connection_lock);
+ write_unlock(&rxnet->conn_lock);
kfree(conn);
tail = (tail + 1) & (size - 1);
}
@@ -294,7 +296,7 @@
conn->params.local = local;
conn->params.peer = peer;
rxrpc_see_connection(conn);
- rxrpc_new_incoming_connection(conn, skb);
+ rxrpc_new_incoming_connection(rx, conn, skb);
} else {
rxrpc_get_connection(conn);
}
@@ -308,6 +310,7 @@
rxrpc_see_call(call);
call->conn = conn;
call->peer = rxrpc_get_peer(conn->params.peer);
+ call->cong_cwnd = call->peer->cong_cwnd;
return call;
}
@@ -339,7 +342,8 @@
/* Get the socket providing the service */
rx = rcu_dereference(local->service);
- if (rx && service_id == rx->srx.srx_service)
+ if (rx && (service_id == rx->srx.srx_service ||
+ service_id == rx->second_service))
goto found_service;
trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 47f7f42..d7809a0 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -44,8 +44,6 @@
};
struct kmem_cache *rxrpc_call_jar;
-LIST_HEAD(rxrpc_calls);
-DEFINE_RWLOCK(rxrpc_call_lock);
static void rxrpc_call_timer_expired(unsigned long _call)
{
@@ -129,6 +127,7 @@
rwlock_init(&call->state_lock);
atomic_set(&call->usage, 1);
call->debug_id = atomic_inc_return(&rxrpc_debug_id);
+ call->tx_total_len = -1;
memset(&call->sock_node, 0xed, sizeof(call->sock_node));
@@ -137,12 +136,7 @@
call->tx_winsize = 16;
call->rx_expect_next = 1;
- if (RXRPC_TX_SMSS > 2190)
- call->cong_cwnd = 2;
- else if (RXRPC_TX_SMSS > 1095)
- call->cong_cwnd = 3;
- else
- call->cong_cwnd = 4;
+ call->cong_cwnd = 2;
call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
return call;
@@ -203,10 +197,12 @@
struct rxrpc_conn_parameters *cp,
struct sockaddr_rxrpc *srx,
unsigned long user_call_ID,
+ s64 tx_total_len,
gfp_t gfp)
__releases(&rx->sk.sk_lock.slock)
{
struct rxrpc_call *call, *xcall;
+ struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
struct rb_node *parent, **pp;
const void *here = __builtin_return_address(0);
int ret;
@@ -220,6 +216,7 @@
return call;
}
+ call->tx_total_len = tx_total_len;
trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
here, (const void *)user_call_ID);
@@ -255,9 +252,9 @@
write_unlock(&rx->call_lock);
- write_lock(&rxrpc_call_lock);
- list_add_tail(&call->link, &rxrpc_calls);
- write_unlock(&rxrpc_call_lock);
+ write_lock(&rxnet->call_lock);
+ list_add_tail(&call->link, &rxnet->calls);
+ write_unlock(&rxnet->call_lock);
/* From this point on, the call is protected by its own lock. */
release_sock(&rx->sk);
@@ -508,6 +505,7 @@
*/
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
+ struct rxrpc_net *rxnet;
const void *here = __builtin_return_address(0);
int n;
@@ -520,9 +518,12 @@
_debug("call %d dead", call->debug_id);
ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
- write_lock(&rxrpc_call_lock);
- list_del_init(&call->link);
- write_unlock(&rxrpc_call_lock);
+ if (!list_empty(&call->link)) {
+ rxnet = rxrpc_net(sock_net(&call->socket->sk));
+ write_lock(&rxnet->call_lock);
+ list_del_init(&call->link);
+ write_unlock(&rxnet->call_lock);
+ }
rxrpc_cleanup_call(call);
}
@@ -570,21 +571,23 @@
}
/*
- * Make sure that all calls are gone.
+ * Make sure that all calls are gone from a network namespace. To reach this
+ * point, any open UDP sockets in that namespace must have been closed, so any
+ * outstanding calls cannot be doing I/O.
*/
-void __exit rxrpc_destroy_all_calls(void)
+void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
struct rxrpc_call *call;
_enter("");
- if (list_empty(&rxrpc_calls))
+ if (list_empty(&rxnet->calls))
return;
- write_lock(&rxrpc_call_lock);
+ write_lock(&rxnet->call_lock);
- while (!list_empty(&rxrpc_calls)) {
- call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
+ while (!list_empty(&rxnet->calls)) {
+ call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
_debug("Zapping call %p", call);
rxrpc_see_call(call);
@@ -595,10 +598,10 @@
rxrpc_call_states[call->state],
call->flags, call->events);
- write_unlock(&rxrpc_call_lock);
+ write_unlock(&rxnet->call_lock);
cond_resched();
- write_lock(&rxrpc_call_lock);
+ write_lock(&rxnet->call_lock);
}
- write_unlock(&rxrpc_call_lock);
+ write_unlock(&rxnet->call_lock);
}
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index e8dea0d..eb21576 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -31,22 +31,25 @@
* may freely grant available channels to new calls and calls may be
* waiting on it for channels to become available.
*
- * The connection is on the rxrpc_active_client_conns list which is kept
+ * The connection is on the rxnet->active_client_conns list which is kept
* in activation order for culling purposes.
*
* rxrpc_nr_active_client_conns is held incremented also.
*
- * (4) CULLED - The connection got summarily culled to try and free up
+ * (4) UPGRADE - As for ACTIVE, but only one call may be in progress, and
+ * it is being used to probe for a service upgrade.
+ *
+ * (5) CULLED - The connection got summarily culled to try and free up
* capacity. Calls currently in progress on the connection are allowed to
* continue, but new calls will have to wait. There can be no waiters in
* this state - the conn would have to go to the WAITING state instead.
*
- * (5) IDLE - The connection has no calls in progress upon it and must have
+ * (6) IDLE - The connection has no calls in progress upon it and must have
* been exposed to the world (ie. the EXPOSED flag must be set). When it
* expires, the EXPOSED flag is cleared and the connection transitions to
* the INACTIVE state.
*
- * The connection is on the rxrpc_idle_client_conns list which is kept in
+ * The connection is on the rxnet->idle_client_conns list which is kept in
* order of how soon they'll expire.
*
* There are flags of relevance to the cache:
@@ -85,27 +88,13 @@
__read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
-static unsigned int rxrpc_nr_client_conns;
-static unsigned int rxrpc_nr_active_client_conns;
-static __read_mostly bool rxrpc_kill_all_client_conns;
-
-static DEFINE_SPINLOCK(rxrpc_client_conn_cache_lock);
-static DEFINE_SPINLOCK(rxrpc_client_conn_discard_mutex);
-static LIST_HEAD(rxrpc_waiting_client_conns);
-static LIST_HEAD(rxrpc_active_client_conns);
-static LIST_HEAD(rxrpc_idle_client_conns);
-
/*
* We use machine-unique IDs for our client connections.
*/
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);
-static void rxrpc_cull_active_client_conns(void);
-static void rxrpc_discard_expired_client_conns(struct work_struct *);
-
-static DECLARE_DELAYED_WORK(rxrpc_client_conn_reap,
- rxrpc_discard_expired_client_conns);
+static void rxrpc_cull_active_client_conns(struct rxrpc_net *);
/*
* Get a connection ID and epoch for a client connection from the global pool.
@@ -116,6 +105,7 @@
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
gfp_t gfp)
{
+ struct rxrpc_net *rxnet = conn->params.local->rxnet;
int id;
_enter("");
@@ -131,7 +121,7 @@
spin_unlock(&rxrpc_conn_id_lock);
idr_preload_end();
- conn->proto.epoch = rxrpc_epoch;
+ conn->proto.epoch = rxnet->epoch;
conn->proto.cid = id << RXRPC_CIDSHIFT;
set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
_leave(" [CID %x]", conn->proto.cid);
@@ -183,6 +173,7 @@
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
{
struct rxrpc_connection *conn;
+ struct rxrpc_net *rxnet = cp->local->rxnet;
int ret;
_enter("");
@@ -196,10 +187,13 @@
atomic_set(&conn->usage, 1);
if (cp->exclusive)
__set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
+ if (cp->upgrade)
+ __set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
conn->params = *cp;
conn->out_clientflag = RXRPC_CLIENT_INITIATED;
conn->state = RXRPC_CONN_CLIENT;
+ conn->service_id = cp->service_id;
ret = rxrpc_get_client_connection_id(conn, gfp);
if (ret < 0)
@@ -213,9 +207,9 @@
if (ret < 0)
goto error_2;
- write_lock(&rxrpc_connection_lock);
- list_add_tail(&conn->proc_link, &rxrpc_connection_proc_list);
- write_unlock(&rxrpc_connection_lock);
+ write_lock(&rxnet->conn_lock);
+ list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
+ write_unlock(&rxnet->conn_lock);
/* We steal the caller's peer ref. */
cp->peer = NULL;
@@ -243,12 +237,13 @@
*/
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
+ struct rxrpc_net *rxnet = conn->params.local->rxnet;
int id_cursor, id, distance, limit;
if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
goto dont_reuse;
- if (conn->proto.epoch != rxrpc_epoch)
+ if (conn->proto.epoch != rxnet->epoch)
goto mark_dont_reuse;
/* The IDR tree gets very expensive on memory if the connection IDs are
@@ -297,6 +292,12 @@
if (!cp->peer)
goto error;
+ call->cong_cwnd = cp->peer->cong_cwnd;
+ if (call->cong_cwnd >= call->cong_ssthresh)
+ call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
+ else
+ call->cong_mode = RXRPC_CALL_SLOW_START;
+
/* If the connection is not meant to be exclusive, search the available
* connections to see if the connection we want to use already exists.
*/
@@ -310,7 +311,8 @@
#define cmp(X) ((long)conn->params.X - (long)cp->X)
diff = (cmp(peer) ?:
cmp(key) ?:
- cmp(security_level));
+ cmp(security_level) ?:
+ cmp(upgrade));
#undef cmp
if (diff < 0) {
p = p->rb_left;
@@ -354,6 +356,7 @@
if (cp->exclusive) {
call->conn = candidate;
call->security_ix = candidate->security_ix;
+ call->service_id = candidate->service_id;
_leave(" = 0 [exclusive %d]", candidate->debug_id);
return 0;
}
@@ -374,7 +377,8 @@
#define cmp(X) ((long)conn->params.X - (long)candidate->params.X)
diff = (cmp(peer) ?:
cmp(key) ?:
- cmp(security_level));
+ cmp(security_level) ?:
+ cmp(upgrade));
#undef cmp
if (diff < 0) {
pp = &(*pp)->rb_left;
@@ -403,6 +407,7 @@
set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
call->conn = candidate;
call->security_ix = candidate->security_ix;
+ call->service_id = candidate->service_id;
spin_unlock(&local->client_conns_lock);
_leave(" = 0 [new %d]", candidate->debug_id);
return 0;
@@ -424,6 +429,7 @@
spin_lock(&conn->channel_lock);
call->conn = conn;
call->security_ix = conn->security_ix;
+ call->service_id = conn->service_id;
list_add(&call->chan_wait_link, &conn->waiting_calls);
spin_unlock(&conn->channel_lock);
_leave(" = 0 [extant %d]", conn->debug_id);
@@ -440,12 +446,18 @@
/*
* Activate a connection.
*/
-static void rxrpc_activate_conn(struct rxrpc_connection *conn)
+static void rxrpc_activate_conn(struct rxrpc_net *rxnet,
+ struct rxrpc_connection *conn)
{
- trace_rxrpc_client(conn, -1, rxrpc_client_to_active);
- conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
- rxrpc_nr_active_client_conns++;
- list_move_tail(&conn->cache_link, &rxrpc_active_client_conns);
+ if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
+ trace_rxrpc_client(conn, -1, rxrpc_client_to_upgrade);
+ conn->cache_state = RXRPC_CONN_CLIENT_UPGRADE;
+ } else {
+ trace_rxrpc_client(conn, -1, rxrpc_client_to_active);
+ conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
+ }
+ rxnet->nr_active_client_conns++;
+ list_move_tail(&conn->cache_link, &rxnet->active_client_conns);
}
/*
@@ -460,25 +472,28 @@
* channels if it has been culled to make space and then re-requested by a new
* call.
*/
-static void rxrpc_animate_client_conn(struct rxrpc_connection *conn)
+static void rxrpc_animate_client_conn(struct rxrpc_net *rxnet,
+ struct rxrpc_connection *conn)
{
unsigned int nr_conns;
_enter("%d,%d", conn->debug_id, conn->cache_state);
- if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE)
+ if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE ||
+ conn->cache_state == RXRPC_CONN_CLIENT_UPGRADE)
goto out;
- spin_lock(&rxrpc_client_conn_cache_lock);
+ spin_lock(&rxnet->client_conn_cache_lock);
- nr_conns = rxrpc_nr_client_conns;
+ nr_conns = rxnet->nr_client_conns;
if (!test_and_set_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
trace_rxrpc_client(conn, -1, rxrpc_client_count);
- rxrpc_nr_client_conns = nr_conns + 1;
+ rxnet->nr_client_conns = nr_conns + 1;
}
switch (conn->cache_state) {
case RXRPC_CONN_CLIENT_ACTIVE:
+ case RXRPC_CONN_CLIENT_UPGRADE:
case RXRPC_CONN_CLIENT_WAITING:
break;
@@ -494,21 +509,21 @@
}
out_unlock:
- spin_unlock(&rxrpc_client_conn_cache_lock);
+ spin_unlock(&rxnet->client_conn_cache_lock);
out:
_leave(" [%d]", conn->cache_state);
return;
activate_conn:
_debug("activate");
- rxrpc_activate_conn(conn);
+ rxrpc_activate_conn(rxnet, conn);
goto out_unlock;
wait_for_capacity:
_debug("wait");
trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
- list_move_tail(&conn->cache_link, &rxrpc_waiting_client_conns);
+ list_move_tail(&conn->cache_link, &rxnet->waiting_client_conns);
goto out_unlock;
}
@@ -582,6 +597,9 @@
case RXRPC_CONN_CLIENT_ACTIVE:
mask = RXRPC_ACTIVE_CHANS_MASK;
break;
+ case RXRPC_CONN_CLIENT_UPGRADE:
+ mask = 0x01;
+ break;
default:
return;
}
@@ -660,18 +678,19 @@
struct sockaddr_rxrpc *srx,
gfp_t gfp)
{
+ struct rxrpc_net *rxnet = cp->local->rxnet;
int ret;
_enter("{%d,%lx},", call->debug_id, call->user_call_ID);
- rxrpc_discard_expired_client_conns(NULL);
- rxrpc_cull_active_client_conns();
+ rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper.work);
+ rxrpc_cull_active_client_conns(rxnet);
ret = rxrpc_get_client_conn(call, cp, srx, gfp);
if (ret < 0)
return ret;
- rxrpc_animate_client_conn(call->conn);
+ rxrpc_animate_client_conn(rxnet, call->conn);
rxrpc_activate_channels(call->conn);
ret = rxrpc_wait_for_channel(call, gfp);
@@ -729,6 +748,7 @@
unsigned int channel = call->cid & RXRPC_CHANNELMASK;
struct rxrpc_connection *conn = call->conn;
struct rxrpc_channel *chan = &conn->channels[channel];
+ struct rxrpc_net *rxnet = rxrpc_net(sock_net(&call->socket->sk));
trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
call->conn = NULL;
@@ -750,7 +770,7 @@
/* We must deactivate or idle the connection if it's now
* waiting for nothing.
*/
- spin_lock(&rxrpc_client_conn_cache_lock);
+ spin_lock(&rxnet->client_conn_cache_lock);
if (conn->cache_state == RXRPC_CONN_CLIENT_WAITING &&
list_empty(&conn->waiting_calls) &&
!conn->active_chans)
@@ -787,14 +807,23 @@
* list. It might even get moved back to the active list whilst we're
* waiting for the lock.
*/
- spin_lock(&rxrpc_client_conn_cache_lock);
+ spin_lock(&rxnet->client_conn_cache_lock);
switch (conn->cache_state) {
+ case RXRPC_CONN_CLIENT_UPGRADE:
+ /* Deal with termination of a service upgrade probe. */
+ if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
+ clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
+ trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
+ conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
+ rxrpc_activate_channels_locked(conn);
+ }
+ /* fall through */
case RXRPC_CONN_CLIENT_ACTIVE:
if (list_empty(&conn->waiting_calls)) {
rxrpc_deactivate_one_channel(conn, channel);
if (!conn->active_chans) {
- rxrpc_nr_active_client_conns--;
+ rxnet->nr_active_client_conns--;
goto idle_connection;
}
goto out;
@@ -820,7 +849,7 @@
}
out:
- spin_unlock(&rxrpc_client_conn_cache_lock);
+ spin_unlock(&rxnet->client_conn_cache_lock);
out_2:
spin_unlock(&conn->channel_lock);
rxrpc_put_connection(conn);
@@ -835,11 +864,11 @@
trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
conn->idle_timestamp = jiffies;
conn->cache_state = RXRPC_CONN_CLIENT_IDLE;
- list_move_tail(&conn->cache_link, &rxrpc_idle_client_conns);
- if (rxrpc_idle_client_conns.next == &conn->cache_link &&
- !rxrpc_kill_all_client_conns)
+ list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
+ if (rxnet->idle_client_conns.next == &conn->cache_link &&
+ !rxnet->kill_all_client_conns)
queue_delayed_work(rxrpc_workqueue,
- &rxrpc_client_conn_reap,
+ &rxnet->client_conn_reaper,
rxrpc_conn_idle_client_expiry);
} else {
trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
@@ -857,6 +886,7 @@
{
struct rxrpc_connection *next = NULL;
struct rxrpc_local *local = conn->params.local;
+ struct rxrpc_net *rxnet = local->rxnet;
unsigned int nr_conns;
trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
@@ -875,18 +905,18 @@
if (test_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
trace_rxrpc_client(conn, -1, rxrpc_client_uncount);
- spin_lock(&rxrpc_client_conn_cache_lock);
- nr_conns = --rxrpc_nr_client_conns;
+ spin_lock(&rxnet->client_conn_cache_lock);
+ nr_conns = --rxnet->nr_client_conns;
if (nr_conns < rxrpc_max_client_connections &&
- !list_empty(&rxrpc_waiting_client_conns)) {
- next = list_entry(rxrpc_waiting_client_conns.next,
+ !list_empty(&rxnet->waiting_client_conns)) {
+ next = list_entry(rxnet->waiting_client_conns.next,
struct rxrpc_connection, cache_link);
rxrpc_get_connection(next);
- rxrpc_activate_conn(next);
+ rxrpc_activate_conn(rxnet, next);
}
- spin_unlock(&rxrpc_client_conn_cache_lock);
+ spin_unlock(&rxnet->client_conn_cache_lock);
}
rxrpc_kill_connection(conn);
@@ -921,10 +951,10 @@
/*
* Kill the longest-active client connections to make room for new ones.
*/
-static void rxrpc_cull_active_client_conns(void)
+static void rxrpc_cull_active_client_conns(struct rxrpc_net *rxnet)
{
struct rxrpc_connection *conn;
- unsigned int nr_conns = rxrpc_nr_client_conns;
+ unsigned int nr_conns = rxnet->nr_client_conns;
unsigned int nr_active, limit;
_enter("");
@@ -936,14 +966,15 @@
}
limit = rxrpc_reap_client_connections;
- spin_lock(&rxrpc_client_conn_cache_lock);
- nr_active = rxrpc_nr_active_client_conns;
+ spin_lock(&rxnet->client_conn_cache_lock);
+ nr_active = rxnet->nr_active_client_conns;
while (nr_active > limit) {
- ASSERT(!list_empty(&rxrpc_active_client_conns));
- conn = list_entry(rxrpc_active_client_conns.next,
+ ASSERT(!list_empty(&rxnet->active_client_conns));
+ conn = list_entry(rxnet->active_client_conns.next,
struct rxrpc_connection, cache_link);
- ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_ACTIVE);
+ ASSERTIFCMP(conn->cache_state != RXRPC_CONN_CLIENT_ACTIVE,
+ conn->cache_state, ==, RXRPC_CONN_CLIENT_UPGRADE);
if (list_empty(&conn->waiting_calls)) {
trace_rxrpc_client(conn, -1, rxrpc_client_to_culled);
@@ -953,14 +984,14 @@
trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
list_move_tail(&conn->cache_link,
- &rxrpc_waiting_client_conns);
+ &rxnet->waiting_client_conns);
}
nr_active--;
}
- rxrpc_nr_active_client_conns = nr_active;
- spin_unlock(&rxrpc_client_conn_cache_lock);
+ rxnet->nr_active_client_conns = nr_active;
+ spin_unlock(&rxnet->client_conn_cache_lock);
ASSERTCMP(nr_active, >=, 0);
_leave(" [culled]");
}
@@ -972,22 +1003,25 @@
* This may be called from conn setup or from a work item so cannot be
* considered non-reentrant.
*/
-static void rxrpc_discard_expired_client_conns(struct work_struct *work)
+void rxrpc_discard_expired_client_conns(struct work_struct *work)
{
struct rxrpc_connection *conn;
+ struct rxrpc_net *rxnet =
+ container_of(to_delayed_work(work),
+ struct rxrpc_net, client_conn_reaper);
unsigned long expiry, conn_expires_at, now;
unsigned int nr_conns;
bool did_discard = false;
- _enter("%c", work ? 'w' : 'n');
+ _enter("");
- if (list_empty(&rxrpc_idle_client_conns)) {
+ if (list_empty(&rxnet->idle_client_conns)) {
_leave(" [empty]");
return;
}
/* Don't double up on the discarding */
- if (!spin_trylock(&rxrpc_client_conn_discard_mutex)) {
+ if (!spin_trylock(&rxnet->client_conn_discard_lock)) {
_leave(" [already]");
return;
}
@@ -995,19 +1029,19 @@
/* We keep an estimate of what the number of conns ought to be after
* we've discarded some so that we don't overdo the discarding.
*/
- nr_conns = rxrpc_nr_client_conns;
+ nr_conns = rxnet->nr_client_conns;
next:
- spin_lock(&rxrpc_client_conn_cache_lock);
+ spin_lock(&rxnet->client_conn_cache_lock);
- if (list_empty(&rxrpc_idle_client_conns))
+ if (list_empty(&rxnet->idle_client_conns))
goto out;
- conn = list_entry(rxrpc_idle_client_conns.next,
+ conn = list_entry(rxnet->idle_client_conns.next,
struct rxrpc_connection, cache_link);
ASSERT(test_bit(RXRPC_CONN_EXPOSED, &conn->flags));
- if (!rxrpc_kill_all_client_conns) {
+ if (!rxnet->kill_all_client_conns) {
/* If the number of connections is over the reap limit, we
* expedite discard by reducing the expiry timeout. We must,
* however, have at least a short grace period to be able to do
@@ -1030,7 +1064,7 @@
conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
list_del_init(&conn->cache_link);
- spin_unlock(&rxrpc_client_conn_cache_lock);
+ spin_unlock(&rxnet->client_conn_cache_lock);
/* When we cleared the EXPOSED flag, we took on responsibility for the
* reference that the flag held on the usage count. We deal with that here.
@@ -1050,14 +1084,14 @@
* then things get messier.
*/
_debug("not yet");
- if (!rxrpc_kill_all_client_conns)
+ if (!rxnet->kill_all_client_conns)
queue_delayed_work(rxrpc_workqueue,
- &rxrpc_client_conn_reap,
+ &rxnet->client_conn_reaper,
conn_expires_at - now);
out:
- spin_unlock(&rxrpc_client_conn_cache_lock);
- spin_unlock(&rxrpc_client_conn_discard_mutex);
+ spin_unlock(&rxnet->client_conn_cache_lock);
+ spin_unlock(&rxnet->client_conn_discard_lock);
_leave("");
}
@@ -1065,17 +1099,17 @@
* Preemptively destroy all the client connection records rather than waiting
* for them to time out
*/
-void __exit rxrpc_destroy_all_client_connections(void)
+void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
{
_enter("");
- spin_lock(&rxrpc_client_conn_cache_lock);
- rxrpc_kill_all_client_conns = true;
- spin_unlock(&rxrpc_client_conn_cache_lock);
+ spin_lock(&rxnet->client_conn_cache_lock);
+ rxnet->kill_all_client_conns = true;
+ spin_unlock(&rxnet->client_conn_cache_lock);
- cancel_delayed_work(&rxrpc_client_conn_reap);
+ cancel_delayed_work(&rxnet->client_conn_reaper);
- if (!queue_delayed_work(rxrpc_workqueue, &rxrpc_client_conn_reap, 0))
+ if (!queue_delayed_work(rxrpc_workqueue, &rxnet->client_conn_reaper, 0))
_debug("destroy: queue failed");
_leave("");
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 46babcf..59a51a5 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -74,7 +74,7 @@
pkt.whdr.userStatus = 0;
pkt.whdr.securityIndex = conn->security_ix;
pkt.whdr._rsvd = 0;
- pkt.whdr.serviceId = htons(chan->last_service_id);
+ pkt.whdr.serviceId = htons(conn->service_id);
len = sizeof(pkt.whdr);
switch (chan->last_type) {
@@ -208,7 +208,7 @@
whdr.userStatus = 0;
whdr.securityIndex = conn->security_ix;
whdr._rsvd = 0;
- whdr.serviceId = htons(conn->params.service_id);
+ whdr.serviceId = htons(conn->service_id);
word = htonl(conn->local_abort);
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index b0ecb77..929b50d 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -22,13 +22,6 @@
*/
unsigned int rxrpc_connection_expiry = 10 * 60;
-static void rxrpc_connection_reaper(struct work_struct *work);
-
-LIST_HEAD(rxrpc_connections);
-LIST_HEAD(rxrpc_connection_proc_list);
-DEFINE_RWLOCK(rxrpc_connection_lock);
-static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
-
static void rxrpc_destroy_connection(struct rcu_head *);
/*
@@ -174,7 +167,6 @@
* through the channel, whilst disposing of the actual call record.
*/
trace_rxrpc_disconnect_call(call);
- chan->last_service_id = call->service_id;
if (call->abort_code) {
chan->last_abort = call->abort_code;
chan->last_type = RXRPC_PACKET_TYPE_ABORT;
@@ -201,6 +193,8 @@
{
struct rxrpc_connection *conn = call->conn;
+ call->peer->cong_cwnd = call->cong_cwnd;
+
spin_lock_bh(&conn->params.peer->lock);
hlist_del_init(&call->error_link);
spin_unlock_bh(&conn->params.peer->lock);
@@ -222,15 +216,17 @@
*/
void rxrpc_kill_connection(struct rxrpc_connection *conn)
{
+ struct rxrpc_net *rxnet = conn->params.local->rxnet;
+
ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
!rcu_access_pointer(conn->channels[1].call) &&
!rcu_access_pointer(conn->channels[2].call) &&
!rcu_access_pointer(conn->channels[3].call));
ASSERT(list_empty(&conn->cache_link));
- write_lock(&rxrpc_connection_lock);
+ write_lock(&rxnet->conn_lock);
list_del_init(&conn->proc_link);
- write_unlock(&rxrpc_connection_lock);
+ write_unlock(&rxnet->conn_lock);
/* Drain the Rx queue. Note that even though we've unpublished, an
* incoming packet could still be being added to our Rx queue, so we
@@ -309,14 +305,17 @@
*/
void rxrpc_put_service_conn(struct rxrpc_connection *conn)
{
+ struct rxrpc_net *rxnet;
const void *here = __builtin_return_address(0);
int n;
n = atomic_dec_return(&conn->usage);
trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
ASSERTCMP(n, >=, 0);
- if (n == 0)
- rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
+ if (n == 0) {
+ rxnet = conn->params.local->rxnet;
+ rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0);
+ }
}
/*
@@ -348,9 +347,12 @@
/*
* reap dead service connections
*/
-static void rxrpc_connection_reaper(struct work_struct *work)
+void rxrpc_service_connection_reaper(struct work_struct *work)
{
struct rxrpc_connection *conn, *_p;
+ struct rxrpc_net *rxnet =
+ container_of(to_delayed_work(work),
+ struct rxrpc_net, service_conn_reaper);
unsigned long reap_older_than, earliest, idle_timestamp, now;
LIST_HEAD(graveyard);
@@ -361,8 +363,8 @@
reap_older_than = now - rxrpc_connection_expiry * HZ;
earliest = ULONG_MAX;
- write_lock(&rxrpc_connection_lock);
- list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
+ write_lock(&rxnet->conn_lock);
+ list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
ASSERTCMP(atomic_read(&conn->usage), >, 0);
if (likely(atomic_read(&conn->usage) > 1))
continue;
@@ -393,12 +395,12 @@
list_move_tail(&conn->link, &graveyard);
}
- write_unlock(&rxrpc_connection_lock);
+ write_unlock(&rxnet->conn_lock);
if (earliest != ULONG_MAX) {
_debug("reschedule reaper %ld", (long) earliest - now);
ASSERT(time_after(earliest, now));
- rxrpc_queue_delayed_work(&rxrpc_connection_reap,
+ rxrpc_queue_delayed_work(&rxnet->service_conn_reaper,
earliest - now);
}
@@ -418,36 +420,30 @@
* preemptively destroy all the service connection records rather than
* waiting for them to time out
*/
-void __exit rxrpc_destroy_all_connections(void)
+void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
{
struct rxrpc_connection *conn, *_p;
bool leak = false;
_enter("");
- rxrpc_destroy_all_client_connections();
+ rxrpc_destroy_all_client_connections(rxnet);
rxrpc_connection_expiry = 0;
- cancel_delayed_work(&rxrpc_connection_reap);
- rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
+ cancel_delayed_work(&rxnet->service_conn_reaper);
+ rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0);
flush_workqueue(rxrpc_workqueue);
- write_lock(&rxrpc_connection_lock);
- list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
+ write_lock(&rxnet->conn_lock);
+ list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
conn, atomic_read(&conn->usage));
leak = true;
}
- write_unlock(&rxrpc_connection_lock);
+ write_unlock(&rxnet->conn_lock);
BUG_ON(leak);
- ASSERT(list_empty(&rxrpc_connection_proc_list));
-
- /* Make sure the local and peer records pinned by any dying connections
- * are released.
- */
- rcu_barrier();
- rxrpc_destroy_client_conn_ids();
+ ASSERT(list_empty(&rxnet->conn_proc_list));
_leave("");
}
diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
index eef551f..e60fcd2 100644
--- a/net/rxrpc/conn_service.c
+++ b/net/rxrpc/conn_service.c
@@ -121,7 +121,8 @@
* Preallocate a service connection. The connection is placed on the proc and
* reap lists so that we don't have to get the lock from BH context.
*/
-struct rxrpc_connection *rxrpc_prealloc_service_connection(gfp_t gfp)
+struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxnet,
+ gfp_t gfp)
{
struct rxrpc_connection *conn = rxrpc_alloc_connection(gfp);
@@ -132,10 +133,10 @@
conn->state = RXRPC_CONN_SERVICE_PREALLOC;
atomic_set(&conn->usage, 2);
- write_lock(&rxrpc_connection_lock);
- list_add_tail(&conn->link, &rxrpc_connections);
- list_add_tail(&conn->proc_link, &rxrpc_connection_proc_list);
- write_unlock(&rxrpc_connection_lock);
+ write_lock(&rxnet->conn_lock);
+ list_add_tail(&conn->link, &rxnet->service_conns);
+ list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
+ write_unlock(&rxnet->conn_lock);
trace_rxrpc_conn(conn, rxrpc_conn_new_service,
atomic_read(&conn->usage),
@@ -149,7 +150,8 @@
* Set up an incoming connection. This is called in BH context with the RCU
* read lock held.
*/
-void rxrpc_new_incoming_connection(struct rxrpc_connection *conn,
+void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
+ struct rxrpc_connection *conn,
struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
@@ -159,6 +161,7 @@
conn->proto.epoch = sp->hdr.epoch;
conn->proto.cid = sp->hdr.cid & RXRPC_CIDMASK;
conn->params.service_id = sp->hdr.serviceId;
+ conn->service_id = sp->hdr.serviceId;
conn->security_ix = sp->hdr.securityIndex;
conn->out_clientflag = 0;
if (conn->security_ix)
@@ -166,6 +169,14 @@
else
conn->state = RXRPC_CONN_SERVICE;
+ /* See if we should upgrade the service. This can only happen on the
+ * first packet on a new connection. Once done, it applies to all
+ * subsequent calls on that connection.
+ */
+ if (sp->hdr.userStatus == RXRPC_USERSTATUS_SERVICE_UPGRADE &&
+ conn->service_id == rx->service_upgrade.from)
+ conn->service_id = rx->service_upgrade.to;
+
/* Make the connection a target for incoming packets. */
rxrpc_publish_service_conn(conn->params.peer, conn);
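On the client side, the probe that drives this upgrade path is requested through the new 'upgrade' field in rxrpc_conn_parameters; RXRPC_CONN_PROBING_FOR_UPGRADE then causes userStatus to be set to RXRPC_USERSTATUS_SERVICE_UPGRADE on the first packet. A sketch of a kernel-side caller filling the parameters, where local, key and srx are assumed from context:

    struct rxrpc_conn_parameters cp = {
        .local          = local,
        .key            = key,
        .exclusive      = false,
        .upgrade        = true,     /* probe for the upgraded service */
        .security_level = 0,
        .service_id     = srx->srx_service,
    };
    /* rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len, gfp)
     * then proceeds as in the af_rxrpc.c hunk above.
     */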
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 45dba73..e56e23e 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -1142,6 +1142,13 @@
if (sp->hdr.securityIndex != conn->security_ix)
goto wrong_security;
+ if (sp->hdr.serviceId != conn->service_id) {
+ if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) ||
+ conn->service_id != conn->params.service_id)
+ goto reupgrade;
+ conn->service_id = sp->hdr.serviceId;
+ }
+
if (sp->hdr.callNumber == 0) {
/* Connection-level packet */
_debug("CONN %p {%d}", conn, conn->debug_id);
@@ -1194,6 +1201,9 @@
rxrpc_input_implicit_end_call(conn, call);
call = NULL;
}
+
+ if (call && sp->hdr.serviceId != call->service_id)
+ call->service_id = sp->hdr.serviceId;
} else {
skew = 0;
call = NULL;
@@ -1237,11 +1247,18 @@
skb->priority = RXKADINCONSISTENCY;
goto post_abort;
+reupgrade:
+ rcu_read_unlock();
+ trace_rxrpc_abort("UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ RX_PROTOCOL_ERROR, EBADMSG);
+ goto protocol_error;
+
bad_message_unlock:
rcu_read_unlock();
bad_message:
trace_rxrpc_abort("BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RX_PROTOCOL_ERROR, EBADMSG);
+protocol_error:
skb->priority = RX_PROTOCOL_ERROR;
post_abort:
skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index ff4864d5..38b99db 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -25,9 +25,6 @@
static void rxrpc_local_processor(struct work_struct *);
static void rxrpc_local_rcu(struct rcu_head *);
-static DEFINE_MUTEX(rxrpc_local_mutex);
-static LIST_HEAD(rxrpc_local_endpoints);
-
/*
* Compare a local to an address. Return -ve, 0 or +ve to indicate less than,
* same or greater than.
@@ -77,13 +74,15 @@
/*
* Allocate a new local endpoint.
*/
-static struct rxrpc_local *rxrpc_alloc_local(const struct sockaddr_rxrpc *srx)
+static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
+ const struct sockaddr_rxrpc *srx)
{
struct rxrpc_local *local;
local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
if (local) {
atomic_set(&local->usage, 1);
+ local->rxnet = rxnet;
INIT_LIST_HEAD(&local->link);
INIT_WORK(&local->processor, rxrpc_local_processor);
init_rwsem(&local->defrag_sem);
@@ -95,6 +94,7 @@
rwlock_init(&local->services_lock);
local->debug_id = atomic_inc_return(&rxrpc_debug_id);
memcpy(&local->srx, srx, sizeof(*srx));
+ local->srx.srx_service = 0;
}
_leave(" = %p", local);
@@ -105,7 +105,7 @@
* create the local socket
* - must be called with rxrpc_local_mutex locked
*/
-static int rxrpc_open_socket(struct rxrpc_local *local)
+static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
{
struct sock *sock;
int ret, opt;
@@ -114,7 +114,7 @@
local, local->srx.transport_type, local->srx.transport.family);
/* create a socket to represent the local endpoint */
- ret = sock_create_kern(&init_net, local->srx.transport.family,
+ ret = sock_create_kern(net, local->srx.transport.family,
local->srx.transport_type, 0, &local->socket);
if (ret < 0) {
_leave(" = %d [socket]", ret);
@@ -172,9 +172,11 @@
/*
* Look up or create a new local endpoint using the specified local address.
*/
-struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx)
+struct rxrpc_local *rxrpc_lookup_local(struct net *net,
+ const struct sockaddr_rxrpc *srx)
{
struct rxrpc_local *local;
+ struct rxrpc_net *rxnet = rxrpc_net(net);
struct list_head *cursor;
const char *age;
long diff;
@@ -183,10 +185,10 @@
_enter("{%d,%d,%pISp}",
srx->transport_type, srx->transport.family, &srx->transport);
- mutex_lock(&rxrpc_local_mutex);
+ mutex_lock(&rxnet->local_mutex);
- for (cursor = rxrpc_local_endpoints.next;
- cursor != &rxrpc_local_endpoints;
+ for (cursor = rxnet->local_endpoints.next;
+ cursor != &rxnet->local_endpoints;
cursor = cursor->next) {
local = list_entry(cursor, struct rxrpc_local, link);
@@ -220,11 +222,11 @@
goto found;
}
- local = rxrpc_alloc_local(srx);
+ local = rxrpc_alloc_local(rxnet, srx);
if (!local)
goto nomem;
- ret = rxrpc_open_socket(local);
+ ret = rxrpc_open_socket(local, net);
if (ret < 0)
goto sock_error;
@@ -232,7 +234,7 @@
age = "new";
found:
- mutex_unlock(&rxrpc_local_mutex);
+ mutex_unlock(&rxnet->local_mutex);
_net("LOCAL %s %d {%pISp}",
age, local->debug_id, &local->srx.transport);
@@ -243,13 +245,13 @@
nomem:
ret = -ENOMEM;
sock_error:
- mutex_unlock(&rxrpc_local_mutex);
+ mutex_unlock(&rxnet->local_mutex);
kfree(local);
_leave(" = %d", ret);
return ERR_PTR(ret);
addr_in_use:
- mutex_unlock(&rxrpc_local_mutex);
+ mutex_unlock(&rxnet->local_mutex);
_leave(" = -EADDRINUSE");
return ERR_PTR(-EADDRINUSE);
}
@@ -273,6 +275,7 @@
static void rxrpc_local_destroyer(struct rxrpc_local *local)
{
struct socket *socket = local->socket;
+ struct rxrpc_net *rxnet = local->rxnet;
_enter("%d", local->debug_id);
@@ -286,9 +289,9 @@
}
local->dead = true;
- mutex_lock(&rxrpc_local_mutex);
+ mutex_lock(&rxnet->local_mutex);
list_del_init(&local->link);
- mutex_unlock(&rxrpc_local_mutex);
+ mutex_unlock(&rxnet->local_mutex);
ASSERT(RB_EMPTY_ROOT(&local->client_conns));
ASSERT(!local->service);
@@ -357,7 +360,7 @@
/*
* Verify the local endpoint list is empty by this point.
*/
-void __exit rxrpc_destroy_all_locals(void)
+void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet)
{
struct rxrpc_local *local;
@@ -365,15 +368,13 @@
flush_workqueue(rxrpc_workqueue);
- if (!list_empty(&rxrpc_local_endpoints)) {
- mutex_lock(&rxrpc_local_mutex);
- list_for_each_entry(local, &rxrpc_local_endpoints, link) {
+ if (!list_empty(&rxnet->local_endpoints)) {
+ mutex_lock(&rxnet->local_mutex);
+ list_for_each_entry(local, &rxnet->local_endpoints, link) {
pr_err("AF_RXRPC: Leaked local %p {%d}\n",
local, atomic_read(&local->usage));
}
- mutex_unlock(&rxrpc_local_mutex);
+ mutex_unlock(&rxnet->local_mutex);
BUG();
}
-
- rcu_barrier();
}
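With the endpoint list now per-namespace, rxrpc_lookup_local() needs the caller's netns passed in; a minimal caller-side sketch, assuming the usual sock_net() accessor (the actual call-site updates live elsewhere in this patch):

	/* Hypothetical bind/connect-path caller: derive the namespace
	 * from the socket instead of hard-coding init_net.
	 */
	struct rxrpc_local *local;

	local = rxrpc_lookup_local(sock_net(&rx->sk), &rx->srx);
	if (IS_ERR(local))
		return PTR_ERR(local);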
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
new file mode 100644
index 0000000..7edceb8
--- /dev/null
+++ b/net/rxrpc/net_ns.c
@@ -0,0 +1,84 @@
+/* rxrpc network namespace handling.
+ *
+ * Copyright (C) 2017 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/proc_fs.h>
+#include "ar-internal.h"
+
+unsigned int rxrpc_net_id;
+
+/*
+ * Initialise a per-network namespace record.
+ */
+static __net_init int rxrpc_init_net(struct net *net)
+{
+ struct rxrpc_net *rxnet = rxrpc_net(net);
+ int ret;
+
+ get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch));
+ rxnet->epoch |= RXRPC_RANDOM_EPOCH;
+
+ INIT_LIST_HEAD(&rxnet->calls);
+ rwlock_init(&rxnet->call_lock);
+
+ INIT_LIST_HEAD(&rxnet->conn_proc_list);
+ INIT_LIST_HEAD(&rxnet->service_conns);
+ rwlock_init(&rxnet->conn_lock);
+ INIT_DELAYED_WORK(&rxnet->service_conn_reaper,
+ rxrpc_service_connection_reaper);
+
+ rxnet->nr_client_conns = 0;
+ rxnet->nr_active_client_conns = 0;
+ rxnet->kill_all_client_conns = false;
+ spin_lock_init(&rxnet->client_conn_cache_lock);
+ spin_lock_init(&rxnet->client_conn_discard_lock);
+ INIT_LIST_HEAD(&rxnet->waiting_client_conns);
+ INIT_LIST_HEAD(&rxnet->active_client_conns);
+ INIT_LIST_HEAD(&rxnet->idle_client_conns);
+ INIT_DELAYED_WORK(&rxnet->client_conn_reaper,
+ rxrpc_discard_expired_client_conns);
+
+ INIT_LIST_HEAD(&rxnet->local_endpoints);
+ mutex_init(&rxnet->local_mutex);
+ hash_init(rxnet->peer_hash);
+ spin_lock_init(&rxnet->peer_hash_lock);
+
+ ret = -ENOMEM;
+ rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net);
+ if (!rxnet->proc_net)
+ goto err_proc;
+
+ proc_create("calls", 0444, rxnet->proc_net, &rxrpc_call_seq_fops);
+ proc_create("conns", 0444, rxnet->proc_net, &rxrpc_connection_seq_fops);
+ return 0;
+
+err_proc:
+ return ret;
+}
+
+/*
+ * Clean up a per-network namespace record.
+ */
+static __net_exit void rxrpc_exit_net(struct net *net)
+{
+ struct rxrpc_net *rxnet = rxrpc_net(net);
+
+ rxrpc_destroy_all_calls(rxnet);
+ rxrpc_destroy_all_connections(rxnet);
+ rxrpc_destroy_all_locals(rxnet);
+ proc_remove(rxnet->proc_net);
+}
+
+struct pernet_operations rxrpc_net_ops = {
+ .init = rxrpc_init_net,
+ .exit = rxrpc_exit_net,
+ .id = &rxrpc_net_id,
+ .size = sizeof(struct rxrpc_net),
+};
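rxrpc_net() as used above is presumably the conventional net_generic() accessor declared in ar-internal.h, with rxrpc_net_ops registered from the module init path; a sketch under those assumptions:

	/* Assumed accessor (declared in ar-internal.h, not in this diff): */
	static inline struct rxrpc_net *rxrpc_net(struct net *net)
	{
		return net_generic(net, rxrpc_net_id);
	}

	/* Assumed registration from af_rxrpc initialisation: */
	ret = register_pernet_subsys(&rxrpc_net_ops);
	if (ret)
		goto error_pernet;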
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 5dab1ff..5bd2d0f 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -292,6 +292,10 @@
whdr._rsvd = htons(sp->hdr._rsvd);
whdr.serviceId = htons(call->service_id);
+ if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) &&
+ sp->hdr.seq == 1)
+ whdr.userStatus = RXRPC_USERSTATUS_SERVICE_UPGRADE;
+
iov[0].iov_base = &whdr;
iov[0].iov_len = sizeof(whdr);
iov[1].iov_base = skb->head;
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 862eea6..5787f97 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -26,9 +26,6 @@
#include <net/ip6_route.h>
#include "ar-internal.h"
-static DEFINE_HASHTABLE(rxrpc_peer_hash, 10);
-static DEFINE_SPINLOCK(rxrpc_peer_hash_lock);
-
/*
* Hash a peer key.
*/
@@ -124,8 +121,9 @@
unsigned long hash_key)
{
struct rxrpc_peer *peer;
+ struct rxrpc_net *rxnet = local->rxnet;
- hash_for_each_possible_rcu(rxrpc_peer_hash, peer, hash_link, hash_key) {
+ hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0) {
if (atomic_read(&peer->usage) == 0)
return NULL;
@@ -230,6 +228,13 @@
seqlock_init(&peer->service_conn_lock);
spin_lock_init(&peer->lock);
peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
+
+ if (RXRPC_TX_SMSS > 2190)
+ peer->cong_cwnd = 2;
+ else if (RXRPC_TX_SMSS > 1095)
+ peer->cong_cwnd = 3;
+ else
+ peer->cong_cwnd = 4;
}
_leave(" = %p", peer);
@@ -301,13 +306,14 @@
struct rxrpc_peer *prealloc)
{
struct rxrpc_peer *peer;
+ struct rxrpc_net *rxnet = local->rxnet;
unsigned long hash_key;
hash_key = rxrpc_peer_hash_key(local, &prealloc->srx);
prealloc->local = local;
rxrpc_init_peer(prealloc, hash_key);
- spin_lock(&rxrpc_peer_hash_lock);
+ spin_lock(&rxnet->peer_hash_lock);
/* Need to check that we aren't racing with someone else */
peer = __rxrpc_lookup_peer_rcu(local, &prealloc->srx, hash_key);
@@ -315,10 +321,10 @@
peer = NULL;
if (!peer) {
peer = prealloc;
- hash_add_rcu(rxrpc_peer_hash, &peer->hash_link, hash_key);
+ hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
}
- spin_unlock(&rxrpc_peer_hash_lock);
+ spin_unlock(&rxnet->peer_hash_lock);
return peer;
}
@@ -329,6 +335,7 @@
struct sockaddr_rxrpc *srx, gfp_t gfp)
{
struct rxrpc_peer *peer, *candidate;
+ struct rxrpc_net *rxnet = local->rxnet;
unsigned long hash_key = rxrpc_peer_hash_key(local, srx);
_enter("{%pISp}", &srx->transport);
@@ -350,17 +357,17 @@
return NULL;
}
- spin_lock_bh(&rxrpc_peer_hash_lock);
+ spin_lock_bh(&rxnet->peer_hash_lock);
/* Need to check that we aren't racing with someone else */
peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
if (peer && !rxrpc_get_peer_maybe(peer))
peer = NULL;
if (!peer)
- hash_add_rcu(rxrpc_peer_hash,
+ hash_add_rcu(rxnet->peer_hash,
&candidate->hash_link, hash_key);
- spin_unlock_bh(&rxrpc_peer_hash_lock);
+ spin_unlock_bh(&rxnet->peer_hash_lock);
if (peer)
kfree(candidate);
@@ -379,11 +386,13 @@
*/
void __rxrpc_put_peer(struct rxrpc_peer *peer)
{
+ struct rxrpc_net *rxnet = peer->local->rxnet;
+
ASSERT(hlist_empty(&peer->error_targets));
- spin_lock_bh(&rxrpc_peer_hash_lock);
+ spin_lock_bh(&rxnet->peer_hash_lock);
hash_del_rcu(&peer->hash_link);
- spin_unlock_bh(&rxrpc_peer_hash_lock);
+ spin_unlock_bh(&rxnet->peer_hash_lock);
kfree_rcu(peer, rcu);
}
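The cong_cwnd initialisation above is the RFC 5681 section 3.1 initial-window rule, IW = min(4*SMSS, max(2*SMSS, 4380 bytes)), stored as a packet count; a sketch restating the same rule:

	/* Packet-count form of RFC 5681's byte-based initial window;
	 * matches the three-way branch on RXRPC_TX_SMSS above.
	 */
	static unsigned int initial_cwnd_packets(unsigned int smss)
	{
		if (smss > 2190)
			return 2;	/* 2 * SMSS already exceeds 4380 bytes */
		if (smss > 1095)
			return 3;	/* IW of 4380 bytes, roughly 3 segments */
		return 4;		/* 4 * SMSS fits within 4380 bytes */
	}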
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index b9bcfbf..7421656 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -30,19 +30,25 @@
*/
static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
{
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+
rcu_read_lock();
- read_lock(&rxrpc_call_lock);
- return seq_list_start_head(&rxrpc_calls, *_pos);
+ read_lock(&rxnet->call_lock);
+ return seq_list_start_head(&rxnet->calls, *_pos);
}
static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- return seq_list_next(v, &rxrpc_calls, pos);
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+
+ return seq_list_next(v, &rxnet->calls, pos);
}
static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
{
- read_unlock(&rxrpc_call_lock);
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+
+ read_unlock(&rxnet->call_lock);
rcu_read_unlock();
}
@@ -52,10 +58,11 @@
struct rxrpc_sock *rx;
struct rxrpc_peer *peer;
struct rxrpc_call *call;
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
rxrpc_seq_t tx_hard_ack, rx_hard_ack;
char lbuff[50], rbuff[50];
- if (v == &rxrpc_calls) {
+ if (v == &rxnet->calls) {
seq_puts(seq,
"Proto Local "
" Remote "
@@ -113,7 +120,8 @@
static int rxrpc_call_seq_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &rxrpc_call_seq_ops);
+ return seq_open_net(inode, file, &rxrpc_call_seq_ops,
+ sizeof(struct seq_net_private));
}
const struct file_operations rxrpc_call_seq_fops = {
@@ -129,27 +137,34 @@
*/
static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
{
- read_lock(&rxrpc_connection_lock);
- return seq_list_start_head(&rxrpc_connection_proc_list, *_pos);
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+
+ read_lock(&rxnet->conn_lock);
+ return seq_list_start_head(&rxnet->conn_proc_list, *_pos);
}
static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
loff_t *pos)
{
- return seq_list_next(v, &rxrpc_connection_proc_list, pos);
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+
+ return seq_list_next(v, &rxnet->conn_proc_list, pos);
}
static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
{
- read_unlock(&rxrpc_connection_lock);
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+
+ read_unlock(&rxnet->conn_lock);
}
static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
{
struct rxrpc_connection *conn;
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
char lbuff[50], rbuff[50];
- if (v == &rxrpc_connection_proc_list) {
+ if (v == &rxnet->conn_proc_list) {
seq_puts(seq,
"Proto Local "
" Remote "
@@ -175,7 +190,7 @@
" %s %08x %08x %08x\n",
lbuff,
rbuff,
- conn->params.service_id,
+ conn->service_id,
conn->proto.cid,
rxrpc_conn_is_service(conn) ? "Svc" : "Clt",
atomic_read(&conn->usage),
@@ -197,7 +212,8 @@
static int rxrpc_connection_seq_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &rxrpc_connection_seq_ops);
+ return seq_open_net(inode, file, &rxrpc_connection_seq_ops,
+ sizeof(struct seq_net_private));
}
const struct file_operations rxrpc_connection_seq_fops = {
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index f9caf3b..bdece21 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -522,8 +522,11 @@
}
if (msg->msg_name) {
- size_t len = sizeof(call->conn->params.peer->srx);
- memcpy(msg->msg_name, &call->conn->params.peer->srx, len);
+ struct sockaddr_rxrpc *srx = msg->msg_name;
+ size_t len = sizeof(call->peer->srx);
+
+ memcpy(msg->msg_name, &call->peer->srx, len);
+ srx->srx_service = call->service_id;
msg->msg_namelen = len;
}
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 1bb9b2c..46d1a1f 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -227,7 +227,9 @@
len &= ~(call->conn->size_align - 1);
sg_init_table(sg, nsg);
- skb_to_sgvec(skb, sg, 0, len);
+ err = skb_to_sgvec(skb, sg, 0, len);
+ if (unlikely(err < 0))
+ goto out;
skcipher_request_set_crypt(req, sg, sg, len, iv.x);
crypto_skcipher_encrypt(req);
@@ -324,7 +326,7 @@
bool aborted;
u32 data_size, buf;
u16 check;
- int nsg;
+ int nsg, ret;
_enter("");
@@ -342,7 +344,9 @@
goto nomem;
sg_init_table(sg, nsg);
- skb_to_sgvec(skb, sg, offset, 8);
+ ret = skb_to_sgvec(skb, sg, offset, 8);
+ if (unlikely(ret < 0))
+ return ret;
/* start the decryption afresh */
memset(&iv, 0, sizeof(iv));
@@ -409,7 +413,7 @@
bool aborted;
u32 data_size, buf;
u16 check;
- int nsg;
+ int nsg, ret;
_enter(",{%d}", skb->len);
@@ -434,7 +438,12 @@
}
sg_init_table(sg, nsg);
- skb_to_sgvec(skb, sg, offset, len);
+ ret = skb_to_sgvec(skb, sg, offset, len);
+ if (unlikely(ret < 0)) {
+ if (sg != _sg)
+ kfree(sg);
+ return ret;
+ }
/* decrypt from the session key */
token = call->conn->params.key->payload.data[0];
@@ -640,7 +649,7 @@
whdr.userStatus = 0;
whdr.securityIndex = conn->security_ix;
whdr._rsvd = 0;
- whdr.serviceId = htons(conn->params.service_id);
+ whdr.serviceId = htons(conn->service_id);
iov[0].iov_base = &whdr;
iov[0].iov_len = sizeof(whdr);
diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c
index 7d921e5..e9f4283 100644
--- a/net/rxrpc/security.c
+++ b/net/rxrpc/security.c
@@ -121,7 +121,7 @@
_enter("");
- sprintf(kdesc, "%u:%u", conn->params.service_id, conn->security_ix);
+ sprintf(kdesc, "%u:%u", conn->service_id, conn->security_ix);
sec = rxrpc_security_lookup(conn->security_ix);
if (!sec) {
@@ -133,7 +133,8 @@
read_lock(&local->services_lock);
rx = rcu_dereference_protected(local->service,
lockdep_is_held(&local->services_lock));
- if (rx && rx->srx.srx_service == conn->params.service_id)
+ if (rx && (rx->srx.srx_service == conn->service_id ||
+ rx->second_service == conn->service_id))
goto found_service;
/* the service appears to have died */
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 96ffa5d..b0d2cda 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -28,6 +28,15 @@
RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */
};
+struct rxrpc_send_params {
+ s64 tx_total_len; /* Total Tx data length (if send data) */
+ unsigned long user_call_ID; /* User's call ID */
+ u32 abort_code; /* Abort code to Tx (if abort) */
+ enum rxrpc_command command : 8; /* The command to implement */
+ bool exclusive; /* Shared or exclusive call */
+ bool upgrade; /* If the connection is upgradeable */
+};
+
/*
* wait for space to appear in the transmit/ACK window
* - caller holds the socket locked
@@ -199,6 +208,13 @@
more = msg->msg_flags & MSG_MORE;
+ if (call->tx_total_len != -1) {
+ if (len > call->tx_total_len)
+ return -EMSGSIZE;
+ if (!more && len != call->tx_total_len)
+ return -EMSGSIZE;
+ }
+
skb = call->tx_pending;
call->tx_pending = NULL;
rxrpc_see_skb(skb, rxrpc_skb_tx_seen);
@@ -291,6 +307,8 @@
sp->remain -= copy;
skb->mark += copy;
copied += copy;
+ if (call->tx_total_len != -1)
+ call->tx_total_len -= copy;
}
/* check for the far side aborting the call or a network error
@@ -312,7 +330,7 @@
pad &= conn->size_align - 1;
_debug("pad %zu", pad);
if (pad)
- memset(skb_put(skb, pad), 0, pad);
+ skb_put_zero(skb, pad);
}
seq = call->tx_top + 1;
@@ -362,18 +380,12 @@
/*
* extract control messages from the sendmsg() control buffer
*/
-static int rxrpc_sendmsg_cmsg(struct msghdr *msg,
- unsigned long *user_call_ID,
- enum rxrpc_command *command,
- u32 *abort_code,
- bool *_exclusive)
+static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
{
struct cmsghdr *cmsg;
bool got_user_ID = false;
int len;
- *command = RXRPC_CMD_SEND_DATA;
-
if (msg->msg_controllen == 0)
return -EINVAL;
@@ -393,42 +405,55 @@
if (msg->msg_flags & MSG_CMSG_COMPAT) {
if (len != sizeof(u32))
return -EINVAL;
- *user_call_ID = *(u32 *) CMSG_DATA(cmsg);
+ p->user_call_ID = *(u32 *)CMSG_DATA(cmsg);
} else {
if (len != sizeof(unsigned long))
return -EINVAL;
- *user_call_ID = *(unsigned long *)
+ p->user_call_ID = *(unsigned long *)
CMSG_DATA(cmsg);
}
- _debug("User Call ID %lx", *user_call_ID);
got_user_ID = true;
break;
case RXRPC_ABORT:
- if (*command != RXRPC_CMD_SEND_DATA)
+ if (p->command != RXRPC_CMD_SEND_DATA)
return -EINVAL;
- *command = RXRPC_CMD_SEND_ABORT;
- if (len != sizeof(*abort_code))
+ p->command = RXRPC_CMD_SEND_ABORT;
+ if (len != sizeof(p->abort_code))
return -EINVAL;
- *abort_code = *(unsigned int *) CMSG_DATA(cmsg);
- _debug("Abort %x", *abort_code);
- if (*abort_code == 0)
+ p->abort_code = *(unsigned int *)CMSG_DATA(cmsg);
+ if (p->abort_code == 0)
return -EINVAL;
break;
case RXRPC_ACCEPT:
- if (*command != RXRPC_CMD_SEND_DATA)
+ if (p->command != RXRPC_CMD_SEND_DATA)
return -EINVAL;
- *command = RXRPC_CMD_ACCEPT;
+ p->command = RXRPC_CMD_ACCEPT;
if (len != 0)
return -EINVAL;
break;
case RXRPC_EXCLUSIVE_CALL:
- *_exclusive = true;
+ p->exclusive = true;
if (len != 0)
return -EINVAL;
break;
+
+ case RXRPC_UPGRADE_SERVICE:
+ p->upgrade = true;
+ if (len != 0)
+ return -EINVAL;
+ break;
+
+ case RXRPC_TX_LENGTH:
+ if (p->tx_total_len != -1 || len != sizeof(__s64))
+ return -EINVAL;
+ p->tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
+ if (p->tx_total_len < 0)
+ return -EINVAL;
+ break;
+
default:
return -EINVAL;
}
@@ -436,6 +461,8 @@
if (!got_user_ID)
return -EINVAL;
+ if (p->tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
+ return -EINVAL;
_leave(" = 0");
return 0;
}
@@ -447,7 +474,7 @@
*/
static struct rxrpc_call *
rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
- unsigned long user_call_ID, bool exclusive)
+ struct rxrpc_send_params *p)
__releases(&rx->sk.sk_lock.slock)
{
struct rxrpc_conn_parameters cp;
@@ -471,9 +498,11 @@
cp.local = rx->local;
cp.key = rx->key;
cp.security_level = rx->min_sec_level;
- cp.exclusive = rx->exclusive | exclusive;
+ cp.exclusive = rx->exclusive | p->exclusive;
+ cp.upgrade = p->upgrade;
cp.service_id = srx->srx_service;
- call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, GFP_KERNEL);
+ call = rxrpc_new_client_call(rx, &cp, srx, p->user_call_ID,
+ p->tx_total_len, GFP_KERNEL);
/* The socket is now unlocked */
_leave(" = %p\n", call);
@@ -489,25 +518,29 @@
__releases(&rx->sk.sk_lock.slock)
{
enum rxrpc_call_state state;
- enum rxrpc_command cmd;
struct rxrpc_call *call;
- unsigned long user_call_ID = 0;
- bool exclusive = false;
- u32 abort_code = 0;
int ret;
+ struct rxrpc_send_params p = {
+ .tx_total_len = -1,
+ .user_call_ID = 0,
+ .abort_code = 0,
+ .command = RXRPC_CMD_SEND_DATA,
+ .exclusive = false,
+ .upgrade = false,
+ };
+
_enter("");
- ret = rxrpc_sendmsg_cmsg(msg, &user_call_ID, &cmd, &abort_code,
- &exclusive);
+ ret = rxrpc_sendmsg_cmsg(msg, &p);
if (ret < 0)
goto error_release_sock;
- if (cmd == RXRPC_CMD_ACCEPT) {
+ if (p.command == RXRPC_CMD_ACCEPT) {
ret = -EINVAL;
if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
goto error_release_sock;
- call = rxrpc_accept_call(rx, user_call_ID, NULL);
+ call = rxrpc_accept_call(rx, p.user_call_ID, NULL);
/* The socket is now unlocked. */
if (IS_ERR(call))
return PTR_ERR(call);
@@ -515,13 +548,12 @@
return 0;
}
- call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
+ call = rxrpc_find_call_by_user_ID(rx, p.user_call_ID);
if (!call) {
ret = -EBADSLT;
- if (cmd != RXRPC_CMD_SEND_DATA)
+ if (p.command != RXRPC_CMD_SEND_DATA)
goto error_release_sock;
- call = rxrpc_new_client_call_for_sendmsg(rx, msg, user_call_ID,
- exclusive);
+ call = rxrpc_new_client_call_for_sendmsg(rx, msg, &p);
/* The socket is now unlocked... */
if (IS_ERR(call))
return PTR_ERR(call);
@@ -545,6 +577,15 @@
ret = -ERESTARTSYS;
goto error_put;
}
+
+ if (p.tx_total_len != -1) {
+ ret = -EINVAL;
+ if (call->tx_total_len != -1 ||
+ call->tx_pending ||
+ call->tx_top != 0)
+ goto error_put;
+ call->tx_total_len = p.tx_total_len;
+ }
}
state = READ_ONCE(call->state);
@@ -554,11 +595,11 @@
if (state >= RXRPC_CALL_COMPLETE) {
/* it's too late for this call */
ret = -ESHUTDOWN;
- } else if (cmd == RXRPC_CMD_SEND_ABORT) {
+ } else if (p.command == RXRPC_CMD_SEND_ABORT) {
ret = 0;
- if (rxrpc_abort_call("CMD", call, 0, abort_code, -ECONNABORTED))
+ if (rxrpc_abort_call("CMD", call, 0, p.abort_code, -ECONNABORTED))
ret = rxrpc_send_abort_packet(call);
- } else if (cmd != RXRPC_CMD_SEND_DATA) {
+ } else if (p.command != RXRPC_CMD_SEND_DATA) {
ret = -EINVAL;
} else if (rxrpc_is_client_call(call) &&
state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
@@ -662,5 +703,24 @@
mutex_unlock(&call->user_mutex);
return aborted;
}
-
EXPORT_SYMBOL(rxrpc_kernel_abort_call);
+
+/**
+ * rxrpc_kernel_set_tx_length - Set the total Tx length on a call
+ * @sock: The socket the call is on
+ * @call: The call to be informed
+ * @tx_total_len: The amount of data to be transmitted for this call
+ *
+ * Allow a kernel service to set the total transmit length on a call. This
+ * allows buffer-to-packet encrypt-and-copy to be performed.
+ *
+ * This function is primarily for use for setting the reply length since the
+ * request length can be set when beginning the call.
+ */
+void rxrpc_kernel_set_tx_length(struct socket *sock, struct rxrpc_call *call,
+ s64 tx_total_len)
+{
+ WARN_ON(call->tx_total_len != -1);
+ call->tx_total_len = tx_total_len;
+}
+EXPORT_SYMBOL(rxrpc_kernel_set_tx_length);
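Taken together, the new control messages let a client declare the call's total Tx length up front and request a service upgrade on the first packet; a hedged userspace sketch (fd, iov and the server's sockaddr_rxrpc srx assumed already set up; RXRPC_USER_CALL_ID and SOL_RXRPC as provided by linux/rxrpc.h):

	unsigned long call_id = 1;	/* user call ID of our choosing */
	__s64 request_len = 1024;	/* total bytes this call will send */
	char control[CMSG_SPACE(sizeof(call_id)) +
		     CMSG_SPACE(sizeof(request_len)) + CMSG_SPACE(0)];
	struct msghdr msg;
	struct cmsghdr *cmsg;

	memset(&msg, 0, sizeof(msg));
	memset(control, 0, sizeof(control));
	msg.msg_name = &srx;		/* the server's address */
	msg.msg_namelen = sizeof(srx);
	msg.msg_iov = &iov;		/* the request data */
	msg.msg_iovlen = 1;
	msg.msg_control = control;
	msg.msg_controllen = sizeof(control);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_USER_CALL_ID;
	cmsg->cmsg_len = CMSG_LEN(sizeof(call_id));
	memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_TX_LENGTH;	/* must match the data sent */
	cmsg->cmsg_len = CMSG_LEN(sizeof(request_len));
	memcpy(CMSG_DATA(cmsg), &request_len, sizeof(request_len));

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_UPGRADE_SERVICE;	/* zero-length flag */
	cmsg->cmsg_len = CMSG_LEN(0);

	if (sendmsg(fd, &msg, 0) < 0)
		perror("sendmsg");

On the reply, recvmsg() fills msg_name with a sockaddr_rxrpc whose srx_service carries the possibly upgraded service ID, per the recvmsg.c hunk above, so the client can tell whether the server switched services.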
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 9fb84f0..e70ed26 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -649,6 +649,7 @@
config NET_CLS_ACT
bool "Actions"
+ select NET_CLS
---help---
Say Y here if you want to use traffic control actions. Actions
get attached to classifiers and are invoked after a successful
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index a90e8f3..aed6cf2 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -28,6 +28,31 @@
#include <net/act_api.h>
#include <net/netlink.h>
+static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp)
+{
+ u32 chain_index = a->tcfa_action & TC_ACT_EXT_VAL_MASK;
+
+ if (!tp)
+ return -EINVAL;
+ a->goto_chain = tcf_chain_get(tp->chain->block, chain_index, true);
+ if (!a->goto_chain)
+ return -ENOMEM;
+ return 0;
+}
+
+static void tcf_action_goto_chain_fini(struct tc_action *a)
+{
+ tcf_chain_put(a->goto_chain);
+}
+
+static void tcf_action_goto_chain_exec(const struct tc_action *a,
+ struct tcf_result *res)
+{
+ const struct tcf_chain *chain = a->goto_chain;
+
+ res->goto_tp = rcu_dereference_bh(chain->filter_chain);
+}
+
static void free_tcf(struct rcu_head *head)
{
struct tc_action *p = container_of(head, struct tc_action, tcfa_rcu);
@@ -39,6 +64,8 @@
kfree(p->act_cookie->data);
kfree(p->act_cookie);
}
+ if (p->goto_chain)
+ tcf_action_goto_chain_fini(p);
kfree(p);
}
@@ -465,6 +492,8 @@
else /* faulty graph, stop pipeline */
return TC_ACT_OK;
}
+ } else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
+ tcf_action_goto_chain_exec(a, res);
}
if (ret != TC_ACT_PIPE)
@@ -570,9 +599,9 @@
return c;
}
-struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
- struct nlattr *est, char *name, int ovr,
- int bind)
+struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+ struct nlattr *nla, struct nlattr *est,
+ char *name, int ovr, int bind)
{
struct tc_action *a;
struct tc_action_ops *a_o;
@@ -657,6 +686,17 @@
if (err != ACT_P_CREATED)
module_put(a_o->owner);
+ if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
+ err = tcf_action_goto_chain_init(a, tp);
+ if (err) {
+ LIST_HEAD(actions);
+
+ list_add_tail(&a->list, &actions);
+ tcf_action_destroy(&actions, bind);
+ return ERR_PTR(err);
+ }
+ }
+
return a;
err_mod:
@@ -680,8 +720,9 @@
a->tcfa_refcnt--;
}
-int tcf_action_init(struct net *net, struct nlattr *nla, struct nlattr *est,
- char *name, int ovr, int bind, struct list_head *actions)
+int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
+ struct nlattr *est, char *name, int ovr, int bind,
+ struct list_head *actions)
{
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct tc_action *act;
@@ -693,7 +734,7 @@
return err;
for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
- act = tcf_action_init_1(net, tb[i], est, name, ovr, bind);
+ act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind);
if (IS_ERR(act)) {
err = PTR_ERR(act);
goto err;
@@ -1020,7 +1061,7 @@
int ret = 0;
LIST_HEAD(actions);
- ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions);
+ ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, &actions);
if (ret)
return ret;
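A goto-chain verdict packs the target chain index into the low bits of the action code: tcf_action_goto_chain_init() extracts it with TC_ACT_EXT_VAL_MASK, and tcf_classify() (added in cls_api.c below) matches the opcode with TC_ACT_EXT_CMP(). A sketch of that encoding:

	/* Sketch: opcode in the high bits, chain index in the low
	 * TC_ACT_EXT_VAL_MASK bits, as assumed by the helpers above.
	 */
	u32 verdict = TC_ACT_GOTO_CHAIN | 1;	/* jump to chain 1 */
	u32 next_chain;

	if (TC_ACT_EXT_CMP(verdict, TC_ACT_GOTO_CHAIN))
		next_chain = verdict & TC_ACT_EXT_VAL_MASK;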
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index d33947d..9afe133 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -123,6 +123,9 @@
nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
return -EMSGSIZE;
+ if (nla_put_u32(skb, TCA_ACT_BPF_ID, prog->filter->aux->id))
+ return -EMSGSIZE;
+
nla = nla_reserve(skb, TCA_ACT_BPF_TAG, sizeof(prog->filter->tag));
if (nla == NULL)
return -EMSGSIZE;
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index ab6fdbd..3317a2f 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -350,6 +350,7 @@
sctph->checksum = sctp_compute_cksum(skb,
skb_network_offset(skb) + ihl);
skb->ip_summed = CHECKSUM_NONE;
+ skb->csum_not_inet = 0;
return 1;
}
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index b9a2f24..fd7e756 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -67,6 +67,7 @@
[TCA_TUNNEL_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
[TCA_TUNNEL_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
[TCA_TUNNEL_KEY_ENC_DST_PORT] = {.type = NLA_U16},
+ [TCA_TUNNEL_KEY_NO_CSUM] = { .type = NLA_U8 },
};
static int tunnel_key_init(struct net *net, struct nlattr *nla,
@@ -83,6 +84,7 @@
bool exists = false;
__be16 dst_port = 0;
__be64 key_id;
+ __be16 flags;
int ret = 0;
int err;
@@ -113,6 +115,11 @@
key_id = key32_to_tunnel_id(nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]));
+ flags = TUNNEL_KEY | TUNNEL_CSUM;
+ if (tb[TCA_TUNNEL_KEY_NO_CSUM] &&
+ nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM]))
+ flags &= ~TUNNEL_CSUM;
+
if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);
@@ -125,7 +132,7 @@
daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);
metadata = __ip_tun_set_dst(saddr, daddr, 0, 0,
- dst_port, TUNNEL_KEY,
+ dst_port, flags,
key_id, 0);
} else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
@@ -136,7 +143,7 @@
daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);
metadata = __ipv6_tun_set_dst(&saddr, &daddr, 0, 0, dst_port,
- 0, TUNNEL_KEY,
+ 0, flags,
key_id, 0);
}
@@ -266,7 +273,9 @@
if (nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id) ||
tunnel_key_dump_addresses(skb,
&params->tcft_enc_metadata->u.tun_info) ||
- nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT, key->tp_dst))
+ nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT, key->tp_dst) ||
+ nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
+ !(key->tun_flags & TUNNEL_CSUM)))
goto nla_put_failure;
}
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 22f88b3..39da0c5 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -106,13 +106,12 @@
static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
struct nlmsghdr *n,
- struct tcf_proto __rcu **chain, int event)
+ struct tcf_chain *chain, int event)
{
- struct tcf_proto __rcu **it_chain;
struct tcf_proto *tp;
- for (it_chain = chain; (tp = rtnl_dereference(*it_chain)) != NULL;
- it_chain = &tp->next)
+ for (tp = rtnl_dereference(chain->filter_chain);
+ tp; tp = rtnl_dereference(tp->next))
tfilter_notify(net, oskb, n, tp, 0, event, false);
}
@@ -125,11 +124,12 @@
if (tp)
first = tp->prio - 1;
- return first;
+ return TC_H_MAJ(first);
}
static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
- u32 prio, u32 parent, struct Qdisc *q)
+ u32 prio, u32 parent, struct Qdisc *q,
+ struct tcf_chain *chain)
{
struct tcf_proto *tp;
int err;
@@ -165,6 +165,7 @@
tp->prio = prio;
tp->classid = parent;
tp->q = q;
+ tp->chain = chain;
err = tp->ops->init(tp);
if (err) {
@@ -185,16 +186,226 @@
kfree_rcu(tp, rcu);
}
-void tcf_destroy_chain(struct tcf_proto __rcu **fl)
+static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
+ u32 chain_index)
+{
+ struct tcf_chain *chain;
+
+ chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+ if (!chain)
+ return NULL;
+ list_add_tail(&chain->list, &block->chain_list);
+ chain->block = block;
+ chain->index = chain_index;
+ chain->refcnt = 1;
+ return chain;
+}
+
+static void tcf_chain_flush(struct tcf_chain *chain)
{
struct tcf_proto *tp;
- while ((tp = rtnl_dereference(*fl)) != NULL) {
- RCU_INIT_POINTER(*fl, tp->next);
+ if (*chain->p_filter_chain)
+ RCU_INIT_POINTER(*chain->p_filter_chain, NULL);
+ while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) {
+ RCU_INIT_POINTER(chain->filter_chain, tp->next);
tcf_proto_destroy(tp);
}
}
-EXPORT_SYMBOL(tcf_destroy_chain);
+
+static void tcf_chain_destroy(struct tcf_chain *chain)
+{
+ list_del(&chain->list);
+ tcf_chain_flush(chain);
+ kfree(chain);
+}
+
+struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
+ bool create)
+{
+ struct tcf_chain *chain;
+
+ list_for_each_entry(chain, &block->chain_list, list) {
+ if (chain->index == chain_index) {
+ chain->refcnt++;
+ return chain;
+ }
+ }
+ if (create)
+ return tcf_chain_create(block, chain_index);
+ else
+ return NULL;
+}
+EXPORT_SYMBOL(tcf_chain_get);
+
+void tcf_chain_put(struct tcf_chain *chain)
+{
+ /* Destroy unused chain, with exception of chain 0, which is the
+ * default one and has to be always present.
+ */
+ if (--chain->refcnt == 0 && !chain->filter_chain && chain->index != 0)
+ tcf_chain_destroy(chain);
+}
+EXPORT_SYMBOL(tcf_chain_put);
+
+static void
+tcf_chain_filter_chain_ptr_set(struct tcf_chain *chain,
+ struct tcf_proto __rcu **p_filter_chain)
+{
+ chain->p_filter_chain = p_filter_chain;
+}
+
+int tcf_block_get(struct tcf_block **p_block,
+ struct tcf_proto __rcu **p_filter_chain)
+{
+ struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
+ struct tcf_chain *chain;
+ int err;
+
+ if (!block)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&block->chain_list);
+ /* Create chain 0 by default, it has to be always present. */
+ chain = tcf_chain_create(block, 0);
+ if (!chain) {
+ err = -ENOMEM;
+ goto err_chain_create;
+ }
+ tcf_chain_filter_chain_ptr_set(chain, p_filter_chain);
+ *p_block = block;
+ return 0;
+
+err_chain_create:
+ kfree(block);
+ return err;
+}
+EXPORT_SYMBOL(tcf_block_get);
+
+void tcf_block_put(struct tcf_block *block)
+{
+ struct tcf_chain *chain, *tmp;
+
+ if (!block)
+ return;
+
+ list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+ tcf_chain_destroy(chain);
+ kfree(block);
+}
+EXPORT_SYMBOL(tcf_block_put);
+
+/* Main classifier routine: scans classifier chain attached
+ * to this qdisc, (optionally) tests for protocol and asks
+ * specific classifiers.
+ */
+int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ struct tcf_result *res, bool compat_mode)
+{
+ __be16 protocol = tc_skb_protocol(skb);
+#ifdef CONFIG_NET_CLS_ACT
+ const int max_reclassify_loop = 4;
+ const struct tcf_proto *orig_tp = tp;
+ const struct tcf_proto *first_tp;
+ int limit = 0;
+
+reclassify:
+#endif
+ for (; tp; tp = rcu_dereference_bh(tp->next)) {
+ int err;
+
+ if (tp->protocol != protocol &&
+ tp->protocol != htons(ETH_P_ALL))
+ continue;
+
+ err = tp->classify(skb, tp, res);
+#ifdef CONFIG_NET_CLS_ACT
+ if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
+ first_tp = orig_tp;
+ goto reset;
+ } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
+ first_tp = res->goto_tp;
+ goto reset;
+ }
+#endif
+ if (err >= 0)
+ return err;
+ }
+
+ return TC_ACT_UNSPEC; /* signal: continue lookup */
+#ifdef CONFIG_NET_CLS_ACT
+reset:
+ if (unlikely(limit++ >= max_reclassify_loop)) {
+ net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
+ tp->q->ops->id, tp->prio & 0xffff,
+ ntohs(tp->protocol));
+ return TC_ACT_SHOT;
+ }
+
+ tp = first_tp;
+ protocol = tc_skb_protocol(skb);
+ goto reclassify;
+#endif
+}
+EXPORT_SYMBOL(tcf_classify);
+
+struct tcf_chain_info {
+ struct tcf_proto __rcu **pprev;
+ struct tcf_proto __rcu *next;
+};
+
+static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info)
+{
+ return rtnl_dereference(*chain_info->pprev);
+}
+
+static void tcf_chain_tp_insert(struct tcf_chain *chain,
+ struct tcf_chain_info *chain_info,
+ struct tcf_proto *tp)
+{
+ if (chain->p_filter_chain &&
+ *chain_info->pprev == chain->filter_chain)
+ rcu_assign_pointer(*chain->p_filter_chain, tp);
+ RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
+ rcu_assign_pointer(*chain_info->pprev, tp);
+}
+
+static void tcf_chain_tp_remove(struct tcf_chain *chain,
+ struct tcf_chain_info *chain_info,
+ struct tcf_proto *tp)
+{
+ struct tcf_proto *next = rtnl_dereference(chain_info->next);
+
+ if (chain->p_filter_chain && tp == chain->filter_chain)
+ RCU_INIT_POINTER(*chain->p_filter_chain, next);
+ RCU_INIT_POINTER(*chain_info->pprev, next);
+}
+
+static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
+ struct tcf_chain_info *chain_info,
+ u32 protocol, u32 prio,
+ bool prio_allocate)
+{
+ struct tcf_proto **pprev;
+ struct tcf_proto *tp;
+
+ /* Check the chain for existence of proto-tcf with this priority */
+ for (pprev = &chain->filter_chain;
+ (tp = rtnl_dereference(*pprev)); pprev = &tp->next) {
+ if (tp->prio >= prio) {
+ if (tp->prio == prio) {
+ if (prio_allocate ||
+ (tp->protocol != protocol && protocol))
+ return ERR_PTR(-EINVAL);
+ } else {
+ tp = NULL;
+ }
+ break;
+ }
+ }
+ chain_info->pprev = pprev;
+ chain_info->next = tp ? tp->next : NULL;
+ return tp;
+}
/* Add/change/delete/get a filter node */
@@ -206,13 +417,14 @@
struct tcmsg *t;
u32 protocol;
u32 prio;
- u32 nprio;
+ bool prio_allocate;
u32 parent;
+ u32 chain_index;
struct net_device *dev;
struct Qdisc *q;
- struct tcf_proto __rcu **back;
- struct tcf_proto __rcu **chain;
- struct tcf_proto *next;
+ struct tcf_chain_info chain_info;
+ struct tcf_chain *chain = NULL;
+ struct tcf_block *block;
struct tcf_proto *tp;
const struct Qdisc_class_ops *cops;
unsigned long cl;
@@ -234,7 +446,7 @@
t = nlmsg_data(n);
protocol = TC_H_MIN(t->tcm_info);
prio = TC_H_MAJ(t->tcm_info);
- nprio = prio;
+ prio_allocate = false;
parent = t->tcm_parent;
cl = 0;
@@ -250,6 +462,7 @@
*/
if (n->nlmsg_flags & NLM_F_CREATE) {
prio = TC_H_MAKE(0x80000000U, 0U);
+ prio_allocate = true;
break;
}
/* fall-through */
@@ -280,7 +493,7 @@
if (!cops)
return -EINVAL;
- if (cops->tcf_chain == NULL)
+ if (!cops->tcf_block)
return -EOPNOTSUPP;
/* Do we search for filter, attached to class? */
@@ -291,34 +504,36 @@
}
/* And the last stroke */
- chain = cops->tcf_chain(q, cl);
- if (chain == NULL) {
+ block = cops->tcf_block(q, cl);
+ if (!block) {
err = -EINVAL;
goto errout;
}
+
+ chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
+ if (chain_index > TC_ACT_EXT_VAL_MASK) {
+ err = -EINVAL;
+ goto errout;
+ }
+ chain = tcf_chain_get(block, chain_index,
+ n->nlmsg_type == RTM_NEWTFILTER);
+ if (!chain) {
+ err = n->nlmsg_type == RTM_NEWTFILTER ? -ENOMEM : -EINVAL;
+ goto errout;
+ }
+
if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
tfilter_notify_chain(net, skb, n, chain, RTM_DELTFILTER);
- tcf_destroy_chain(chain);
+ tcf_chain_flush(chain);
err = 0;
goto errout;
}
- /* Check the chain for existence of proto-tcf with this priority */
- for (back = chain;
- (tp = rtnl_dereference(*back)) != NULL;
- back = &tp->next) {
- if (tp->prio >= prio) {
- if (tp->prio == prio) {
- if (!nprio ||
- (tp->protocol != protocol && protocol)) {
- err = -EINVAL;
- goto errout;
- }
- } else {
- tp = NULL;
- }
- break;
- }
+ tp = tcf_chain_tp_find(chain, &chain_info, protocol,
+ prio, prio_allocate);
+ if (IS_ERR(tp)) {
+ err = PTR_ERR(tp);
+ goto errout;
}
if (tp == NULL) {
@@ -335,11 +550,11 @@
goto errout;
}
- if (!nprio)
- nprio = TC_H_MAJ(tcf_auto_prio(rtnl_dereference(*back)));
+ if (prio_allocate)
+ prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));
tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
- protocol, nprio, parent, q);
+ protocol, prio, parent, q, chain);
if (IS_ERR(tp)) {
err = PTR_ERR(tp);
goto errout;
@@ -354,8 +569,7 @@
if (fh == 0) {
if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
- next = rtnl_dereference(tp->next);
- RCU_INIT_POINTER(*back, next);
+ tcf_chain_tp_remove(chain, &chain_info, tp);
tfilter_notify(net, skb, n, tp, fh,
RTM_DELTFILTER, false);
tcf_proto_destroy(tp);
@@ -384,11 +598,10 @@
err = tp->ops->delete(tp, fh, &last);
if (err)
goto errout;
- next = rtnl_dereference(tp->next);
tfilter_notify(net, skb, n, tp, t->tcm_handle,
RTM_DELTFILTER, false);
if (last) {
- RCU_INIT_POINTER(*back, next);
+ tcf_chain_tp_remove(chain, &chain_info, tp);
tcf_proto_destroy(tp);
}
goto errout;
@@ -405,10 +618,8 @@
err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE);
if (err == 0) {
- if (tp_created) {
- RCU_INIT_POINTER(tp->next, rtnl_dereference(*back));
- rcu_assign_pointer(*back, tp);
- }
+ if (tp_created)
+ tcf_chain_tp_insert(chain, &chain_info, tp);
tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER, false);
} else {
if (tp_created)
@@ -416,6 +627,8 @@
}
errout:
+ if (chain)
+ tcf_chain_put(chain);
if (cl)
cops->put(q, cl);
if (err == -EAGAIN)
@@ -444,6 +657,8 @@
tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
+ goto nla_put_failure;
tcm->tcm_handle = fh;
if (RTM_DELTFILTER != event) {
tcm->tcm_handle = 0;
@@ -500,22 +715,76 @@
RTM_NEWTFILTER);
}
+static bool tcf_chain_dump(struct tcf_chain *chain, struct sk_buff *skb,
+ struct netlink_callback *cb,
+ long index_start, long *p_index)
+{
+ struct net *net = sock_net(skb->sk);
+ struct tcmsg *tcm = nlmsg_data(cb->nlh);
+ struct tcf_dump_args arg;
+ struct tcf_proto *tp;
+
+ for (tp = rtnl_dereference(chain->filter_chain);
+ tp; tp = rtnl_dereference(tp->next), (*p_index)++) {
+ if (*p_index < index_start)
+ continue;
+ if (TC_H_MAJ(tcm->tcm_info) &&
+ TC_H_MAJ(tcm->tcm_info) != tp->prio)
+ continue;
+ if (TC_H_MIN(tcm->tcm_info) &&
+ TC_H_MIN(tcm->tcm_info) != tp->protocol)
+ continue;
+ if (*p_index > index_start)
+ memset(&cb->args[1], 0,
+ sizeof(cb->args) - sizeof(cb->args[0]));
+ if (cb->args[1] == 0) {
+ if (tcf_fill_node(net, skb, tp, 0,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ RTM_NEWTFILTER) <= 0)
+ return false;
+
+ cb->args[1] = 1;
+ }
+ if (!tp->ops->walk)
+ continue;
+ arg.w.fn = tcf_node_dump;
+ arg.skb = skb;
+ arg.cb = cb;
+ arg.w.stop = 0;
+ arg.w.skip = cb->args[1] - 1;
+ arg.w.count = 0;
+ tp->ops->walk(tp, &arg.w);
+ cb->args[1] = arg.w.count + 1;
+ if (arg.w.stop)
+ return false;
+ }
+ return true;
+}
+
/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- int t;
- int s_t;
+ struct nlattr *tca[TCA_MAX + 1];
struct net_device *dev;
struct Qdisc *q;
- struct tcf_proto *tp, __rcu **chain;
+ struct tcf_block *block;
+ struct tcf_chain *chain;
struct tcmsg *tcm = nlmsg_data(cb->nlh);
unsigned long cl = 0;
const struct Qdisc_class_ops *cops;
- struct tcf_dump_args arg;
+ long index_start;
+ long index;
+ int err;
if (nlmsg_len(cb->nlh) < sizeof(*tcm))
return skb->len;
+
+ err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
+ if (err)
+ return err;
+
dev = __dev_get_by_index(net, tcm->tcm_ifindex);
if (!dev)
return skb->len;
@@ -529,56 +798,29 @@
cops = q->ops->cl_ops;
if (!cops)
goto errout;
- if (cops->tcf_chain == NULL)
+ if (!cops->tcf_block)
goto errout;
if (TC_H_MIN(tcm->tcm_parent)) {
cl = cops->get(q, tcm->tcm_parent);
if (cl == 0)
goto errout;
}
- chain = cops->tcf_chain(q, cl);
- if (chain == NULL)
+ block = cops->tcf_block(q, cl);
+ if (!block)
goto errout;
- s_t = cb->args[0];
+ index_start = cb->args[0];
+ index = 0;
- for (tp = rtnl_dereference(*chain), t = 0;
- tp; tp = rtnl_dereference(tp->next), t++) {
- if (t < s_t)
+ list_for_each_entry(chain, &block->chain_list, list) {
+ if (tca[TCA_CHAIN] &&
+ nla_get_u32(tca[TCA_CHAIN]) != chain->index)
continue;
- if (TC_H_MAJ(tcm->tcm_info) &&
- TC_H_MAJ(tcm->tcm_info) != tp->prio)
- continue;
- if (TC_H_MIN(tcm->tcm_info) &&
- TC_H_MIN(tcm->tcm_info) != tp->protocol)
- continue;
- if (t > s_t)
- memset(&cb->args[1], 0,
- sizeof(cb->args)-sizeof(cb->args[0]));
- if (cb->args[1] == 0) {
- if (tcf_fill_node(net, skb, tp, 0,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI,
- RTM_NEWTFILTER) <= 0)
- break;
-
- cb->args[1] = 1;
- }
- if (tp->ops->walk == NULL)
- continue;
- arg.w.fn = tcf_node_dump;
- arg.skb = skb;
- arg.cb = cb;
- arg.w.stop = 0;
- arg.w.skip = cb->args[1] - 1;
- arg.w.count = 0;
- tp->ops->walk(tp, &arg.w);
- cb->args[1] = arg.w.count + 1;
- if (arg.w.stop)
+ if (!tcf_chain_dump(chain, skb, cb, index_start, &index))
break;
}
- cb->args[0] = t;
+ cb->args[0] = index;
errout:
if (cl)
@@ -608,8 +850,9 @@
struct tc_action *act;
if (exts->police && tb[exts->police]) {
- act = tcf_action_init_1(net, tb[exts->police], rate_tlv,
- "police", ovr, TCA_ACT_BIND);
+ act = tcf_action_init_1(net, tp, tb[exts->police],
+ rate_tlv, "police", ovr,
+ TCA_ACT_BIND);
if (IS_ERR(act))
return PTR_ERR(act);
@@ -620,8 +863,8 @@
LIST_HEAD(actions);
int err, i = 0;
- err = tcf_action_init(net, tb[exts->action], rate_tlv,
- NULL, ovr, TCA_ACT_BIND,
+ err = tcf_action_init(net, tp, tb[exts->action],
+ rate_tlv, NULL, ovr, TCA_ACT_BIND,
&actions);
if (err)
return err;
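From userspace, the chain a filter attaches to is chosen with the new TCA_CHAIN attribute parsed above (chain 0 when absent); a hedged libmnl sketch of tacking it onto an RTM_NEWTFILTER request, with the rest of the request built as for any filter:

	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);

	nlh->nlmsg_type = RTM_NEWTFILTER;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_ACK;
	/* struct tcmsg, TCA_KIND and the filter options go here as usual */
	mnl_attr_put_u32(nlh, TCA_CHAIN, 1);	/* attach to chain 1 */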
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 5ebeae9..f57bd53 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -70,6 +70,7 @@
case TC_ACT_OK:
case TC_ACT_SHOT:
case TC_ACT_STOLEN:
+ case TC_ACT_TRAP:
case TC_ACT_REDIRECT:
case TC_ACT_UNSPEC:
return code;
@@ -161,6 +162,7 @@
bpf_offload.gen_flags = prog->gen_flags;
err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+ tp->chain->index,
tp->protocol, &offload);
if (!err && (cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE))
@@ -564,6 +566,9 @@
nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
return -EMSGSIZE;
+ if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
+ return -EMSGSIZE;
+
nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
if (nla == NULL)
return -EMSGSIZE;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index ca526c0..7832eb9 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -49,6 +49,8 @@
};
struct flow_dissector_key_ports enc_tp;
struct flow_dissector_key_mpls mpls;
+ struct flow_dissector_key_tcp tcp;
+ struct flow_dissector_key_ip ip;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
struct fl_flow_mask_range {
@@ -237,7 +239,8 @@
tc->type = TC_SETUP_CLSFLOWER;
tc->cls_flower = &offload;
- dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc);
+ dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->chain->index,
+ tp->protocol, tc);
}
static int fl_hw_replace_filter(struct tcf_proto *tp,
@@ -273,8 +276,8 @@
tc->type = TC_SETUP_CLSFLOWER;
tc->cls_flower = &offload;
- err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
- tc);
+ err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+ tp->chain->index, tp->protocol, tc);
if (!err)
f->flags |= TCA_CLS_FLAGS_IN_HW;
@@ -300,7 +303,8 @@
tc->type = TC_SETUP_CLSFLOWER;
tc->cls_flower = &offload;
- dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc);
+ dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+ tp->chain->index, tp->protocol, tc);
}
static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f)
@@ -424,6 +428,12 @@
[TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 },
[TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 },
[TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 },
+ [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 },
+ [TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 },
+ [TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 },
+ [TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 },
+ [TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 },
};
static void fl_set_key_val(struct nlattr **tb,
@@ -525,6 +535,19 @@
return 0;
}
+static void fl_set_key_ip(struct nlattr **tb,
+ struct flow_dissector_key_ip *key,
+ struct flow_dissector_key_ip *mask)
+{
+ fl_set_key_val(tb, &key->tos, TCA_FLOWER_KEY_IP_TOS,
+ &mask->tos, TCA_FLOWER_KEY_IP_TOS_MASK,
+ sizeof(key->tos));
+
+ fl_set_key_val(tb, &key->ttl, TCA_FLOWER_KEY_IP_TTL,
+ &mask->ttl, TCA_FLOWER_KEY_IP_TTL_MASK,
+ sizeof(key->ttl));
+}
+
static int fl_set_key(struct net *net, struct nlattr **tb,
struct fl_flow_key *key, struct fl_flow_key *mask)
{
@@ -567,6 +590,7 @@
fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
&mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
sizeof(key->basic.ip_proto));
+ fl_set_key_ip(tb, &key->ip, &mask->ip);
}
if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
@@ -596,6 +620,9 @@
fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
&mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
sizeof(key->tp.dst));
+ fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
+ &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
+ sizeof(key->tcp.flags));
} else if (key->basic.ip_proto == IPPROTO_UDP) {
fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
&mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
@@ -767,6 +794,10 @@
FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
FLOW_DISSECTOR_KEY_PORTS, tp);
FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FLOW_DISSECTOR_KEY_IP, ip);
+ FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FLOW_DISSECTOR_KEY_TCP, tcp);
+ FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
FLOW_DISSECTOR_KEY_ICMP, icmp);
FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
FLOW_DISSECTOR_KEY_ARP, arp);
@@ -1074,6 +1105,19 @@
return 0;
}
+static int fl_dump_key_ip(struct sk_buff *skb,
+ struct flow_dissector_key_ip *key,
+ struct flow_dissector_key_ip *mask)
+{
+ if (fl_dump_key_val(skb, &key->tos, TCA_FLOWER_KEY_IP_TOS, &mask->tos,
+ TCA_FLOWER_KEY_IP_TOS_MASK, sizeof(key->tos)) ||
+ fl_dump_key_val(skb, &key->ttl, TCA_FLOWER_KEY_IP_TTL, &mask->ttl,
+ TCA_FLOWER_KEY_IP_TTL_MASK, sizeof(key->ttl)))
+ return -1;
+
+ return 0;
+}
+
static int fl_dump_key_vlan(struct sk_buff *skb,
struct flow_dissector_key_vlan *vlan_key,
struct flow_dissector_key_vlan *vlan_mask)
@@ -1187,9 +1231,10 @@
if ((key->basic.n_proto == htons(ETH_P_IP) ||
key->basic.n_proto == htons(ETH_P_IPV6)) &&
- fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
+ (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
&mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
- sizeof(key->basic.ip_proto)))
+ sizeof(key->basic.ip_proto)) ||
+ fl_dump_key_ip(skb, &key->ip, &mask->ip)))
goto nla_put_failure;
if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
@@ -1215,7 +1260,10 @@
sizeof(key->tp.src)) ||
fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
&mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
- sizeof(key->tp.dst))))
+ sizeof(key->tp.dst)) ||
+ fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
+ &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
+ sizeof(key->tcp.flags))))
goto nla_put_failure;
else if (key->basic.ip_proto == IPPROTO_UDP &&
(fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 51859b8..9dc26c3 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -64,8 +64,9 @@
offload.cls_mall->exts = &head->exts;
offload.cls_mall->cookie = cookie;
- err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
- &offload);
+ err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+ tp->chain->index,
+ tp->protocol, &offload);
if (!err)
head->flags |= TCA_CLS_FLAGS_IN_HW;
@@ -86,8 +87,8 @@
offload.cls_mall->exts = NULL;
offload.cls_mall->cookie = cookie;
- dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
- &offload);
+ dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->chain->index,
+ tp->protocol, &offload);
}
static void mall_destroy(struct tcf_proto *tp)
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index d20e72a..2d01195 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -441,7 +441,8 @@
offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
offload.cls_u32->knode.handle = handle;
dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
- tp->protocol, &offload);
+ tp->chain->index, tp->protocol,
+ &offload);
}
}
@@ -465,7 +466,8 @@
offload.cls_u32->hnode.prio = h->prio;
err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
- tp->protocol, &offload);
+ tp->chain->index, tp->protocol,
+ &offload);
if (tc_skip_sw(flags))
return err;
@@ -488,7 +490,8 @@
offload.cls_u32->hnode.prio = h->prio;
dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
- tp->protocol, &offload);
+ tp->chain->index, tp->protocol,
+ &offload);
}
}
@@ -522,7 +525,8 @@
offload.cls_u32->knode.link_handle = n->ht_down->handle;
err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
- tp->protocol, &offload);
+ tp->chain->index, tp->protocol,
+ &offload);
if (!err)
n->flags |= TCA_CLS_FLAGS_IN_HW;
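Each classifier offload call above now carries tp->chain->index; a driver that can only program the default chain into hardware would be expected to bail out on anything else, roughly as follows (a hypothetical driver, not part of this diff):

	static int foo_setup_tc(struct net_device *dev, u32 handle,
				u32 chain_index, __be16 proto,
				struct tc_to_netdev *tc)
	{
		if (chain_index)
			return -EOPNOTSUPP;	/* only chain 0 in hardware */
		/* existing TC_SETUP_CLS* dispatch continues unchanged */
		return 0;
	}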
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index e88342f..5d95401 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -163,7 +163,7 @@
if (!(cops->get && cops->put && cops->walk && cops->leaf))
goto out_einval;
- if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
+ if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
goto out_einval;
}
@@ -1878,54 +1878,6 @@
return skb->len;
}
-/* Main classifier routine: scans classifier chain attached
- * to this qdisc, (optionally) tests for protocol and asks
- * specific classifiers.
- */
-int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
- struct tcf_result *res, bool compat_mode)
-{
- __be16 protocol = tc_skb_protocol(skb);
-#ifdef CONFIG_NET_CLS_ACT
- const int max_reclassify_loop = 4;
- const struct tcf_proto *old_tp = tp;
- int limit = 0;
-
-reclassify:
-#endif
- for (; tp; tp = rcu_dereference_bh(tp->next)) {
- int err;
-
- if (tp->protocol != protocol &&
- tp->protocol != htons(ETH_P_ALL))
- continue;
-
- err = tp->classify(skb, tp, res);
-#ifdef CONFIG_NET_CLS_ACT
- if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode))
- goto reset;
-#endif
- if (err >= 0)
- return err;
- }
-
- return TC_ACT_UNSPEC; /* signal: continue lookup */
-#ifdef CONFIG_NET_CLS_ACT
-reset:
- if (unlikely(limit++ >= max_reclassify_loop)) {
- net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
- tp->q->ops->id, tp->prio & 0xffff,
- ntohs(tp->protocol));
- return TC_ACT_SHOT;
- }
-
- tp = old_tp;
- protocol = tc_skb_protocol(skb);
- goto reclassify;
-#endif
-}
-EXPORT_SYMBOL(tc_classify);
-
#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 40cbcee..de16259 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -43,6 +43,7 @@
struct atm_flow_data {
struct Qdisc *q; /* FIFO, TBF, etc. */
struct tcf_proto __rcu *filter_list;
+ struct tcf_block *block;
struct atm_vcc *vcc; /* VCC; NULL if VCC is closed */
void (*old_pop)(struct atm_vcc *vcc,
struct sk_buff *skb); /* chaining */
@@ -143,7 +144,7 @@
list_del_init(&flow->list);
pr_debug("atm_tc_put: qdisc %p\n", flow->q);
qdisc_destroy(flow->q);
- tcf_destroy_chain(&flow->filter_list);
+ tcf_block_put(flow->block);
if (flow->sock) {
pr_debug("atm_tc_put: f_count %ld\n",
file_count(flow->sock->file));
@@ -274,7 +275,13 @@
error = -ENOBUFS;
goto err_out;
}
- RCU_INIT_POINTER(flow->filter_list, NULL);
+
+ error = tcf_block_get(&flow->block, &flow->filter_list);
+ if (error) {
+ kfree(flow);
+ goto err_out;
+ }
+
flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
if (!flow->q)
flow->q = &noop_qdisc;
@@ -346,14 +353,13 @@
}
}
-static struct tcf_proto __rcu **atm_tc_find_tcf(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl)
{
struct atm_qdisc_data *p = qdisc_priv(sch);
struct atm_flow_data *flow = (struct atm_flow_data *)cl;
pr_debug("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
- return flow ? &flow->filter_list : &p->link.filter_list;
+ return flow ? flow->block : p->link.block;
}
/* --------------------------- Qdisc operations ---------------------------- */
@@ -377,7 +383,7 @@
list_for_each_entry(flow, &p->flows, list) {
fl = rcu_dereference_bh(flow->filter_list);
if (fl) {
- result = tc_classify(skb, fl, &res, true);
+ result = tcf_classify(skb, fl, &res, true);
if (result < 0)
continue;
flow = (struct atm_flow_data *)res.class;
@@ -400,6 +406,7 @@
switch (result) {
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
+ case TC_ACT_TRAP:
__qdisc_drop(skb, to_free);
return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
@@ -524,6 +531,7 @@
static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
{
struct atm_qdisc_data *p = qdisc_priv(sch);
+ int err;
pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
INIT_LIST_HEAD(&p->flows);
@@ -534,7 +542,11 @@
if (!p->link.q)
p->link.q = &noop_qdisc;
pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
- RCU_INIT_POINTER(p->link.filter_list, NULL);
+
+ err = tcf_block_get(&p->link.block, &p->link.filter_list);
+ if (err)
+ return err;
+
p->link.vcc = NULL;
p->link.sock = NULL;
p->link.classid = sch->handle;
@@ -561,7 +573,7 @@
pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
list_for_each_entry(flow, &p->flows, list)
- tcf_destroy_chain(&flow->filter_list);
+ tcf_block_put(flow->block);
list_for_each_entry_safe(flow, tmp, &p->flows, list) {
if (flow->ref > 1)
@@ -646,7 +658,7 @@
.change = atm_tc_change,
.delete = atm_tc_delete,
.walk = atm_tc_walk,
- .tcf_chain = atm_tc_find_tcf,
+ .tcf_block = atm_tc_tcf_block,
.bind_tcf = atm_tc_bind_filter,
.unbind_tcf = atm_tc_put,
.dump = atm_tc_dump_class,
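
[Editor's note] sch_atm above is the template for every classful qdisc touched by this merge: get a block in ->init (and per class in ->change, freeing the class again if that fails), put it in ->destroy, switch tc_classify() calls to tcf_classify(), treat the new TC_ACT_TRAP verdict like STOLEN/QUEUED, and replace the .tcf_chain hook with .tcf_block. A hedged skeleton of the repeated pattern (foo_* names are illustrative); the filter list stays in the private data so the fast path can still rcu_dereference it, only setup and teardown go through the block:

struct foo_sched_data {
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
};

static int foo_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct foo_sched_data *q = qdisc_priv(sch);

	return tcf_block_get(&q->block, &q->filter_list);
}

static void foo_destroy(struct Qdisc *sch)
{
	struct foo_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
}

static struct tcf_block *foo_tcf_block(struct Qdisc *sch, unsigned long cl)
{
	struct foo_sched_data *q = qdisc_priv(sch);

	/* classless case: only the qdisc-level block exists */
	return cl ? NULL : q->block;
}
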
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 7415859..481036f 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -127,6 +127,7 @@
struct tc_cbq_xstats xstats;
struct tcf_proto __rcu *filter_list;
+ struct tcf_block *block;
int refcnt;
int filters;
@@ -233,7 +234,7 @@
/*
* Step 2+n. Apply classifier.
*/
- result = tc_classify(skb, fl, &res, true);
+ result = tcf_classify(skb, fl, &res, true);
if (!fl || result < 0)
goto fallback;
@@ -253,6 +254,7 @@
switch (result) {
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
+ case TC_ACT_TRAP:
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return NULL;
@@ -1405,7 +1407,7 @@
WARN_ON(cl->filters);
- tcf_destroy_chain(&cl->filter_list);
+ tcf_block_put(cl->block);
qdisc_destroy(cl->q);
qdisc_put_rtab(cl->R_tab);
gen_kill_estimator(&cl->rate_est);
@@ -1430,7 +1432,7 @@
*/
for (h = 0; h < q->clhash.hashsize; h++) {
hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
- tcf_destroy_chain(&cl->filter_list);
+ tcf_block_put(cl->block);
}
for (h = 0; h < q->clhash.hashsize; h++) {
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
@@ -1585,12 +1587,19 @@
if (cl == NULL)
goto failure;
+ err = tcf_block_get(&cl->block, &cl->filter_list);
+ if (err) {
+ kfree(cl);
+ return err;
+ }
+
if (tca[TCA_RATE]) {
err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
NULL,
qdisc_root_sleeping_running(sch),
tca[TCA_RATE]);
if (err) {
+ tcf_block_put(cl->block);
kfree(cl);
goto failure;
}
@@ -1688,8 +1697,7 @@
return 0;
}
-static struct tcf_proto __rcu **cbq_find_tcf(struct Qdisc *sch,
- unsigned long arg)
+static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class *)arg;
@@ -1697,7 +1705,7 @@
if (cl == NULL)
cl = &q->link;
- return &cl->filter_list;
+ return cl->block;
}
static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
@@ -1756,7 +1764,7 @@
.change = cbq_change_class,
.delete = cbq_delete,
.walk = cbq_walk,
- .tcf_chain = cbq_find_tcf,
+ .tcf_block = cbq_tcf_block,
.bind_tcf = cbq_bind_filter,
.unbind_tcf = cbq_unbind_filter,
.dump = cbq_dump_class,
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 58a8c32..a413dc1 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -36,6 +36,7 @@
struct drr_sched {
struct list_head active;
struct tcf_proto __rcu *filter_list;
+ struct tcf_block *block;
struct Qdisc_class_hash clhash;
};
@@ -190,15 +191,14 @@
drr_destroy_class(sch, cl);
}
-static struct tcf_proto __rcu **drr_tcf_chain(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl)
{
struct drr_sched *q = qdisc_priv(sch);
if (cl)
return NULL;
- return &q->filter_list;
+ return q->block;
}
static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
@@ -333,12 +333,13 @@
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
fl = rcu_dereference_bh(q->filter_list);
- result = tc_classify(skb, fl, &res, false);
+ result = tcf_classify(skb, fl, &res, false);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
+ case TC_ACT_TRAP:
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return NULL;
@@ -431,6 +432,9 @@
struct drr_sched *q = qdisc_priv(sch);
int err;
+ err = tcf_block_get(&q->block, &q->filter_list);
+ if (err)
+ return err;
err = qdisc_class_hash_init(&q->clhash);
if (err < 0)
return err;
@@ -462,7 +466,7 @@
struct hlist_node *next;
unsigned int i;
- tcf_destroy_chain(&q->filter_list);
+ tcf_block_put(q->block);
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
@@ -477,7 +481,7 @@
.delete = drr_delete_class,
.get = drr_get_class,
.put = drr_put_class,
- .tcf_chain = drr_tcf_chain,
+ .tcf_block = drr_tcf_block,
.bind_tcf = drr_bind_tcf,
.unbind_tcf = drr_unbind_tcf,
.graft = drr_graft_class,
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 1c0f877..6d94fcc 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -44,6 +44,7 @@
struct dsmark_qdisc_data {
struct Qdisc *q;
struct tcf_proto __rcu *filter_list;
+ struct tcf_block *block;
struct mask_value *mv;
u16 indices;
u8 set_tc_index;
@@ -183,11 +184,11 @@
}
}
-static inline struct tcf_proto __rcu **dsmark_find_tcf(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl)
{
struct dsmark_qdisc_data *p = qdisc_priv(sch);
- return &p->filter_list;
+
+ return p->block;
}
/* --------------------------- Qdisc operations ---------------------------- */
@@ -234,7 +235,7 @@
else {
struct tcf_result res;
struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
- int result = tc_classify(skb, fl, &res, false);
+ int result = tcf_classify(skb, fl, &res, false);
pr_debug("result %d class 0x%04x\n", result, res.classid);
@@ -242,6 +243,7 @@
#ifdef CONFIG_NET_CLS_ACT
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
+ case TC_ACT_TRAP:
__qdisc_drop(skb, to_free);
return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
@@ -342,6 +344,10 @@
if (!opt)
goto errout;
+ err = tcf_block_get(&p->block, &p->filter_list);
+ if (err)
+ return err;
+
err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy, NULL);
if (err < 0)
goto errout;
@@ -400,7 +406,7 @@
pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
- tcf_destroy_chain(&p->filter_list);
+ tcf_block_put(p->block);
qdisc_destroy(p->q);
if (p->mv != p->embedded)
kfree(p->mv);
@@ -468,7 +474,7 @@
.change = dsmark_change,
.delete = dsmark_delete,
.walk = dsmark_walk,
- .tcf_chain = dsmark_find_tcf,
+ .tcf_block = dsmark_tcf_block,
.bind_tcf = dsmark_bind_filter,
.unbind_tcf = dsmark_put,
.dump = dsmark_dump_class,
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index b488721..147fde7 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -390,9 +390,17 @@
q->stat_tcp_retrans++;
qdisc_qstats_backlog_inc(sch, skb);
if (fq_flow_is_detached(f)) {
+ struct sock *sk = skb->sk;
+
fq_flow_add_tail(&q->new_flows, f);
if (time_after(jiffies, f->age + q->flow_refill_delay))
f->credit = max_t(u32, f->credit, q->quantum);
+ if (sk && q->rate_enable) {
+ if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
+ SK_PACING_FQ))
+ smp_store_release(&sk->sk_pacing_status,
+ SK_PACING_FQ);
+ }
q->inactive_flows--;
}
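
[Editor's note] Besides the scheduler refactoring, the FQ hunk above lets the qdisc advertise that it is pacing a socket by publishing SK_PACING_FQ in sk->sk_pacing_status; the acquire/release pair keeps lockless readers coherent. A hedged sketch of the consumer side, which TCP can use before falling back to internal pacing:

static bool sk_paced_by_fq(const struct sock *sk)
{
	/* paired with the smp_store_release() in the FQ hunk above */
	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_FQ;
}
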
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 9201abc..337f2d6 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -55,6 +55,7 @@
struct fq_codel_sched_data {
struct tcf_proto __rcu *filter_list; /* optional external classifier */
+ struct tcf_block *block;
struct fq_codel_flow *flows; /* Flows table [flows_cnt] */
u32 *backlogs; /* backlog table [flows_cnt] */
u32 flows_cnt; /* number of flows */
@@ -96,12 +97,13 @@
return fq_codel_hash(q, skb) + 1;
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- result = tc_classify(skb, filter, &res, false);
+ result = tcf_classify(skb, filter, &res, false);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
+ case TC_ACT_TRAP:
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return 0;
@@ -450,7 +452,7 @@
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
- tcf_destroy_chain(&q->filter_list);
+ tcf_block_put(q->block);
kvfree(q->backlogs);
kvfree(q->flows);
}
@@ -459,6 +461,7 @@
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
int i;
+ int err;
sch->limit = 10*1024;
q->flows_cnt = 1024;
@@ -478,6 +481,10 @@
return err;
}
+ err = tcf_block_get(&q->block, &q->filter_list);
+ if (err)
+ return err;
+
if (!q->flows) {
q->flows = kvzalloc(q->flows_cnt *
sizeof(struct fq_codel_flow), GFP_KERNEL);
@@ -589,14 +596,13 @@
{
}
-static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
if (cl)
return NULL;
- return &q->filter_list;
+ return q->block;
}
static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
@@ -679,7 +685,7 @@
.leaf = fq_codel_leaf,
.get = fq_codel_get,
.put = fq_codel_put,
- .tcf_chain = fq_codel_find_tcf,
+ .tcf_block = fq_codel_tcf_block,
.bind_tcf = fq_codel_bind,
.unbind_tcf = fq_codel_put,
.dump = fq_codel_dump_class,
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 5cb82f6..b52f746 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -116,6 +116,7 @@
struct gnet_stats_queue qstats;
struct net_rate_estimator __rcu *rate_est;
struct tcf_proto __rcu *filter_list; /* filter list */
+ struct tcf_block *block;
unsigned int filter_cnt; /* filter count */
unsigned int level; /* class level in hierarchy */
@@ -1040,12 +1041,19 @@
if (cl == NULL)
return -ENOBUFS;
+ err = tcf_block_get(&cl->block, &cl->filter_list);
+ if (err) {
+ kfree(cl);
+ return err;
+ }
+
if (tca[TCA_RATE]) {
err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
NULL,
qdisc_root_sleeping_running(sch),
tca[TCA_RATE]);
if (err) {
+ tcf_block_put(cl->block);
kfree(cl);
return err;
}
@@ -1091,7 +1099,7 @@
{
struct hfsc_sched *q = qdisc_priv(sch);
- tcf_destroy_chain(&cl->filter_list);
+ tcf_block_put(cl->block);
qdisc_destroy(cl->qdisc);
gen_kill_estimator(&cl->rate_est);
if (cl != &q->root)
@@ -1142,11 +1150,12 @@
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
head = &q->root;
tcf = rcu_dereference_bh(q->root.filter_list);
- while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
+ while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
+ case TC_ACT_TRAP:
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return NULL;
@@ -1261,8 +1270,7 @@
cl->filter_cnt--;
}
-static struct tcf_proto __rcu **
-hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
+static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg)
{
struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl = (struct hfsc_class *)arg;
@@ -1270,7 +1278,7 @@
if (cl == NULL)
cl = &q->root;
- return &cl->filter_list;
+ return cl->block;
}
static int
@@ -1515,7 +1523,7 @@
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
- tcf_destroy_chain(&cl->filter_list);
+ tcf_block_put(cl->block);
}
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
@@ -1662,7 +1670,7 @@
.put = hfsc_put_class,
.bind_tcf = hfsc_bind_tcf,
.unbind_tcf = hfsc_unbind_tcf,
- .tcf_chain = hfsc_tcf_chain,
+ .tcf_block = hfsc_tcf_block,
.dump = hfsc_dump_class,
.dump_stats = hfsc_dump_class_stats,
.walk = hfsc_walk
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 570ef3b..203286a 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -105,6 +105,7 @@
int quantum; /* but stored for parent-to-leaf return */
struct tcf_proto __rcu *filter_list; /* class attached filters */
+ struct tcf_block *block;
int filter_cnt;
int refcnt; /* usage count of this class */
@@ -156,6 +157,7 @@
/* filters for qdisc itself */
struct tcf_proto __rcu *filter_list;
+ struct tcf_block *block;
#define HTB_WARN_TOOMANYEVENTS 0x1
unsigned int warned; /* only one warning */
@@ -231,11 +233,12 @@
}
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
+ while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
+ case TC_ACT_TRAP:
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return NULL;
@@ -1017,6 +1020,10 @@
if (!opt)
return -EINVAL;
+ err = tcf_block_get(&q->block, &q->filter_list);
+ if (err)
+ return err;
+
err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy, NULL);
if (err < 0)
return err;
@@ -1230,7 +1237,7 @@
qdisc_destroy(cl->un.leaf.q);
}
gen_kill_estimator(&cl->rate_est);
- tcf_destroy_chain(&cl->filter_list);
+ tcf_block_put(cl->block);
kfree(cl);
}
@@ -1248,11 +1255,11 @@
* because filter need its target class alive to be able to call
* unbind_filter on it (without Oops).
*/
- tcf_destroy_chain(&q->filter_list);
+ tcf_block_put(q->block);
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode)
- tcf_destroy_chain(&cl->filter_list);
+ tcf_block_put(cl->block);
}
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
@@ -1396,6 +1403,11 @@
if (!cl)
goto failure;
+ err = tcf_block_get(&cl->block, &cl->filter_list);
+ if (err) {
+ kfree(cl);
+ goto failure;
+ }
if (htb_rate_est || tca[TCA_RATE]) {
err = gen_new_estimator(&cl->bstats, NULL,
&cl->rate_est,
@@ -1403,6 +1415,7 @@
qdisc_root_sleeping_running(sch),
tca[TCA_RATE] ? : &est.nla);
if (err) {
+ tcf_block_put(cl->block);
kfree(cl);
goto failure;
}
@@ -1521,14 +1534,12 @@
return err;
}
-static struct tcf_proto __rcu **htb_find_tcf(struct Qdisc *sch,
- unsigned long arg)
+static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)arg;
- struct tcf_proto __rcu **fl = cl ? &cl->filter_list : &q->filter_list;
- return fl;
+ return cl ? cl->block : q->block;
}
static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
@@ -1591,7 +1602,7 @@
.change = htb_change_class,
.delete = htb_delete,
.walk = htb_walk,
- .tcf_chain = htb_find_tcf,
+ .tcf_block = htb_tcf_block,
.bind_tcf = htb_bind_filter,
.unbind_tcf = htb_unbind_filter,
.dump = htb_dump_class,
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 3bab5f6..d8a9beb 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -18,6 +18,10 @@
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
+struct ingress_sched_data {
+ struct tcf_block *block;
+};
+
static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
{
return NULL;
@@ -47,16 +51,23 @@
{
}
-static struct tcf_proto __rcu **ingress_find_tcf(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl)
{
- struct net_device *dev = qdisc_dev(sch);
+ struct ingress_sched_data *q = qdisc_priv(sch);
- return &dev->ingress_cl_list;
+ return q->block;
}
static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
{
+ struct ingress_sched_data *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ int err;
+
+ err = tcf_block_get(&q->block, &dev->ingress_cl_list);
+ if (err)
+ return err;
+
net_inc_ingress_queue();
sch->flags |= TCQ_F_CPUSTATS;
@@ -65,9 +76,9 @@
static void ingress_destroy(struct Qdisc *sch)
{
- struct net_device *dev = qdisc_dev(sch);
+ struct ingress_sched_data *q = qdisc_priv(sch);
- tcf_destroy_chain(&dev->ingress_cl_list);
+ tcf_block_put(q->block);
net_dec_ingress_queue();
}
@@ -91,7 +102,7 @@
.get = ingress_get,
.put = ingress_put,
.walk = ingress_walk,
- .tcf_chain = ingress_find_tcf,
+ .tcf_block = ingress_tcf_block,
.tcf_cl_offload = ingress_cl_offload,
.bind_tcf = ingress_bind_filter,
.unbind_tcf = ingress_put,
@@ -100,12 +111,18 @@
static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
.cl_ops = &ingress_class_ops,
.id = "ingress",
+ .priv_size = sizeof(struct ingress_sched_data),
.init = ingress_init,
.destroy = ingress_destroy,
.dump = ingress_dump,
.owner = THIS_MODULE,
};
+struct clsact_sched_data {
+ struct tcf_block *ingress_block;
+ struct tcf_block *egress_block;
+};
+
static unsigned long clsact_get(struct Qdisc *sch, u32 classid)
{
switch (TC_H_MIN(classid)) {
@@ -128,16 +145,15 @@
return clsact_get(sch, classid);
}
-static struct tcf_proto __rcu **clsact_find_tcf(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl)
{
- struct net_device *dev = qdisc_dev(sch);
+ struct clsact_sched_data *q = qdisc_priv(sch);
switch (cl) {
case TC_H_MIN(TC_H_MIN_INGRESS):
- return &dev->ingress_cl_list;
+ return q->ingress_block;
case TC_H_MIN(TC_H_MIN_EGRESS):
- return &dev->egress_cl_list;
+ return q->egress_block;
default:
return NULL;
}
@@ -145,6 +161,18 @@
static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
{
+ struct clsact_sched_data *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ int err;
+
+ err = tcf_block_get(&q->ingress_block, &dev->ingress_cl_list);
+ if (err)
+ return err;
+
+ err = tcf_block_get(&q->egress_block, &dev->egress_cl_list);
+ if (err)
+ return err;
+
net_inc_ingress_queue();
net_inc_egress_queue();
@@ -155,10 +183,10 @@
static void clsact_destroy(struct Qdisc *sch)
{
- struct net_device *dev = qdisc_dev(sch);
+ struct clsact_sched_data *q = qdisc_priv(sch);
- tcf_destroy_chain(&dev->ingress_cl_list);
- tcf_destroy_chain(&dev->egress_cl_list);
+ tcf_block_put(q->egress_block);
+ tcf_block_put(q->ingress_block);
net_dec_ingress_queue();
net_dec_egress_queue();
@@ -169,7 +197,7 @@
.get = clsact_get,
.put = ingress_put,
.walk = ingress_walk,
- .tcf_chain = clsact_find_tcf,
+ .tcf_block = clsact_tcf_block,
.tcf_cl_offload = clsact_cl_offload,
.bind_tcf = clsact_bind_filter,
.unbind_tcf = ingress_put,
@@ -178,6 +206,7 @@
static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
.cl_ops = &clsact_class_ops,
.id = "clsact",
+ .priv_size = sizeof(struct clsact_sched_data),
.init = clsact_init,
.destroy = clsact_destroy,
.dump = ingress_dump,
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 0a4cf27..e0c0272 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -43,7 +43,7 @@
struct tc_to_netdev tc = { .type = TC_SETUP_MQPRIO,
{ .mqprio = &offload } };
- dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
+ dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, 0, &tc);
} else {
netdev_set_num_tc(dev, 0);
}
@@ -152,7 +152,8 @@
struct tc_to_netdev tc = { .type = TC_SETUP_MQPRIO,
{ .mqprio = &offload } };
- err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
+ err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle,
+ 0, 0, &tc);
if (err)
return err;
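
[Editor's note] The extra 0 threaded through ndo_setup_tc() here is a new chain_index argument, so drivers can learn which filter chain a request targets. Sketch of the callback signature after this change, with a hypothetical driver that only offloads the default chain:

static int foo_setup_tc(struct net_device *dev, u32 handle,
			u32 chain_index, __be16 protocol,
			struct tc_to_netdev *tc)
{
	/* most hardware can only offload filters on chain 0 */
	if (chain_index)
		return -EOPNOTSUPP;

	return 0;
}
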
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 43a3a10..f143b7b 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -32,6 +32,7 @@
u16 max_bands;
u16 curband;
struct tcf_proto __rcu *filter_list;
+ struct tcf_block *block;
struct Qdisc **queues;
};
@@ -46,11 +47,12 @@
int err;
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- err = tc_classify(skb, fl, &res, false);
+ err = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
switch (err) {
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
+ case TC_ACT_TRAP:
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return NULL;
@@ -170,7 +172,7 @@
int band;
struct multiq_sched_data *q = qdisc_priv(sch);
- tcf_destroy_chain(&q->filter_list);
+ tcf_block_put(q->block);
for (band = 0; band < q->bands; band++)
qdisc_destroy(q->queues[band]);
@@ -243,6 +245,10 @@
if (opt == NULL)
return -EINVAL;
+ err = tcf_block_get(&q->block, &q->filter_list);
+ if (err)
+ return err;
+
q->max_bands = qdisc_dev(sch)->num_tx_queues;
q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
@@ -367,14 +373,13 @@
}
}
-static struct tcf_proto __rcu **multiq_find_tcf(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl)
{
struct multiq_sched_data *q = qdisc_priv(sch);
if (cl)
return NULL;
- return &q->filter_list;
+ return q->block;
}
static const struct Qdisc_class_ops multiq_class_ops = {
@@ -383,7 +388,7 @@
.get = multiq_get,
.put = multiq_put,
.walk = multiq_walk,
- .tcf_chain = multiq_find_tcf,
+ .tcf_block = multiq_tcf_block,
.bind_tcf = multiq_bind,
.unbind_tcf = multiq_put,
.dump = multiq_dump_class,
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 92c2e6d..e3e364c 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -25,6 +25,7 @@
struct prio_sched_data {
int bands;
struct tcf_proto __rcu *filter_list;
+ struct tcf_block *block;
u8 prio2band[TC_PRIO_MAX+1];
struct Qdisc *queues[TCQ_PRIO_BANDS];
};
@@ -42,11 +43,12 @@
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
if (TC_H_MAJ(skb->priority) != sch->handle) {
fl = rcu_dereference_bh(q->filter_list);
- err = tc_classify(skb, fl, &res, false);
+ err = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
switch (err) {
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
+ case TC_ACT_TRAP:
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return NULL;
@@ -145,7 +147,7 @@
int prio;
struct prio_sched_data *q = qdisc_priv(sch);
- tcf_destroy_chain(&q->filter_list);
+ tcf_block_put(q->block);
for (prio = 0; prio < q->bands; prio++)
qdisc_destroy(q->queues[prio]);
}
@@ -204,9 +206,16 @@
static int prio_init(struct Qdisc *sch, struct nlattr *opt)
{
+ struct prio_sched_data *q = qdisc_priv(sch);
+ int err;
+
if (!opt)
return -EINVAL;
+ err = tcf_block_get(&q->block, &q->filter_list);
+ if (err)
+ return err;
+
return prio_tune(sch, opt);
}
@@ -317,14 +326,13 @@
}
}
-static struct tcf_proto __rcu **prio_find_tcf(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl)
{
struct prio_sched_data *q = qdisc_priv(sch);
if (cl)
return NULL;
- return &q->filter_list;
+ return q->block;
}
static const struct Qdisc_class_ops prio_class_ops = {
@@ -333,7 +341,7 @@
.get = prio_get,
.put = prio_put,
.walk = prio_walk,
- .tcf_chain = prio_find_tcf,
+ .tcf_block = prio_tcf_block,
.bind_tcf = prio_bind,
.unbind_tcf = prio_put,
.dump = prio_dump_class,
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 041eba3..0e16dfd 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -182,6 +182,7 @@
struct qfq_sched {
struct tcf_proto __rcu *filter_list;
+ struct tcf_block *block;
struct Qdisc_class_hash clhash;
u64 oldV, V; /* Precise virtual times. */
@@ -582,15 +583,14 @@
qfq_destroy_class(sch, cl);
}
-static struct tcf_proto __rcu **qfq_tcf_chain(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl)
{
struct qfq_sched *q = qdisc_priv(sch);
if (cl)
return NULL;
- return &q->filter_list;
+ return q->block;
}
static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
@@ -720,12 +720,13 @@
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
fl = rcu_dereference_bh(q->filter_list);
- result = tc_classify(skb, fl, &res, false);
+ result = tcf_classify(skb, fl, &res, false);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
+ case TC_ACT_TRAP:
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return NULL;
@@ -1438,6 +1439,10 @@
int i, j, err;
u32 max_cl_shift, maxbudg_shift, max_classes;
+ err = tcf_block_get(&q->block, &q->filter_list);
+ if (err)
+ return err;
+
err = qdisc_class_hash_init(&q->clhash);
if (err < 0)
return err;
@@ -1492,7 +1497,7 @@
struct hlist_node *next;
unsigned int i;
- tcf_destroy_chain(&q->filter_list);
+ tcf_block_put(q->block);
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
@@ -1508,7 +1513,7 @@
.delete = qfq_delete_class,
.get = qfq_get_class,
.put = qfq_put_class,
- .tcf_chain = qfq_tcf_chain,
+ .tcf_block = qfq_tcf_block,
.bind_tcf = qfq_bind_tcf,
.unbind_tcf = qfq_unbind_tcf,
.graft = qfq_graft_class,
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 0f77727..11fb6ec 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -56,6 +56,7 @@
struct sfb_sched_data {
struct Qdisc *qdisc;
struct tcf_proto __rcu *filter_list;
+ struct tcf_block *block;
unsigned long rehash_interval;
unsigned long warmup_time; /* double buffering warmup time in jiffies */
u32 max;
@@ -259,12 +260,13 @@
struct tcf_result res;
int result;
- result = tc_classify(skb, fl, &res, false);
+ result = tcf_classify(skb, fl, &res, false);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
+ case TC_ACT_TRAP:
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return false;
@@ -465,7 +467,7 @@
{
struct sfb_sched_data *q = qdisc_priv(sch);
- tcf_destroy_chain(&q->filter_list);
+ tcf_block_put(q->block);
qdisc_destroy(q->qdisc);
}
@@ -549,6 +551,11 @@
static int sfb_init(struct Qdisc *sch, struct nlattr *opt)
{
struct sfb_sched_data *q = qdisc_priv(sch);
+ int err;
+
+ err = tcf_block_get(&q->block, &q->filter_list);
+ if (err)
+ return err;
q->qdisc = &noop_qdisc;
return sfb_change(sch, opt);
@@ -657,14 +664,13 @@
}
}
-static struct tcf_proto __rcu **sfb_find_tcf(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl)
{
struct sfb_sched_data *q = qdisc_priv(sch);
if (cl)
return NULL;
- return &q->filter_list;
+ return q->block;
}
static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
@@ -682,7 +688,7 @@
.change = sfb_change_class,
.delete = sfb_delete,
.walk = sfb_walk,
- .tcf_chain = sfb_find_tcf,
+ .tcf_block = sfb_tcf_block,
.bind_tcf = sfb_bind,
.unbind_tcf = sfb_put,
.dump = sfb_dump_class,
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 332d94b..f80ea2c 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -126,6 +126,7 @@
u8 flags;
unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
struct tcf_proto __rcu *filter_list;
+ struct tcf_block *block;
sfq_index *ht; /* Hash table ('divisor' slots) */
struct sfq_slot *slots; /* Flows table ('maxflows' entries) */
@@ -180,12 +181,13 @@
return sfq_hash(q, skb) + 1;
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- result = tc_classify(skb, fl, &res, false);
+ result = tcf_classify(skb, fl, &res, false);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
+ case TC_ACT_TRAP:
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return 0;
@@ -697,7 +699,7 @@
{
struct sfq_sched_data *q = qdisc_priv(sch);
- tcf_destroy_chain(&q->filter_list);
+ tcf_block_put(q->block);
q->perturb_period = 0;
del_timer_sync(&q->perturb_timer);
sfq_free(q->ht);
@@ -709,6 +711,11 @@
{
struct sfq_sched_data *q = qdisc_priv(sch);
int i;
+ int err;
+
+ err = tcf_block_get(&q->block, &q->filter_list);
+ if (err)
+ return err;
setup_deferrable_timer(&q->perturb_timer, sfq_perturbation,
(unsigned long)sch);
@@ -815,14 +822,13 @@
{
}
-static struct tcf_proto __rcu **sfq_find_tcf(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl)
{
struct sfq_sched_data *q = qdisc_priv(sch);
if (cl)
return NULL;
- return &q->filter_list;
+ return q->block;
}
static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
@@ -878,7 +884,7 @@
.leaf = sfq_leaf,
.get = sfq_get,
.put = sfq_put,
- .tcf_chain = sfq_find_tcf,
+ .tcf_block = sfq_tcf_block,
.bind_tcf = sfq_bind,
.unbind_tcf = sfq_put,
.dump = sfq_dump_class,
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 9523828..757be41 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -246,7 +246,8 @@
if (!sctp_ulpq_init(&asoc->ulpq, asoc))
goto fail_init;
- if (sctp_stream_new(asoc, gfp))
+ if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams,
+ 0, gfp))
goto fail_init;
/* Assume that peer would support both address types unless we are
@@ -291,7 +292,7 @@
return asoc;
stream_free:
- sctp_stream_free(asoc->stream);
+ sctp_stream_free(&asoc->stream);
fail_init:
sock_put(asoc->base.sk);
sctp_endpoint_put(asoc->ep);
@@ -365,7 +366,7 @@
sctp_tsnmap_free(&asoc->peer.tsn_map);
/* Free stream information. */
- sctp_stream_free(asoc->stream);
+ sctp_stream_free(&asoc->stream);
if (asoc->strreset_chunk)
sctp_chunk_free(asoc->strreset_chunk);
@@ -1111,8 +1112,8 @@
}
/* Update an association (possibly from unexpected COOKIE-ECHO processing). */
-void sctp_assoc_update(struct sctp_association *asoc,
- struct sctp_association *new)
+int sctp_assoc_update(struct sctp_association *asoc,
+ struct sctp_association *new)
{
struct sctp_transport *trans;
struct list_head *pos, *temp;
@@ -1123,8 +1124,10 @@
asoc->peer.sack_needed = new->peer.sack_needed;
asoc->peer.auth_capable = new->peer.auth_capable;
asoc->peer.i = new->peer.i;
- sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
- asoc->peer.i.initial_tsn, GFP_ATOMIC);
+
+ if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
+ asoc->peer.i.initial_tsn, GFP_ATOMIC))
+ return -ENOMEM;
/* Remove any peer addresses not present in the new association. */
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
@@ -1151,7 +1154,7 @@
/* Reinitialize SSN for both local streams
* and peer's streams.
*/
- sctp_stream_clear(asoc->stream);
+ sctp_stream_clear(&asoc->stream);
/* Flush the ULP reassembly and ordered queue.
* Any data there will now be stale and will
@@ -1168,27 +1171,21 @@
} else {
/* Add any peer addresses from the new association. */
list_for_each_entry(trans, &new->peer.transport_addr_list,
- transports) {
- if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
- sctp_assoc_add_peer(asoc, &trans->ipaddr,
- GFP_ATOMIC, trans->state);
- }
+ transports)
+ if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr) &&
+ !sctp_assoc_add_peer(asoc, &trans->ipaddr,
+ GFP_ATOMIC, trans->state))
+ return -ENOMEM;
asoc->ctsn_ack_point = asoc->next_tsn - 1;
asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
- if (sctp_state(asoc, COOKIE_WAIT)) {
- sctp_stream_free(asoc->stream);
- asoc->stream = new->stream;
- new->stream = NULL;
- }
+ if (sctp_state(asoc, COOKIE_WAIT))
+ sctp_stream_update(&asoc->stream, &new->stream);
- if (!asoc->assoc_id) {
- /* get a new association id since we don't have one
- * yet.
- */
- sctp_assoc_set_id(asoc, GFP_ATOMIC);
- }
+ /* get a new assoc id if we don't have one yet. */
+ if (sctp_assoc_set_id(asoc, GFP_ATOMIC))
+ return -ENOMEM;
}
/* SCTP-AUTH: Save the peer parameters from the new associations
@@ -1206,7 +1203,7 @@
asoc->peer.peer_hmacs = new->peer.peer_hmacs;
new->peer.peer_hmacs = NULL;
- sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
+ return sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}
/* Update the retran path for sending a retransmitted packet.
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 697721a..81466f6 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -307,7 +307,7 @@
if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) &&
time_after(jiffies, chunk->msg->expires_at)) {
struct sctp_stream_out *streamout =
- &chunk->asoc->stream->out[chunk->sinfo.sinfo_stream];
+ &chunk->asoc->stream.out[chunk->sinfo.sinfo_stream];
if (chunk->sent_count) {
chunk->asoc->abandoned_sent[SCTP_PR_INDEX(TTL)]++;
@@ -320,7 +320,7 @@
} else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) &&
chunk->sent_count > chunk->sinfo.sinfo_timetolive) {
struct sctp_stream_out *streamout =
- &chunk->asoc->stream->out[chunk->sinfo.sinfo_stream];
+ &chunk->asoc->stream.out[chunk->sinfo.sinfo_stream];
chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
streamout->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 4f5a2b5..275925b 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -35,6 +35,7 @@
static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
{
skb->ip_summed = CHECKSUM_NONE;
+ skb->csum_not_inet = 0;
return sctp_compute_cksum(skb, skb_transport_offset(skb));
}
@@ -98,6 +99,11 @@
},
};
+static const struct skb_checksum_ops crc32c_csum_ops = {
+ .update = sctp_csum_update,
+ .combine = sctp_csum_combine,
+};
+
int __init sctp_offload_init(void)
{
int ret;
@@ -110,6 +116,7 @@
if (ret)
goto ipv4;
+ crc32c_csum_stub = &crc32c_csum_ops;
return ret;
ipv4:
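
[Editor's note] A hedged sketch of how these two offload hunks fit together: csum_not_inet marks a CHECKSUM_PARTIAL skb as needing CRC32C rather than the ones'-complement Internet checksum, and the crc32c_csum_stub installed at init time gives generic code a way to compute it. Roughly, the software-fallback step on the xmit path becomes (helper name from this series, sketch only):

static int foo_csum_resolve(struct sk_buff *skb)
{
	/* CRC32C (SCTP) vs. standard Internet checksum */
	if (skb->csum_not_inet)
		return skb_crc32c_csum_help(skb);
	return skb_checksum_help(skb);
}
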
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 1409a87..89cee14 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -463,14 +463,13 @@
padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
if (padding)
- memset(skb_put(chunk->skb, padding), 0, padding);
+ skb_put_zero(chunk->skb, padding);
if (chunk == packet->auth)
auth = (struct sctp_auth_chunk *)
skb_tail_pointer(nskb);
- memcpy(skb_put(nskb, chunk->skb->len), chunk->skb->data,
- chunk->skb->len);
+ skb_put_data(nskb, chunk->skb->data, chunk->skb->len);
pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
chunk,
@@ -538,6 +537,7 @@
} else {
chksum:
head->ip_summed = CHECKSUM_PARTIAL;
+ head->csum_not_inet = 1;
head->csum_start = skb_transport_header(head) - head->head;
head->csum_offset = offsetof(struct sctphdr, checksum);
}
@@ -585,7 +585,7 @@
sctp_packet_set_owner_w(head, sk);
/* set sctp header */
- sh = (struct sctphdr *)skb_push(head, sizeof(struct sctphdr));
+ sh = skb_push(head, sizeof(struct sctphdr));
skb_reset_transport_header(head);
sh->source = htons(packet->source_port);
sh->dest = htons(packet->destination_port);
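
[Editor's note] The SCTP hunks above and below also pick up the new skb helpers from this merge window: skb_put_data() and skb_put_zero() fold the usual skb_put()+memcpy()/memset() pairs, and skb_put()/skb_push() now return void * so the casts can be dropped. Roughly what the helpers expand to (sketch; the real ones live in include/linux/skbuff.h):

static inline void *skb_put_data(struct sk_buff *skb, const void *data,
				 unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memcpy(tmp, data, len);
	return tmp;
}

static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);
	return tmp;
}
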
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index fe4c3d46..20299df 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -363,7 +363,7 @@
sctp_insert_list(&asoc->outqueue.abandoned,
&chk->transmitted_list);
- streamout = &asoc->stream->out[chk->sinfo.sinfo_stream];
+ streamout = &asoc->stream.out[chk->sinfo.sinfo_stream];
asoc->sent_cnt_removable--;
asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
streamout->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
@@ -400,9 +400,9 @@
q->out_qlen -= chk->skb->len;
asoc->sent_cnt_removable--;
asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
- if (chk->sinfo.sinfo_stream < asoc->stream->outcnt) {
+ if (chk->sinfo.sinfo_stream < asoc->stream.outcnt) {
struct sctp_stream_out *streamout =
- &asoc->stream->out[chk->sinfo.sinfo_stream];
+ &asoc->stream.out[chk->sinfo.sinfo_stream];
streamout->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
}
@@ -1036,7 +1036,7 @@
/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
* stream identifier.
*/
- if (chunk->sinfo.sinfo_stream >= asoc->stream->outcnt) {
+ if (chunk->sinfo.sinfo_stream >= asoc->stream.outcnt) {
/* Mark as failed send. */
sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
@@ -1054,7 +1054,7 @@
continue;
}
- if (asoc->stream->out[sid].state == SCTP_STREAM_CLOSED) {
+ if (asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) {
sctp_outq_head_data(q, chunk);
goto sctp_flush_out;
}
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index a0b29d4..8e34db5 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -218,8 +218,7 @@
return -ENOMEM;
head = &sctp_ep_hashtable[hash];
- local_bh_disable();
- read_lock(&head->lock);
+ read_lock_bh(&head->lock);
sctp_for_each_hentry(epb, &head->chain) {
ep = sctp_ep(epb);
sk = epb->sk;
@@ -234,8 +233,7 @@
sctp_seq_dump_local_addrs(seq, epb);
seq_printf(seq, "\n");
}
- read_unlock(&head->lock);
- local_bh_enable();
+ read_unlock_bh(&head->lock);
return 0;
}
@@ -361,8 +359,8 @@
sctp_seq_dump_remote_addrs(seq, assoc);
seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d "
"%8d %8d %8d %8d",
- assoc->hbinterval, assoc->stream->incnt,
- assoc->stream->outcnt, assoc->max_retrans,
+ assoc->hbinterval, assoc->stream.incnt,
+ assoc->stream.outcnt, assoc->max_retrans,
assoc->init_retries, assoc->shutdown_retries,
assoc->rtx_data_chunks,
atomic_read(&sk->sk_wmem_alloc),
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 92e332e..4b19679 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1296,8 +1296,7 @@
retval->subh.auth_hdr = sctp_addto_chunk(retval, sizeof(sctp_authhdr_t),
&auth_hdr);
- hmac = skb_put(retval->skb, hmac_desc->hmac_len);
- memset(hmac, 0, hmac_desc->hmac_len);
+ hmac = skb_put_zero(retval->skb, hmac_desc->hmac_len);
/* Adjust the chunk header to include the empty MAC */
retval->chunk_hdr->length =
@@ -1390,7 +1389,7 @@
goto nodata;
/* Make room for the chunk header. */
- chunk_hdr = (sctp_chunkhdr_t *)skb_put(skb, sizeof(sctp_chunkhdr_t));
+ chunk_hdr = skb_put(skb, sizeof(sctp_chunkhdr_t));
chunk_hdr->type = type;
chunk_hdr->flags = flags;
chunk_hdr->length = htons(sizeof(sctp_chunkhdr_t));
@@ -1475,15 +1474,11 @@
void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
{
void *target;
- void *padding;
int chunklen = ntohs(chunk->chunk_hdr->length);
int padlen = SCTP_PAD4(chunklen) - chunklen;
- padding = skb_put(chunk->skb, padlen);
- target = skb_put(chunk->skb, len);
-
- memset(padding, 0, padlen);
- memcpy(target, data, len);
+ skb_put_zero(chunk->skb, padlen);
+ target = skb_put_data(chunk->skb, data, len);
/* Adjust the chunk length field. */
chunk->chunk_hdr->length = htons(chunklen + padlen + len);
@@ -1544,7 +1539,7 @@
/* All fragments will be on the same stream */
sid = ntohs(chunk->subh.data_hdr->stream);
- stream = chunk->asoc->stream;
+ stream = &chunk->asoc->stream;
/* Now assign the sequence number to the entire message.
* All fragments must have the same stream sequence number.
@@ -2454,7 +2449,8 @@
* stream sequence number shall be set to 0.
*/
- if (sctp_stream_init(asoc, gfp))
+ if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams,
+ asoc->c.sinit_max_instreams, gfp))
goto clean_up;
if (!asoc->temp && sctp_assoc_set_id(asoc, gfp))
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 25384fa..dfe1fcb 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -818,6 +818,28 @@
asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
}
+static void sctp_cmd_assoc_update(sctp_cmd_seq_t *cmds,
+ struct sctp_association *asoc,
+ struct sctp_association *new)
+{
+ struct net *net = sock_net(asoc->base.sk);
+ struct sctp_chunk *abort;
+
+ if (!sctp_assoc_update(asoc, new))
+ return;
+
+ abort = sctp_make_abort(asoc, NULL, sizeof(sctp_errhdr_t));
+ if (abort) {
+ sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
+ sctp_add_cmd_sf(cmds, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+ }
+ sctp_add_cmd_sf(cmds, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED));
+ sctp_add_cmd_sf(cmds, SCTP_CMD_ASSOC_FAILED,
+ SCTP_PERR(SCTP_ERROR_RSRC_LOW));
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+}
+
/* Helper function to change the state of an association. */
static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds,
struct sctp_association *asoc,
@@ -1294,7 +1316,7 @@
break;
case SCTP_CMD_UPDATE_ASSOC:
- sctp_assoc_update(asoc, cmd->obj.asoc);
+ sctp_cmd_assoc_update(commands, asoc, cmd->obj.asoc);
break;
case SCTP_CMD_PURGE_OUTQUEUE:
@@ -1748,6 +1770,10 @@
break;
case SCTP_CMD_SET_ASOC:
+ if (asoc && local_cork) {
+ sctp_outq_uncork(&asoc->outqueue, gfp);
+ local_cork = 0;
+ }
asoc = cmd->obj.asoc;
break;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index f863b55..8feff96 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -770,8 +770,8 @@
auth.skb = chunk->auth_chunk;
auth.asoc = chunk->asoc;
auth.sctp_hdr = chunk->sctp_hdr;
- auth.chunk_hdr = (sctp_chunkhdr_t *)skb_push(chunk->auth_chunk,
- sizeof(sctp_chunkhdr_t));
+ auth.chunk_hdr = skb_push(chunk->auth_chunk,
+ sizeof(sctp_chunkhdr_t));
skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t));
auth.transport = chunk->transport;
@@ -3958,7 +3958,7 @@
/* Silently discard the chunk if stream-id is not valid */
sctp_walk_fwdtsn(skip, chunk) {
- if (ntohs(skip->stream) >= asoc->stream->incnt)
+ if (ntohs(skip->stream) >= asoc->stream.incnt)
goto discard_noforce;
}
@@ -4029,7 +4029,7 @@
/* Silently discard the chunk if stream-id is not valid */
sctp_walk_fwdtsn(skip, chunk) {
- if (ntohs(skip->stream) >= asoc->stream->incnt)
+ if (ntohs(skip->stream) >= asoc->stream.incnt)
goto gen_shutdown;
}
@@ -6365,7 +6365,7 @@
* and discard the DATA chunk.
*/
sid = ntohs(data_hdr->stream);
- if (sid >= asoc->stream->incnt) {
+ if (sid >= asoc->stream.incnt) {
/* Mark tsn as received even though we drop it */
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
@@ -6387,7 +6387,7 @@
* and is invalid.
*/
ssn = ntohs(data_hdr->ssn);
- if (ordered && SSN_lt(ssn, sctp_ssn_peek(asoc->stream, in, sid)))
+ if (ordered && SSN_lt(ssn, sctp_ssn_peek(&asoc->stream, in, sid)))
return SCTP_IERROR_PROTO_VIOLATION;
/* Send the data up to the user. Note: Schedule the
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 3a8318e..7b6e20e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -103,7 +103,7 @@
static void sctp_sock_migrate(struct sock *, struct sock *,
struct sctp_association *, sctp_socket_type_t);
-static int sctp_memory_pressure;
+static unsigned long sctp_memory_pressure;
static atomic_long_t sctp_memory_allocated;
struct percpu_counter sctp_sockets_allocated;
@@ -1494,7 +1494,7 @@
pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
- lock_sock(sk);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
sk->sk_shutdown = SHUTDOWN_MASK;
sk->sk_state = SCTP_SS_CLOSING;
@@ -1544,7 +1544,7 @@
* held and that should be grabbed before socket lock.
*/
spin_lock_bh(&net->sctp.addr_wq_lock);
- bh_lock_sock(sk);
+ bh_lock_sock_nested(sk);
/* Hold the sock, since sk_common_release() will put sock_put()
* and we have just a little more cleanup.
@@ -1920,7 +1920,7 @@
}
/* Check for invalid stream. */
- if (sinfo->sinfo_stream >= asoc->stream->outcnt) {
+ if (sinfo->sinfo_stream >= asoc->stream.outcnt) {
err = -EINVAL;
goto out_free;
}
@@ -4497,8 +4497,8 @@
info->sctpi_rwnd = asoc->a_rwnd;
info->sctpi_unackdata = asoc->unack_data;
info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
- info->sctpi_instrms = asoc->stream->incnt;
- info->sctpi_outstrms = asoc->stream->outcnt;
+ info->sctpi_instrms = asoc->stream.incnt;
+ info->sctpi_outstrms = asoc->stream.outcnt;
list_for_each(pos, &asoc->base.inqueue.in_chunk_list)
info->sctpi_inqueue++;
list_for_each(pos, &asoc->outqueue.out_chunk_list)
@@ -4726,8 +4726,8 @@
status.sstat_unackdata = asoc->unack_data;
status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
- status.sstat_instrms = asoc->stream->incnt;
- status.sstat_outstrms = asoc->stream->outcnt;
+ status.sstat_instrms = asoc->stream.incnt;
+ status.sstat_outstrms = asoc->stream.outcnt;
status.sstat_fragmentation_point = asoc->frag_point;
status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
@@ -6599,10 +6599,10 @@
goto out;
asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
- if (!asoc || params.sprstat_sid >= asoc->stream->outcnt)
+ if (!asoc || params.sprstat_sid >= asoc->stream.outcnt)
goto out;
- streamout = &asoc->stream->out[params.sprstat_sid];
+ streamout = &asoc->stream.out[params.sprstat_sid];
if (policy == SCTP_PR_SCTP_NONE) {
params.sprstat_abandoned_unsent = 0;
params.sprstat_abandoned_sent = 0;
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index dda53a2..82e6d40 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -35,70 +35,43 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
-int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp)
+int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
+ gfp_t gfp)
{
- struct sctp_stream *stream;
- int i;
-
- stream = kzalloc(sizeof(*stream), gfp);
- if (!stream)
- return -ENOMEM;
-
- stream->outcnt = asoc->c.sinit_num_ostreams;
- stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp);
- if (!stream->out) {
- kfree(stream);
- return -ENOMEM;
- }
- for (i = 0; i < stream->outcnt; i++)
- stream->out[i].state = SCTP_STREAM_OPEN;
-
- asoc->stream = stream;
-
- return 0;
-}
-
-int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp)
-{
- struct sctp_stream *stream = asoc->stream;
int i;
/* Initial stream->out size may be very big, so free it and alloc
* a new one with new outcnt to save memory.
*/
kfree(stream->out);
- stream->outcnt = asoc->c.sinit_num_ostreams;
- stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp);
- if (!stream->out)
- goto nomem;
+ stream->out = kcalloc(outcnt, sizeof(*stream->out), gfp);
+ if (!stream->out)
+ return -ENOMEM;
+
+ stream->outcnt = outcnt;
for (i = 0; i < stream->outcnt; i++)
stream->out[i].state = SCTP_STREAM_OPEN;
- stream->incnt = asoc->c.sinit_max_instreams;
- stream->in = kcalloc(stream->incnt, sizeof(*stream->in), gfp);
+ if (!incnt)
+ return 0;
+
+ stream->in = kcalloc(incnt, sizeof(*stream->in), gfp);
if (!stream->in) {
kfree(stream->out);
- goto nomem;
+ stream->out = NULL;
+ return -ENOMEM;
}
+ stream->incnt = incnt;
+
return 0;
-
-nomem:
- asoc->stream = NULL;
- kfree(stream);
-
- return -ENOMEM;
}
void sctp_stream_free(struct sctp_stream *stream)
{
- if (unlikely(!stream))
- return;
-
kfree(stream->out);
kfree(stream->in);
- kfree(stream);
}
void sctp_stream_clear(struct sctp_stream *stream)
@@ -112,6 +85,19 @@
stream->in[i].ssn = 0;
}
+void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new)
+{
+ sctp_stream_free(stream);
+
+ stream->out = new->out;
+ stream->in = new->in;
+ stream->outcnt = new->outcnt;
+ stream->incnt = new->incnt;
+
+ new->out = NULL;
+ new->in = NULL;
+}
+
static int sctp_send_reconf(struct sctp_association *asoc,
struct sctp_chunk *chunk)
{
@@ -128,7 +114,7 @@
int sctp_send_reset_streams(struct sctp_association *asoc,
struct sctp_reset_streams *params)
{
- struct sctp_stream *stream = asoc->stream;
+ struct sctp_stream *stream = &asoc->stream;
__u16 i, str_nums, *str_list;
struct sctp_chunk *chunk;
int retval = -EINVAL;
@@ -214,6 +200,7 @@
int sctp_send_reset_assoc(struct sctp_association *asoc)
{
+ struct sctp_stream *stream = &asoc->stream;
struct sctp_chunk *chunk = NULL;
int retval;
__u16 i;
@@ -230,8 +217,8 @@
return -ENOMEM;
/* Block further xmit of data until this request is completed */
- for (i = 0; i < asoc->stream->outcnt; i++)
- asoc->stream->out[i].state = SCTP_STREAM_CLOSED;
+ for (i = 0; i < stream->outcnt; i++)
+ stream->out[i].state = SCTP_STREAM_CLOSED;
asoc->strreset_chunk = chunk;
sctp_chunk_hold(asoc->strreset_chunk);
@@ -241,8 +228,8 @@
sctp_chunk_put(asoc->strreset_chunk);
asoc->strreset_chunk = NULL;
- for (i = 0; i < asoc->stream->outcnt; i++)
- asoc->stream->out[i].state = SCTP_STREAM_OPEN;
+ for (i = 0; i < stream->outcnt; i++)
+ stream->out[i].state = SCTP_STREAM_OPEN;
return retval;
}
@@ -255,7 +242,7 @@
int sctp_send_add_streams(struct sctp_association *asoc,
struct sctp_add_streams *params)
{
- struct sctp_stream *stream = asoc->stream;
+ struct sctp_stream *stream = &asoc->stream;
struct sctp_chunk *chunk = NULL;
int retval = -ENOMEM;
__u32 outcnt, incnt;
@@ -357,7 +344,7 @@
struct sctp_ulpevent **evp)
{
struct sctp_strreset_outreq *outreq = param.v;
- struct sctp_stream *stream = asoc->stream;
+ struct sctp_stream *stream = &asoc->stream;
__u16 i, nums, flags = 0, *str_p = NULL;
__u32 result = SCTP_STRRESET_DENIED;
__u32 request_seq;
@@ -449,7 +436,7 @@
struct sctp_ulpevent **evp)
{
struct sctp_strreset_inreq *inreq = param.v;
- struct sctp_stream *stream = asoc->stream;
+ struct sctp_stream *stream = &asoc->stream;
__u32 result = SCTP_STRRESET_DENIED;
struct sctp_chunk *chunk = NULL;
__u16 i, nums, *str_p;
@@ -523,7 +510,7 @@
{
__u32 init_tsn = 0, next_tsn = 0, max_tsn_seen;
struct sctp_strreset_tsnreq *tsnreq = param.v;
- struct sctp_stream *stream = asoc->stream;
+ struct sctp_stream *stream = &asoc->stream;
__u32 result = SCTP_STRRESET_DENIED;
__u32 request_seq;
__u16 i;
@@ -612,7 +599,7 @@
struct sctp_ulpevent **evp)
{
struct sctp_strreset_addstrm *addstrm = param.v;
- struct sctp_stream *stream = asoc->stream;
+ struct sctp_stream *stream = &asoc->stream;
__u32 result = SCTP_STRRESET_DENIED;
struct sctp_stream_in *streamin;
__u32 request_seq, incnt;
@@ -687,7 +674,7 @@
struct sctp_ulpevent **evp)
{
struct sctp_strreset_addstrm *addstrm = param.v;
- struct sctp_stream *stream = asoc->stream;
+ struct sctp_stream *stream = &asoc->stream;
__u32 result = SCTP_STRRESET_DENIED;
struct sctp_stream_out *streamout;
struct sctp_chunk *chunk = NULL;
@@ -758,8 +745,8 @@
union sctp_params param,
struct sctp_ulpevent **evp)
{
+ struct sctp_stream *stream = &asoc->stream;
struct sctp_strreset_resp *resp = param.v;
- struct sctp_stream *stream = asoc->stream;
struct sctp_transport *t;
__u16 i, nums, flags = 0;
sctp_paramhdr_t *req;
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index ec2b3e0..17854fb 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -153,8 +153,7 @@
sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
/* Include the notification structure */
- sac = (struct sctp_assoc_change *)
- skb_push(skb, sizeof(struct sctp_assoc_change));
+ sac = skb_push(skb, sizeof(struct sctp_assoc_change));
/* Trim the buffer to the right length. */
skb_trim(skb, sizeof(struct sctp_assoc_change) +
@@ -167,8 +166,7 @@
goto fail;
skb = sctp_event2skb(event);
- sac = (struct sctp_assoc_change *) skb_put(skb,
- sizeof(struct sctp_assoc_change));
+ sac = skb_put(skb, sizeof(struct sctp_assoc_change));
}
/* Socket Extensions for SCTP
@@ -270,8 +268,7 @@
goto fail;
skb = sctp_event2skb(event);
- spc = (struct sctp_paddr_change *)
- skb_put(skb, sizeof(struct sctp_paddr_change));
+ spc = skb_put(skb, sizeof(struct sctp_paddr_change));
/* Sockets API Extensions for SCTP
* Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
@@ -402,7 +399,7 @@
event = sctp_skb2event(skb);
sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
- sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
+ sre = skb_push(skb, sizeof(*sre));
/* Trim the buffer to the right length. */
skb_trim(skb, sizeof(*sre) + elen);
@@ -453,8 +450,7 @@
event = sctp_skb2event(skb);
sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
- ssf = (struct sctp_send_failed *)
- skb_push(skb, sizeof(struct sctp_send_failed));
+ ssf = skb_push(skb, sizeof(struct sctp_send_failed));
/* Socket Extensions for SCTP
* 5.3.1.4 SCTP_SEND_FAILED
@@ -549,8 +545,7 @@
goto fail;
skb = sctp_event2skb(event);
- sse = (struct sctp_shutdown_event *)
- skb_put(skb, sizeof(struct sctp_shutdown_event));
+ sse = skb_put(skb, sizeof(struct sctp_shutdown_event));
/* Socket Extensions for SCTP
* 5.3.1.5 SCTP_SHUTDOWN_EVENT
@@ -612,8 +607,7 @@
goto fail;
skb = sctp_event2skb(event);
- sai = (struct sctp_adaptation_event *)
- skb_put(skb, sizeof(struct sctp_adaptation_event));
+ sai = skb_put(skb, sizeof(struct sctp_adaptation_event));
sai->sai_type = SCTP_ADAPTATION_INDICATION;
sai->sai_flags = 0;
@@ -751,8 +745,7 @@
goto fail;
skb = sctp_event2skb(event);
- pd = (struct sctp_pdapi_event *)
- skb_put(skb, sizeof(struct sctp_pdapi_event));
+ pd = skb_put(skb, sizeof(struct sctp_pdapi_event));
/* pdapi_type
* It should be SCTP_PARTIAL_DELIVERY_EVENT
@@ -803,8 +796,7 @@
goto fail;
skb = sctp_event2skb(event);
- ak = (struct sctp_authkey_event *)
- skb_put(skb, sizeof(struct sctp_authkey_event));
+ ak = skb_put(skb, sizeof(struct sctp_authkey_event));
ak->auth_type = SCTP_AUTHENTICATION_EVENT;
ak->auth_flags = 0;
@@ -842,8 +834,7 @@
return NULL;
skb = sctp_event2skb(event);
- sdry = (struct sctp_sender_dry_event *)
- skb_put(skb, sizeof(struct sctp_sender_dry_event));
+ sdry = skb_put(skb, sizeof(struct sctp_sender_dry_event));
sdry->sender_dry_type = SCTP_SENDER_DRY_EVENT;
sdry->sender_dry_flags = 0;
@@ -869,7 +860,7 @@
return NULL;
skb = sctp_event2skb(event);
- sreset = (struct sctp_stream_reset_event *)skb_put(skb, length);
+ sreset = skb_put(skb, length);
sreset->strreset_type = SCTP_STREAM_RESET_EVENT;
sreset->strreset_flags = flags;
@@ -897,8 +888,7 @@
return NULL;
skb = sctp_event2skb(event);
- areset = (struct sctp_assoc_reset_event *)
- skb_put(skb, sizeof(struct sctp_assoc_reset_event));
+ areset = skb_put(skb, sizeof(struct sctp_assoc_reset_event));
areset->assocreset_type = SCTP_ASSOC_RESET_EVENT;
areset->assocreset_flags = flags;
@@ -925,8 +915,7 @@
return NULL;
skb = sctp_event2skb(event);
- schange = (struct sctp_stream_change_event *)
- skb_put(skb, sizeof(struct sctp_stream_change_event));
+ schange = skb_put(skb, sizeof(struct sctp_stream_change_event));
schange->strchange_type = SCTP_STREAM_CHANGE_EVENT;
schange->strchange_flags = flags;
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index aa3624d..25f7e41 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -764,7 +764,7 @@
__u16 sid, csid, cssn;
sid = event->stream;
- stream = ulpq->asoc->stream;
+ stream = &ulpq->asoc->stream;
event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
@@ -858,7 +858,7 @@
/* Note: The stream ID must be verified before this routine. */
sid = event->stream;
ssn = event->ssn;
- stream = ulpq->asoc->stream;
+ stream = &ulpq->asoc->stream;
/* Is this the expected SSN for this stream ID? */
if (ssn != sctp_ssn_peek(stream, in, sid)) {
@@ -893,7 +893,7 @@
struct sk_buff_head *lobby = &ulpq->lobby;
__u16 csid, cssn;
- stream = ulpq->asoc->stream;
+ stream = &ulpq->asoc->stream;
/* We are holding the chunks by stream, by SSN. */
skb_queue_head_init(&temp);
@@ -958,7 +958,7 @@
struct sctp_stream *stream;
/* Note: The stream ID must be verified before this routine. */
- stream = ulpq->asoc->stream;
+ stream = &ulpq->asoc->stream;
/* Is this an old SSN? If so ignore. */
if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
diff --git a/net/socket.c b/net/socket.c
index c2564eb..8f9dab3 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -461,7 +461,7 @@
* @err: pointer to an error code return
*
* The file handle passed in is locked and the socket it is bound
- * too is returned. If an error occurs the err pointer is overwritten
+ * to is returned. If an error occurs the err pointer is overwritten
* with a negative errno code and NULL is returned. The function checks
* for both invalid handles and passing a handle which is not a socket.
*
@@ -662,6 +662,40 @@
return skb->pkt_type == PACKET_OUTGOING;
}
+/* On transmit, software and hardware timestamps are returned independently.
+ * As the two skb clones share the hardware timestamp, which may be updated
+ * before the software timestamp is received, a hardware TX timestamp may be
+ * returned only if there is no software TX timestamp. Ignore false software
+ * timestamps, which may be made in the __sock_recv_timestamp() call when the
+ * option SO_TIMESTAMP(NS) is enabled on the socket, even when the skb has a
+ * hardware timestamp.
+ */
+static bool skb_is_swtx_tstamp(const struct sk_buff *skb, int false_tstamp)
+{
+ return skb->tstamp && !false_tstamp && skb_is_err_queue(skb);
+}
+
+static void put_ts_pktinfo(struct msghdr *msg, struct sk_buff *skb)
+{
+ struct scm_ts_pktinfo ts_pktinfo;
+ struct net_device *orig_dev;
+
+ if (!skb_mac_header_was_set(skb))
+ return;
+
+ memset(&ts_pktinfo, 0, sizeof(ts_pktinfo));
+
+ rcu_read_lock();
+ orig_dev = dev_get_by_napi_id(skb_napi_id(skb));
+ if (orig_dev)
+ ts_pktinfo.if_index = orig_dev->ifindex;
+ rcu_read_unlock();
+
+ ts_pktinfo.pkt_length = skb->len - skb_mac_offset(skb);
+ put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_PKTINFO,
+ sizeof(ts_pktinfo), &ts_pktinfo);
+}
+
/*
* called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
*/
@@ -670,14 +704,16 @@
{
int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP);
struct scm_timestamping tss;
- int empty = 1;
+ int empty = 1, false_tstamp = 0;
struct skb_shared_hwtstamps *shhwtstamps =
skb_hwtstamps(skb);
/* Race occurred between timestamp enabling and packet
receiving. Fill in the current time for now. */
- if (need_software_tstamp && skb->tstamp == 0)
+ if (need_software_tstamp && skb->tstamp == 0) {
__net_timestamp(skb);
+ false_tstamp = 1;
+ }
if (need_software_tstamp) {
if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) {
@@ -699,8 +735,13 @@
empty = 0;
if (shhwtstamps &&
(sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
- ktime_to_timespec_cond(shhwtstamps->hwtstamp, tss.ts + 2))
+ !skb_is_swtx_tstamp(skb, false_tstamp) &&
+ ktime_to_timespec_cond(shhwtstamps->hwtstamp, tss.ts + 2)) {
empty = 0;
+ if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) &&
+ !skb_is_err_queue(skb))
+ put_ts_pktinfo(msg, skb);
+ }
if (!empty) {
put_cmsg(msg, SOL_SOCKET,
SCM_TIMESTAMPING, sizeof(tss), &tss);
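
[Editor's note] For the new SOF_TIMESTAMPING_OPT_PKTINFO flag, a hedged userspace sketch; the constants and struct scm_ts_pktinfo come from the uapi headers extended by this series, with a fallback define to guard older toolchains:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

#ifndef SCM_TIMESTAMPING_PKTINFO
#define SCM_TIMESTAMPING_PKTINFO 58
#endif

static void enable_rx_pktinfo(int fd)
{
	int val = SOF_TIMESTAMPING_RX_HARDWARE |
		  SOF_TIMESTAMPING_RAW_HARDWARE |
		  SOF_TIMESTAMPING_OPT_PKTINFO;

	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
}

/* after recvmsg(fd, msg, 0): walk the ancillary data */
static void dump_rx_pktinfo(struct msghdr *msg)
{
	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		struct scm_ts_pktinfo *p;

		if (cmsg->cmsg_level != SOL_SOCKET ||
		    cmsg->cmsg_type != SCM_TIMESTAMPING_PKTINFO)
			continue;
		p = (struct scm_ts_pktinfo *)CMSG_DATA(cmsg);
		printf("rx ifindex %u, length %u\n",
		       p->if_index, p->pkt_length);
	}
}
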
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 8d40a7d..25dc67ef 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -571,24 +571,17 @@
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);
-static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
+static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
/**
* register_switchdev_notifier - Register notifier
* @nb: notifier_block
*
- * Register switch device notifier. This should be used by code
- * which needs to monitor events happening in particular device.
- * Return values are same as for atomic_notifier_chain_register().
+ * Register switch device notifier.
*/
int register_switchdev_notifier(struct notifier_block *nb)
{
- int err;
-
- rtnl_lock();
- err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
- rtnl_unlock();
- return err;
+ return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);
@@ -597,16 +590,10 @@
* @nb: notifier_block
*
* Unregister switch device notifier.
- * Return values are same as for atomic_notifier_chain_unregister().
*/
int unregister_switchdev_notifier(struct notifier_block *nb)
{
- int err;
-
- rtnl_lock();
- err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
- rtnl_unlock();
- return err;
+ return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
@@ -616,18 +603,13 @@
* @dev: port device
* @info: notifier information data
*
- * Call all network notifier blocks. This should be called by driver
- * when it needs to propagate hardware event.
- * Return values are same as for atomic_notifier_call_chain().
- * rtnl_lock must be held.
+ * Call all network notifier blocks.
*/
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
struct switchdev_notifier_info *info)
{
- ASSERT_RTNL();
-
info->dev = dev;
- return raw_notifier_call_chain(&switchdev_notif_chain, val, info);
+ return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
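Since the chain is now atomic, registration no longer needs rtnl_lock, but a
handler may be invoked from atomic context. A hedged driver-side sketch
(my_switchdev_event is a hypothetical handler):

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <net/switchdev.h>

static int my_switchdev_event(struct notifier_block *nb,
			      unsigned long event, void *ptr)
{
	struct switchdev_notifier_info *info = ptr;

	netdev_dbg(info->dev, "switchdev event %lu\n", event);
	/* must not sleep here; defer blocking work via schedule_work() */
	return NOTIFY_DONE;
}

static struct notifier_block my_switchdev_nb = {
	.notifier_call = my_switchdev_event,
};

/* probe:  register_switchdev_notifier(&my_switchdev_nb);   */
/* remove: unregister_switchdev_notifier(&my_switchdev_nb); */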
diff --git a/net/tls/Kconfig b/net/tls/Kconfig
new file mode 100644
index 0000000..eb58303
--- /dev/null
+++ b/net/tls/Kconfig
@@ -0,0 +1,15 @@
+#
+# TLS configuration
+#
+config TLS
+ tristate "Transport Layer Security support"
+ depends on INET
+ select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_GCM
+ default n
+ ---help---
+	  Enable kernel support for the TLS protocol. This allows symmetric
+ encryption handling of the TLS protocol to be done in-kernel.
+
+ If unsure, say N.
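A hedged userspace sketch of turning the offload on for a connected TCP socket
(struct handshake_keys is a hypothetical container for the handshake output;
the TCP_ULP/SOL_TLS fallback defines assume the numeric values used by this
series, in case the installed headers predate it):

#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <linux/tls.h>

#ifndef TCP_ULP
#define TCP_ULP 31
#endif
#ifndef SOL_TLS
#define SOL_TLS 282
#endif

struct handshake_keys {	/* hypothetical: filled from a completed handshake */
	unsigned char key[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
	unsigned char iv[TLS_CIPHER_AES_GCM_128_IV_SIZE];
	unsigned char salt[TLS_CIPHER_AES_GCM_128_SALT_SIZE];
	unsigned char rec_seq[TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE];
};

static int enable_ktls_tx(int fd, const struct handshake_keys *hk)
{
	struct tls12_crypto_info_aes_gcm_128 ci;

	memset(&ci, 0, sizeof(ci));
	ci.info.version = TLS_1_2_VERSION;
	ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
	memcpy(ci.key, hk->key, sizeof(ci.key));
	memcpy(ci.iv, hk->iv, sizeof(ci.iv));
	memcpy(ci.salt, hk->salt, sizeof(ci.salt));
	memcpy(ci.rec_seq, hk->rec_seq, sizeof(ci.rec_seq));

	if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;
	return setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}

After both calls succeed, plain send()/write() on the socket goes through the
encrypting sendmsg path added below.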
diff --git a/net/tls/Makefile b/net/tls/Makefile
new file mode 100644
index 0000000..a930fd1
--- /dev/null
+++ b/net/tls/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the TLS subsystem.
+#
+
+obj-$(CONFIG_TLS) += tls.o
+
+tls-y := tls_main.o tls_sw.o
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
new file mode 100644
index 0000000..2ebc328
--- /dev/null
+++ b/net/tls/tls_main.c
@@ -0,0 +1,487 @@
+/*
+ * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#include <net/tcp.h>
+#include <net/inet_common.h>
+#include <linux/highmem.h>
+#include <linux/netdevice.h>
+#include <linux/sched/signal.h>
+
+#include <net/tls.h>
+
+MODULE_AUTHOR("Mellanox Technologies");
+MODULE_DESCRIPTION("Transport Layer Security Support");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct proto tls_base_prot;
+static struct proto tls_sw_prot;
+
+int wait_on_pending_writer(struct sock *sk, long *timeo)
+{
+ int rc = 0;
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ while (1) {
+ if (!*timeo) {
+ rc = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ rc = sock_intr_errno(*timeo);
+ break;
+ }
+
+ if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
+ break;
+ }
+ remove_wait_queue(sk_sleep(sk), &wait);
+ return rc;
+}
+
+int tls_push_sg(struct sock *sk,
+ struct tls_context *ctx,
+ struct scatterlist *sg,
+ u16 first_offset,
+ int flags)
+{
+ int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
+ int ret = 0;
+ struct page *p;
+ size_t size;
+ int offset = first_offset;
+
+ size = sg->length - offset;
+ offset += sg->offset;
+
+ while (1) {
+ if (sg_is_last(sg))
+ sendpage_flags = flags;
+
+ /* is sending application-limited? */
+ tcp_rate_check_app_limited(sk);
+ p = sg_page(sg);
+retry:
+ ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);
+
+ if (ret != size) {
+ if (ret > 0) {
+ offset += ret;
+ size -= ret;
+ goto retry;
+ }
+
+ offset -= sg->offset;
+ ctx->partially_sent_offset = offset;
+ ctx->partially_sent_record = (void *)sg;
+ return ret;
+ }
+
+ put_page(p);
+ sk_mem_uncharge(sk, sg->length);
+ sg = sg_next(sg);
+ if (!sg)
+ break;
+
+ offset = sg->offset;
+ size = sg->length;
+ }
+
+ clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
+
+ return 0;
+}
+
+static int tls_handle_open_record(struct sock *sk, int flags)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+
+ if (tls_is_pending_open_record(ctx))
+ return ctx->push_pending_record(sk, flags);
+
+ return 0;
+}
+
+int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
+ unsigned char *record_type)
+{
+ struct cmsghdr *cmsg;
+ int rc = -EINVAL;
+
+ for_each_cmsghdr(cmsg, msg) {
+ if (!CMSG_OK(msg, cmsg))
+ return -EINVAL;
+ if (cmsg->cmsg_level != SOL_TLS)
+ continue;
+
+ switch (cmsg->cmsg_type) {
+ case TLS_SET_RECORD_TYPE:
+ if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
+ return -EINVAL;
+
+ if (msg->msg_flags & MSG_MORE)
+ return -EINVAL;
+
+ rc = tls_handle_open_record(sk, msg->msg_flags);
+ if (rc)
+ return rc;
+
+ *record_type = *(unsigned char *)CMSG_DATA(cmsg);
+ rc = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return rc;
+}
+
+int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx,
+ int flags, long *timeo)
+{
+ struct scatterlist *sg;
+ u16 offset;
+
+ if (!tls_is_partially_sent_record(ctx))
+ return ctx->push_pending_record(sk, flags);
+
+ sg = ctx->partially_sent_record;
+ offset = ctx->partially_sent_offset;
+
+ ctx->partially_sent_record = NULL;
+ return tls_push_sg(sk, ctx, sg, offset, flags);
+}
+
+static void tls_write_space(struct sock *sk)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+
+ if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
+ gfp_t sk_allocation = sk->sk_allocation;
+ int rc;
+ long timeo = 0;
+
+ sk->sk_allocation = GFP_ATOMIC;
+ rc = tls_push_pending_closed_record(sk, ctx,
+ MSG_DONTWAIT |
+ MSG_NOSIGNAL,
+ &timeo);
+ sk->sk_allocation = sk_allocation;
+
+ if (rc < 0)
+ return;
+ }
+
+ ctx->sk_write_space(sk);
+}
+
+static void tls_sk_proto_close(struct sock *sk, long timeout)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+ long timeo = sock_sndtimeo(sk, 0);
+ void (*sk_proto_close)(struct sock *sk, long timeout);
+
+ lock_sock(sk);
+
+ if (!tls_complete_pending_work(sk, ctx, 0, &timeo))
+ tls_handle_open_record(sk, 0);
+
+ if (ctx->partially_sent_record) {
+ struct scatterlist *sg = ctx->partially_sent_record;
+
+ while (1) {
+ put_page(sg_page(sg));
+ sk_mem_uncharge(sk, sg->length);
+
+ if (sg_is_last(sg))
+ break;
+ sg++;
+ }
+ }
+ ctx->free_resources(sk);
+ kfree(ctx->rec_seq);
+ kfree(ctx->iv);
+
+ sk_proto_close = ctx->sk_proto_close;
+ kfree(ctx);
+
+ release_sock(sk);
+ sk_proto_close(sk, timeout);
+}
+
+static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
+ int __user *optlen)
+{
+ int rc = 0;
+ struct tls_context *ctx = tls_get_ctx(sk);
+ struct tls_crypto_info *crypto_info;
+ int len;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ if (!optval || (len < sizeof(*crypto_info))) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (!ctx) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /* get user crypto info */
+ crypto_info = &ctx->crypto_send;
+
+ if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+	if (len == sizeof(*crypto_info)) {
+ rc = copy_to_user(optval, crypto_info, sizeof(*crypto_info));
+ goto out;
+ }
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *
+ crypto_info_aes_gcm_128 =
+ container_of(crypto_info,
+ struct tls12_crypto_info_aes_gcm_128,
+ info);
+
+ if (len != sizeof(*crypto_info_aes_gcm_128)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ lock_sock(sk);
+ memcpy(crypto_info_aes_gcm_128->iv, ctx->iv,
+ TLS_CIPHER_AES_GCM_128_IV_SIZE);
+ release_sock(sk);
+ rc = copy_to_user(optval,
+ crypto_info_aes_gcm_128,
+ sizeof(*crypto_info_aes_gcm_128));
+ break;
+ }
+ default:
+ rc = -EINVAL;
+ }
+
+out:
+ return rc;
+}
+
+static int do_tls_getsockopt(struct sock *sk, int optname,
+ char __user *optval, int __user *optlen)
+{
+ int rc = 0;
+
+ switch (optname) {
+ case TLS_TX:
+ rc = do_tls_getsockopt_tx(sk, optval, optlen);
+ break;
+ default:
+ rc = -ENOPROTOOPT;
+ break;
+ }
+ return rc;
+}
+
+static int tls_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+
+ if (level != SOL_TLS)
+ return ctx->getsockopt(sk, level, optname, optval, optlen);
+
+ return do_tls_getsockopt(sk, optname, optval, optlen);
+}
+
+static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
+ unsigned int optlen)
+{
+ struct tls_crypto_info *crypto_info, tmp_crypto_info;
+ struct tls_context *ctx = tls_get_ctx(sk);
+ struct proto *prot = NULL;
+ int rc = 0;
+
+ if (!optval || (optlen < sizeof(*crypto_info))) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = copy_from_user(&tmp_crypto_info, optval, sizeof(*crypto_info));
+ if (rc) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ /* check version */
+ if (tmp_crypto_info.version != TLS_1_2_VERSION) {
+ rc = -ENOTSUPP;
+ goto out;
+ }
+
+ /* get user crypto info */
+ crypto_info = &ctx->crypto_send;
+
+	/* Currently we don't support setting crypto info more than once */
+	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
+		rc = -EBUSY;
+		goto out;
+	}
+
+ switch (tmp_crypto_info.cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ rc = copy_from_user(
+ crypto_info,
+ optval,
+ sizeof(struct tls12_crypto_info_aes_gcm_128));
+
+ if (rc) {
+ rc = -EFAULT;
+ goto err_crypto_info;
+ }
+ break;
+ }
+ default:
+ rc = -EINVAL;
+ goto out;
+ }
+
+ ctx->sk_write_space = sk->sk_write_space;
+ sk->sk_write_space = tls_write_space;
+
+ ctx->sk_proto_close = sk->sk_prot->close;
+
+	/* currently SW is the default; we will have ethtool in the future */
+ rc = tls_set_sw_offload(sk, ctx);
+ prot = &tls_sw_prot;
+ if (rc)
+ goto err_crypto_info;
+
+ sk->sk_prot = prot;
+ goto out;
+
+err_crypto_info:
+ memset(crypto_info, 0, sizeof(*crypto_info));
+out:
+ return rc;
+}
+
+static int do_tls_setsockopt(struct sock *sk, int optname,
+ char __user *optval, unsigned int optlen)
+{
+ int rc = 0;
+
+ switch (optname) {
+ case TLS_TX:
+ lock_sock(sk);
+ rc = do_tls_setsockopt_tx(sk, optval, optlen);
+ release_sock(sk);
+ break;
+ default:
+ rc = -ENOPROTOOPT;
+ break;
+ }
+ return rc;
+}
+
+static int tls_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+
+ if (level != SOL_TLS)
+ return ctx->setsockopt(sk, level, optname, optval, optlen);
+
+ return do_tls_setsockopt(sk, optname, optval, optlen);
+}
+
+static int tls_init(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tls_context *ctx;
+ int rc = 0;
+
+ /* allocate tls context */
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ icsk->icsk_ulp_data = ctx;
+ ctx->setsockopt = sk->sk_prot->setsockopt;
+ ctx->getsockopt = sk->sk_prot->getsockopt;
+ sk->sk_prot = &tls_base_prot;
+out:
+ return rc;
+}
+
+static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
+ .name = "tls",
+ .owner = THIS_MODULE,
+ .init = tls_init,
+};
+
+static int __init tls_register(void)
+{
+ tls_base_prot = tcp_prot;
+ tls_base_prot.setsockopt = tls_setsockopt;
+ tls_base_prot.getsockopt = tls_getsockopt;
+
+ tls_sw_prot = tls_base_prot;
+ tls_sw_prot.sendmsg = tls_sw_sendmsg;
+ tls_sw_prot.sendpage = tls_sw_sendpage;
+ tls_sw_prot.close = tls_sk_proto_close;
+
+ tcp_register_ulp(&tcp_tls_ulp_ops);
+
+ return 0;
+}
+
+static void __exit tls_unregister(void)
+{
+ tcp_unregister_ulp(&tcp_tls_ulp_ops);
+}
+
+module_init(tls_register);
+module_exit(tls_unregister);
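A hedged companion sketch for tls_proccess_cmsg() above: sending a non-data
record (for example a TLS alert) by tagging sendmsg() with the
SOL_TLS/TLS_SET_RECORD_TYPE control message (constants assumed from this
series' linux/tls.h, with the same SOL_TLS fallback as in the earlier sketch):

#include <string.h>
#include <sys/socket.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282
#endif

static ssize_t send_tls_record(int fd, unsigned char type,
			       const void *data, size_t len)
{
	char cbuf[CMSG_SPACE(sizeof(type))];
	struct iovec iov = { .iov_base = (void *)data, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

	memset(cbuf, 0, sizeof(cbuf));
	cm->cmsg_level = SOL_TLS;
	cm->cmsg_type = TLS_SET_RECORD_TYPE;
	cm->cmsg_len = CMSG_LEN(sizeof(type));
	*CMSG_DATA(cm) = type;

	return sendmsg(fd, &msg, 0);
}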
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
new file mode 100644
index 0000000..fa596fa
--- /dev/null
+++ b/net/tls/tls_sw.c
@@ -0,0 +1,772 @@
+/*
+ * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
+ * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
+ * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
+ * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <crypto/aead.h>
+
+#include <net/tls.h>
+
+static inline void tls_make_aad(int recv,
+ char *buf,
+ size_t size,
+ char *record_sequence,
+ int record_sequence_size,
+ unsigned char record_type)
+{
+ memcpy(buf, record_sequence, record_sequence_size);
+
+ buf[8] = record_type;
+ buf[9] = TLS_1_2_VERSION_MAJOR;
+ buf[10] = TLS_1_2_VERSION_MINOR;
+ buf[11] = size >> 8;
+ buf[12] = size & 0xFF;
+}
+
+static void trim_sg(struct sock *sk, struct scatterlist *sg,
+ int *sg_num_elem, unsigned int *sg_size, int target_size)
+{
+ int i = *sg_num_elem - 1;
+ int trim = *sg_size - target_size;
+
+ if (trim <= 0) {
+ WARN_ON(trim < 0);
+ return;
+ }
+
+ *sg_size = target_size;
+ while (trim >= sg[i].length) {
+ trim -= sg[i].length;
+ sk_mem_uncharge(sk, sg[i].length);
+ put_page(sg_page(&sg[i]));
+ i--;
+
+ if (i < 0)
+ goto out;
+ }
+
+ sg[i].length -= trim;
+ sk_mem_uncharge(sk, trim);
+
+out:
+ *sg_num_elem = i + 1;
+}
+
+static void trim_both_sgl(struct sock *sk, int target_size)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
+
+ trim_sg(sk, ctx->sg_plaintext_data,
+ &ctx->sg_plaintext_num_elem,
+ &ctx->sg_plaintext_size,
+ target_size);
+
+ if (target_size > 0)
+ target_size += tls_ctx->overhead_size;
+
+ trim_sg(sk, ctx->sg_encrypted_data,
+ &ctx->sg_encrypted_num_elem,
+ &ctx->sg_encrypted_size,
+ target_size);
+}
+
+static int alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
+ int *sg_num_elem, unsigned int *sg_size,
+ int first_coalesce)
+{
+ struct page_frag *pfrag;
+ unsigned int size = *sg_size;
+ int num_elem = *sg_num_elem, use = 0, rc = 0;
+ struct scatterlist *sge;
+ unsigned int orig_offset;
+
+ len -= size;
+ pfrag = sk_page_frag(sk);
+
+ while (len > 0) {
+ if (!sk_page_frag_refill(sk, pfrag)) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ use = min_t(int, len, pfrag->size - pfrag->offset);
+
+ if (!sk_wmem_schedule(sk, use)) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ sk_mem_charge(sk, use);
+ size += use;
+ orig_offset = pfrag->offset;
+ pfrag->offset += use;
+
+ sge = sg + num_elem - 1;
+		if (num_elem > first_coalesce && sg_page(sge) == pfrag->page &&
+		    sge->offset + sge->length == orig_offset) {
+			sge->length += use;
+ } else {
+ sge++;
+ sg_unmark_end(sge);
+ sg_set_page(sge, pfrag->page, use, orig_offset);
+ get_page(pfrag->page);
+ ++num_elem;
+ if (num_elem == MAX_SKB_FRAGS) {
+ rc = -ENOSPC;
+ break;
+ }
+ }
+
+ len -= use;
+ }
+
+out:
+ *sg_size = size;
+ *sg_num_elem = num_elem;
+ return rc;
+}
+
+static int alloc_encrypted_sg(struct sock *sk, int len)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
+ int rc = 0;
+
+ rc = alloc_sg(sk, len, ctx->sg_encrypted_data,
+ &ctx->sg_encrypted_num_elem, &ctx->sg_encrypted_size, 0);
+
+ return rc;
+}
+
+static int alloc_plaintext_sg(struct sock *sk, int len)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
+ int rc = 0;
+
+ rc = alloc_sg(sk, len, ctx->sg_plaintext_data,
+ &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
+ tls_ctx->pending_open_record_frags);
+
+ return rc;
+}
+
+static void free_sg(struct sock *sk, struct scatterlist *sg,
+ int *sg_num_elem, unsigned int *sg_size)
+{
+ int i, n = *sg_num_elem;
+
+ for (i = 0; i < n; ++i) {
+ sk_mem_uncharge(sk, sg[i].length);
+ put_page(sg_page(&sg[i]));
+ }
+ *sg_num_elem = 0;
+ *sg_size = 0;
+}
+
+static void tls_free_both_sg(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
+
+ free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem,
+ &ctx->sg_encrypted_size);
+
+ free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
+ &ctx->sg_plaintext_size);
+}
+
+static int tls_do_encryption(struct tls_context *tls_ctx,
+ struct tls_sw_context *ctx, size_t data_len,
+ gfp_t flags)
+{
+ unsigned int req_size = sizeof(struct aead_request) +
+ crypto_aead_reqsize(ctx->aead_send);
+ struct aead_request *aead_req;
+ int rc;
+
+ aead_req = kmalloc(req_size, flags);
+ if (!aead_req)
+ return -ENOMEM;
+
+ ctx->sg_encrypted_data[0].offset += tls_ctx->prepend_size;
+ ctx->sg_encrypted_data[0].length -= tls_ctx->prepend_size;
+
+ aead_request_set_tfm(aead_req, ctx->aead_send);
+ aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
+ aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out,
+ data_len, tls_ctx->iv);
+ rc = crypto_aead_encrypt(aead_req);
+
+ ctx->sg_encrypted_data[0].offset -= tls_ctx->prepend_size;
+ ctx->sg_encrypted_data[0].length += tls_ctx->prepend_size;
+
+ kfree(aead_req);
+ return rc;
+}
+
+static int tls_push_record(struct sock *sk, int flags,
+ unsigned char record_type)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
+ int rc;
+
+ sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
+ sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);
+
+ tls_make_aad(0, ctx->aad_space, ctx->sg_plaintext_size,
+ tls_ctx->rec_seq, tls_ctx->rec_seq_size,
+ record_type);
+
+ tls_fill_prepend(tls_ctx,
+ page_address(sg_page(&ctx->sg_encrypted_data[0])) +
+ ctx->sg_encrypted_data[0].offset,
+ ctx->sg_plaintext_size, record_type);
+
+ tls_ctx->pending_open_record_frags = 0;
+ set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);
+
+ rc = tls_do_encryption(tls_ctx, ctx, ctx->sg_plaintext_size,
+ sk->sk_allocation);
+ if (rc < 0) {
+ /* If we are called from write_space and
+ * we fail, we need to set this SOCK_NOSPACE
+ * to trigger another write_space in the future.
+ */
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ return rc;
+ }
+
+ free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
+ &ctx->sg_plaintext_size);
+
+ ctx->sg_encrypted_num_elem = 0;
+ ctx->sg_encrypted_size = 0;
+
+ /* Only pass through MSG_DONTWAIT and MSG_NOSIGNAL flags */
+ rc = tls_push_sg(sk, tls_ctx, ctx->sg_encrypted_data, 0, flags);
+ if (rc < 0 && rc != -EAGAIN)
+ tls_err_abort(sk);
+
+ tls_advance_record_sn(sk, tls_ctx);
+ return rc;
+}
+
+static int tls_sw_push_pending_record(struct sock *sk, int flags)
+{
+ return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
+}
+
+static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
+ int length)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
+ struct page *pages[MAX_SKB_FRAGS];
+
+ size_t offset;
+ ssize_t copied, use;
+ int i = 0;
+ unsigned int size = ctx->sg_plaintext_size;
+ int num_elem = ctx->sg_plaintext_num_elem;
+ int rc = 0;
+ int maxpages;
+
+ while (length > 0) {
+ i = 0;
+ maxpages = ARRAY_SIZE(ctx->sg_plaintext_data) - num_elem;
+ if (maxpages == 0) {
+ rc = -EFAULT;
+ goto out;
+ }
+ copied = iov_iter_get_pages(from, pages,
+ length,
+ maxpages, &offset);
+ if (copied <= 0) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ iov_iter_advance(from, copied);
+
+ length -= copied;
+ size += copied;
+ while (copied) {
+ use = min_t(int, copied, PAGE_SIZE - offset);
+
+ sg_set_page(&ctx->sg_plaintext_data[num_elem],
+ pages[i], use, offset);
+ sg_unmark_end(&ctx->sg_plaintext_data[num_elem]);
+ sk_mem_charge(sk, use);
+
+ offset = 0;
+ copied -= use;
+
+ ++i;
+ ++num_elem;
+ }
+ }
+
+out:
+ ctx->sg_plaintext_size = size;
+ ctx->sg_plaintext_num_elem = num_elem;
+ return rc;
+}
+
+static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
+ int bytes)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
+ struct scatterlist *sg = ctx->sg_plaintext_data;
+ int copy, i, rc = 0;
+
+ for (i = tls_ctx->pending_open_record_frags;
+ i < ctx->sg_plaintext_num_elem; ++i) {
+ copy = sg[i].length;
+ if (copy_from_iter(
+ page_address(sg_page(&sg[i])) + sg[i].offset,
+ copy, from) != copy) {
+ rc = -EFAULT;
+ goto out;
+ }
+ bytes -= copy;
+
+ ++tls_ctx->pending_open_record_frags;
+
+ if (!bytes)
+ break;
+ }
+
+out:
+ return rc;
+}
+
+int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
+ int ret = 0;
+ int required_size;
+ long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+ bool eor = !(msg->msg_flags & MSG_MORE);
+ size_t try_to_copy, copied = 0;
+ unsigned char record_type = TLS_RECORD_TYPE_DATA;
+ int record_room;
+ bool full_record;
+ int orig_size;
+
+ if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
+ return -ENOTSUPP;
+
+ lock_sock(sk);
+
+ if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo))
+ goto send_end;
+
+ if (unlikely(msg->msg_controllen)) {
+ ret = tls_proccess_cmsg(sk, msg, &record_type);
+ if (ret)
+ goto send_end;
+ }
+
+ while (msg_data_left(msg)) {
+ if (sk->sk_err) {
+ ret = sk->sk_err;
+ goto send_end;
+ }
+
+ orig_size = ctx->sg_plaintext_size;
+ full_record = false;
+ try_to_copy = msg_data_left(msg);
+ record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
+ if (try_to_copy >= record_room) {
+ try_to_copy = record_room;
+ full_record = true;
+ }
+
+ required_size = ctx->sg_plaintext_size + try_to_copy +
+ tls_ctx->overhead_size;
+
+ if (!sk_stream_memory_free(sk))
+ goto wait_for_sndbuf;
+alloc_encrypted:
+ ret = alloc_encrypted_sg(sk, required_size);
+ if (ret) {
+ if (ret != -ENOSPC)
+ goto wait_for_memory;
+
+ /* Adjust try_to_copy according to the amount that was
+ * actually allocated. The difference is due
+			 * to the max sg elements limit.
+ */
+ try_to_copy -= required_size - ctx->sg_encrypted_size;
+ full_record = true;
+ }
+
+ if (full_record || eor) {
+ ret = zerocopy_from_iter(sk, &msg->msg_iter,
+ try_to_copy);
+ if (ret)
+ goto fallback_to_reg_send;
+
+ copied += try_to_copy;
+ ret = tls_push_record(sk, msg->msg_flags, record_type);
+ if (!ret)
+ continue;
+ if (ret == -EAGAIN)
+ goto send_end;
+
+ copied -= try_to_copy;
+fallback_to_reg_send:
+ iov_iter_revert(&msg->msg_iter,
+ ctx->sg_plaintext_size - orig_size);
+ trim_sg(sk, ctx->sg_plaintext_data,
+ &ctx->sg_plaintext_num_elem,
+ &ctx->sg_plaintext_size,
+ orig_size);
+ }
+
+ required_size = ctx->sg_plaintext_size + try_to_copy;
+alloc_plaintext:
+ ret = alloc_plaintext_sg(sk, required_size);
+ if (ret) {
+ if (ret != -ENOSPC)
+ goto wait_for_memory;
+
+ /* Adjust try_to_copy according to the amount that was
+ * actually allocated. The difference is due
+			 * to the max sg elements limit.
+ */
+ try_to_copy -= required_size - ctx->sg_plaintext_size;
+ full_record = true;
+
+ trim_sg(sk, ctx->sg_encrypted_data,
+ &ctx->sg_encrypted_num_elem,
+ &ctx->sg_encrypted_size,
+ ctx->sg_plaintext_size +
+ tls_ctx->overhead_size);
+ }
+
+ ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
+ if (ret)
+ goto trim_sgl;
+
+ copied += try_to_copy;
+ if (full_record || eor) {
+push_record:
+ ret = tls_push_record(sk, msg->msg_flags, record_type);
+ if (ret) {
+ if (ret == -ENOMEM)
+ goto wait_for_memory;
+
+ goto send_end;
+ }
+ }
+
+ continue;
+
+wait_for_sndbuf:
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+wait_for_memory:
+ ret = sk_stream_wait_memory(sk, &timeo);
+ if (ret) {
+trim_sgl:
+ trim_both_sgl(sk, orig_size);
+ goto send_end;
+ }
+
+ if (tls_is_pending_closed_record(tls_ctx))
+ goto push_record;
+
+ if (ctx->sg_encrypted_size < required_size)
+ goto alloc_encrypted;
+
+ goto alloc_plaintext;
+ }
+
+send_end:
+ ret = sk_stream_error(sk, msg->msg_flags, ret);
+
+ release_sock(sk);
+ return copied ? copied : ret;
+}
+
+int tls_sw_sendpage(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
+ int ret = 0;
+ long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+ bool eor;
+ size_t orig_size = size;
+ unsigned char record_type = TLS_RECORD_TYPE_DATA;
+ struct scatterlist *sg;
+ bool full_record;
+ int record_room;
+
+ if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+ MSG_SENDPAGE_NOTLAST))
+ return -ENOTSUPP;
+
+ /* No MSG_EOR from splice, only look at MSG_MORE */
+ eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
+
+ lock_sock(sk);
+
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+
+ if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo))
+ goto sendpage_end;
+
+ /* Call the sk_stream functions to manage the sndbuf mem. */
+ while (size > 0) {
+ size_t copy, required_size;
+
+ if (sk->sk_err) {
+ ret = sk->sk_err;
+ goto sendpage_end;
+ }
+
+ full_record = false;
+ record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
+ copy = size;
+ if (copy >= record_room) {
+ copy = record_room;
+ full_record = true;
+ }
+ required_size = ctx->sg_plaintext_size + copy +
+ tls_ctx->overhead_size;
+
+ if (!sk_stream_memory_free(sk))
+ goto wait_for_sndbuf;
+alloc_payload:
+ ret = alloc_encrypted_sg(sk, required_size);
+ if (ret) {
+ if (ret != -ENOSPC)
+ goto wait_for_memory;
+
+ /* Adjust copy according to the amount that was
+ * actually allocated. The difference is due
+			 * to the max sg elements limit.
+ */
+ copy -= required_size - ctx->sg_plaintext_size;
+ full_record = true;
+ }
+
+ get_page(page);
+ sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
+ sg_set_page(sg, page, copy, offset);
+ ctx->sg_plaintext_num_elem++;
+
+ sk_mem_charge(sk, copy);
+ offset += copy;
+ size -= copy;
+ ctx->sg_plaintext_size += copy;
+ tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem;
+
+ if (full_record || eor ||
+ ctx->sg_plaintext_num_elem ==
+ ARRAY_SIZE(ctx->sg_plaintext_data)) {
+push_record:
+ ret = tls_push_record(sk, flags, record_type);
+ if (ret) {
+ if (ret == -ENOMEM)
+ goto wait_for_memory;
+
+ goto sendpage_end;
+ }
+ }
+ continue;
+wait_for_sndbuf:
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+wait_for_memory:
+ ret = sk_stream_wait_memory(sk, &timeo);
+ if (ret) {
+ trim_both_sgl(sk, ctx->sg_plaintext_size);
+ goto sendpage_end;
+ }
+
+ if (tls_is_pending_closed_record(tls_ctx))
+ goto push_record;
+
+ goto alloc_payload;
+ }
+
+sendpage_end:
+ if (orig_size > size)
+ ret = orig_size - size;
+ else
+ ret = sk_stream_error(sk, flags, ret);
+
+ release_sock(sk);
+ return ret;
+}
+
+void tls_sw_free_resources(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
+
+ if (ctx->aead_send)
+ crypto_free_aead(ctx->aead_send);
+
+ tls_free_both_sg(sk);
+
+ kfree(ctx);
+}
+
+int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
+{
+ char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
+ struct tls_crypto_info *crypto_info;
+ struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
+ struct tls_sw_context *sw_ctx;
+ u16 nonce_size, tag_size, iv_size, rec_seq_size;
+ char *iv, *rec_seq;
+ int rc = 0;
+
+ if (!ctx) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (ctx->priv_ctx) {
+ rc = -EEXIST;
+ goto out;
+ }
+
+ sw_ctx = kzalloc(sizeof(*sw_ctx), GFP_KERNEL);
+ if (!sw_ctx) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ ctx->priv_ctx = (struct tls_offload_context *)sw_ctx;
+ ctx->free_resources = tls_sw_free_resources;
+
+ crypto_info = &ctx->crypto_send;
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
+ tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
+ iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
+ iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
+ rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
+ rec_seq =
+ ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
+ gcm_128_info =
+ (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ break;
+ }
+ default:
+ rc = -EINVAL;
+ goto out;
+ }
+
+ ctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
+ ctx->tag_size = tag_size;
+ ctx->overhead_size = ctx->prepend_size + ctx->tag_size;
+ ctx->iv_size = iv_size;
+ ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
+ GFP_KERNEL);
+ if (!ctx->iv) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ memcpy(ctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+ memcpy(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
+ ctx->rec_seq_size = rec_seq_size;
+ ctx->rec_seq = kmalloc(rec_seq_size, GFP_KERNEL);
+ if (!ctx->rec_seq) {
+ rc = -ENOMEM;
+ goto free_iv;
+ }
+ memcpy(ctx->rec_seq, rec_seq, rec_seq_size);
+
+ sg_init_table(sw_ctx->sg_encrypted_data,
+ ARRAY_SIZE(sw_ctx->sg_encrypted_data));
+ sg_init_table(sw_ctx->sg_plaintext_data,
+ ARRAY_SIZE(sw_ctx->sg_plaintext_data));
+
+ sg_init_table(sw_ctx->sg_aead_in, 2);
+ sg_set_buf(&sw_ctx->sg_aead_in[0], sw_ctx->aad_space,
+ sizeof(sw_ctx->aad_space));
+ sg_unmark_end(&sw_ctx->sg_aead_in[1]);
+ sg_chain(sw_ctx->sg_aead_in, 2, sw_ctx->sg_plaintext_data);
+ sg_init_table(sw_ctx->sg_aead_out, 2);
+ sg_set_buf(&sw_ctx->sg_aead_out[0], sw_ctx->aad_space,
+ sizeof(sw_ctx->aad_space));
+ sg_unmark_end(&sw_ctx->sg_aead_out[1]);
+ sg_chain(sw_ctx->sg_aead_out, 2, sw_ctx->sg_encrypted_data);
+
+ if (!sw_ctx->aead_send) {
+ sw_ctx->aead_send = crypto_alloc_aead("gcm(aes)", 0, 0);
+ if (IS_ERR(sw_ctx->aead_send)) {
+ rc = PTR_ERR(sw_ctx->aead_send);
+ sw_ctx->aead_send = NULL;
+ goto free_rec_seq;
+ }
+ }
+
+ ctx->push_pending_record = tls_sw_push_pending_record;
+
+ memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+
+ rc = crypto_aead_setkey(sw_ctx->aead_send, keyval,
+ TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+ if (rc)
+ goto free_aead;
+
+ rc = crypto_aead_setauthsize(sw_ctx->aead_send, ctx->tag_size);
+ if (!rc)
+ goto out;
+
+free_aead:
+ crypto_free_aead(sw_ctx->aead_send);
+ sw_ctx->aead_send = NULL;
+free_rec_seq:
+ kfree(ctx->rec_seq);
+ ctx->rec_seq = NULL;
+free_iv:
+ kfree(ctx->iv);
+ ctx->iv = NULL;
+out:
+ return rc;
+}
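For the one cipher supported here, the sizes set up above work out as follows
(TLS_HEADER_SIZE is the 5-byte TLS record header; the AES-GCM-128 constants
are an 8-byte explicit nonce and a 16-byte tag):

  prepend_size  = TLS_HEADER_SIZE + nonce_size = 5 + 8  = 13 bytes
  overhead_size = prepend_size + tag_size      = 13 + 16 = 29 bytes

so each record carries 29 bytes of framing on top of at most
TLS_MAX_PAYLOAD_SIZE bytes of plaintext.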
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 18e2479..edba7ab 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -90,7 +90,6 @@
static struct sk_buff *virtio_transport_build_skb(void *opaque)
{
struct virtio_vsock_pkt *pkt = opaque;
- unsigned char *t_hdr, *payload;
struct af_vsockmon_hdr *hdr;
struct sk_buff *skb;
@@ -99,7 +98,7 @@
if (!skb)
return NULL;
- hdr = (struct af_vsockmon_hdr *)skb_put(skb, sizeof(*hdr));
+ hdr = skb_put(skb, sizeof(*hdr));
/* pkt->hdr is little-endian so no need to byteswap here */
hdr->src_cid = pkt->hdr.src_cid;
@@ -132,12 +131,10 @@
break;
}
- t_hdr = skb_put(skb, sizeof(pkt->hdr));
- memcpy(t_hdr, &pkt->hdr, sizeof(pkt->hdr));
+ skb_put_data(skb, &pkt->hdr, sizeof(pkt->hdr));
if (pkt->len) {
- payload = skb_put(skb, pkt->len);
- memcpy(payload, pkt->buf, pkt->len);
+ skb_put_data(skb, pkt->buf, pkt->len);
}
return skb;
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 83ea164..7b33e8c 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -711,6 +711,11 @@
(wiphy->bss_select_support & ~(BIT(__NL80211_BSS_SELECT_ATTR_AFTER_LAST) - 2))))
return -EINVAL;
+ if (WARN_ON(wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X) &&
+ (!rdev->ops->set_pmk || !rdev->ops->del_pmk)))
+ return -EINVAL;
+
if (wiphy->addresses)
memcpy(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN);
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index ec0b1c2..421a6b8 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -174,6 +174,14 @@
scan_width);
}
+ err = cfg80211_chandef_dfs_required(&rdev->wiphy,
+ &setup->chandef,
+ NL80211_IFTYPE_MESH_POINT);
+ if (err < 0)
+ return err;
+ if (err > 0 && !setup->userspace_handles_dfs)
+ return -EINVAL;
+
if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef,
NL80211_IFTYPE_MESH_POINT))
return -EINVAL;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c3bc9da..5487cd7 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -7501,6 +7501,7 @@
static struct nlattr *csa_attrs[NL80211_ATTR_MAX+1];
int err;
bool need_new_beacon = false;
+ bool need_handle_dfs_flag = true;
int len, i;
u32 cs_count;
@@ -7512,6 +7513,12 @@
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
need_new_beacon = true;
+		/* For all modes except AP and P2P GO, the handle_dfs flag needs to be
+ * supplied to tell the kernel that userspace will handle radar
+ * events when they happen. Otherwise a switch to a channel
+ * requiring DFS will be rejected.
+ */
+ need_handle_dfs_flag = false;
/* useless if AP is not running */
if (!wdev->beacon_interval)
@@ -7634,8 +7641,13 @@
if (err < 0)
return err;
- if (err > 0)
+ if (err > 0) {
params.radar_required = true;
+ if (need_handle_dfs_flag &&
+ !nla_get_flag(info->attrs[NL80211_ATTR_HANDLE_DFS])) {
+ return -EINVAL;
+ }
+ }
if (info->attrs[NL80211_ATTR_CH_SWITCH_BLOCK_TX])
params.block_tx = true;
@@ -8156,6 +8168,15 @@
memcpy(settings->akm_suites, data, len);
}
+ if (info->attrs[NL80211_ATTR_PMK]) {
+ if (nla_len(info->attrs[NL80211_ATTR_PMK]) != WLAN_PMK_LEN)
+ return -EINVAL;
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK))
+ return -EINVAL;
+ settings->psk = nla_data(info->attrs[NL80211_ATTR_PMK]);
+ }
+
return 0;
}
@@ -8860,6 +8881,12 @@
connect.privacy = info->attrs[NL80211_ATTR_PRIVACY];
+ if (info->attrs[NL80211_ATTR_WANT_1X_4WAY_HS] &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X))
+ return -EINVAL;
+ connect.want_1x = info->attrs[NL80211_ATTR_WANT_1X_4WAY_HS];
+
err = nl80211_crypto_settings(rdev, info, &connect.crypto,
NL80211_MAX_NR_CIPHER_SUITES);
if (err)
@@ -9962,6 +9989,9 @@
return err;
}
+ setup.userspace_handles_dfs =
+ nla_get_flag(info->attrs[NL80211_ATTR_HANDLE_DFS]);
+
return cfg80211_join_mesh(rdev, dev, &setup, &cfg);
}
@@ -12241,6 +12271,90 @@
return rdev_set_multicast_to_unicast(rdev, dev, enabled);
}
+static int nl80211_set_pmk(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_pmk_conf pmk_conf = {};
+ int ret;
+
+ if (wdev->iftype != NL80211_IFTYPE_STATION &&
+ wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
+ return -EOPNOTSUPP;
+
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X))
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL80211_ATTR_MAC] || !info->attrs[NL80211_ATTR_PMK])
+ return -EINVAL;
+
+ wdev_lock(wdev);
+ if (!wdev->current_bss) {
+ ret = -ENOTCONN;
+ goto out;
+ }
+
+ pmk_conf.aa = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ if (memcmp(pmk_conf.aa, wdev->current_bss->pub.bssid, ETH_ALEN)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ pmk_conf.pmk = nla_data(info->attrs[NL80211_ATTR_PMK]);
+ pmk_conf.pmk_len = nla_len(info->attrs[NL80211_ATTR_PMK]);
+ if (pmk_conf.pmk_len != WLAN_PMK_LEN &&
+ pmk_conf.pmk_len != WLAN_PMK_LEN_SUITE_B_192) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (info->attrs[NL80211_ATTR_PMKR0_NAME]) {
+ int r0_name_len = nla_len(info->attrs[NL80211_ATTR_PMKR0_NAME]);
+
+ if (r0_name_len != WLAN_PMK_NAME_LEN) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ pmk_conf.pmk_r0_name =
+ nla_data(info->attrs[NL80211_ATTR_PMKR0_NAME]);
+ }
+
+ ret = rdev_set_pmk(rdev, dev, &pmk_conf);
+out:
+ wdev_unlock(wdev);
+ return ret;
+}
+
+static int nl80211_del_pmk(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ const u8 *aa;
+ int ret;
+
+ if (wdev->iftype != NL80211_IFTYPE_STATION &&
+ wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
+ return -EOPNOTSUPP;
+
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X))
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL80211_ATTR_MAC])
+ return -EINVAL;
+
+ wdev_lock(wdev);
+ aa = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ ret = rdev_del_pmk(rdev, dev, aa);
+ wdev_unlock(wdev);
+
+ return ret;
+}
+
#define NL80211_FLAG_NEED_WIPHY 0x01
#define NL80211_FLAG_NEED_NETDEV 0x02
#define NL80211_FLAG_NEED_RTNL 0x04
@@ -13116,6 +13230,21 @@
.internal_flags = NL80211_FLAG_NEED_NETDEV |
NL80211_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL80211_CMD_SET_PMK,
+ .doit = nl80211_set_pmk,
+ .policy = nl80211_policy,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_DEL_PMK,
+ .doit = nl80211_del_pmk,
+ .policy = nl80211_policy,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+
};
static struct genl_family nl80211_fam __ro_after_init = {
@@ -13671,7 +13800,9 @@
info->req_ie)) ||
(info->resp_ie &&
nla_put(msg, NL80211_ATTR_RESP_IE, info->resp_ie_len,
- info->resp_ie)))
+ info->resp_ie)) ||
+ (info->authorized &&
+ nla_put_flag(msg, NL80211_ATTR_PORT_AUTHORIZED)))
goto nla_put_failure;
genlmsg_end(msg, hdr);
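A hedged driver-side sketch of opting in; the wiphy_register() check added in
net/wireless/core.c above rejects the feature flag unless both ops exist
(my_set_pmk/my_del_pmk are hypothetical):

#include <net/cfg80211.h>

static int my_set_pmk(struct wiphy *wiphy, struct net_device *dev,
		      const struct cfg80211_pmk_conf *conf)
{
	/* program conf->pmk (conf->pmk_len bytes) for authenticator
	 * conf->aa into the device; conf->pmk_r0_name may be NULL
	 */
	return 0;
}

static int my_del_pmk(struct wiphy *wiphy, struct net_device *dev,
		      const u8 *aa)
{
	/* drop the PMK installed for authenticator address aa */
	return 0;
}

/* in the driver's cfg80211_ops: .set_pmk = my_set_pmk, .del_pmk = my_del_pmk */

static void my_wiphy_setup(struct wiphy *wiphy)
{
	wiphy_ext_feature_set(wiphy,
			      NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X);
}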
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 0598c1e..ce23d7d 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -1164,4 +1164,29 @@
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
+
+static inline int rdev_set_pmk(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_pmk_conf *pmk_conf)
+{
+ int ret = -EOPNOTSUPP;
+
+ trace_rdev_set_pmk(&rdev->wiphy, dev, pmk_conf);
+ if (rdev->ops->set_pmk)
+ ret = rdev->ops->set_pmk(&rdev->wiphy, dev, pmk_conf);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
+static inline int rdev_del_pmk(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, const u8 *aa)
+{
+ int ret = -EOPNOTSUPP;
+
+ trace_rdev_del_pmk(&rdev->wiphy, dev, aa);
+ if (rdev->ops->del_pmk)
+ ret = rdev->ops->del_pmk(&rdev->wiphy, dev, aa);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
#endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 532a000..0a49b88 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -960,6 +960,7 @@
ev->rm.resp_ie_len = info->resp_ie_len;
memcpy((void *)ev->rm.resp_ie, info->resp_ie, info->resp_ie_len);
ev->rm.bss = info->bss;
+ ev->rm.authorized = info->authorized;
spin_lock_irqsave(&wdev->event_lock, flags);
list_add_tail(&ev->list, &wdev->event_list);
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index ca8b205..0f8db41 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2258,6 +2258,66 @@
WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(addr))
);
+TRACE_EVENT(rdev_set_pmk,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmk_conf *pmk_conf),
+
+ TP_ARGS(wiphy, netdev, pmk_conf),
+
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ MAC_ENTRY(aa)
+ __field(u8, pmk_len)
+ __field(u8, pmk_r0_name_len)
+ __dynamic_array(u8, pmk, pmk_conf->pmk_len)
+ __dynamic_array(u8, pmk_r0_name, WLAN_PMK_NAME_LEN)
+ ),
+
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(aa, pmk_conf->aa);
+ __entry->pmk_len = pmk_conf->pmk_len;
+ __entry->pmk_r0_name_len =
+ pmk_conf->pmk_r0_name ? WLAN_PMK_NAME_LEN : 0;
+ memcpy(__get_dynamic_array(pmk), pmk_conf->pmk,
+ pmk_conf->pmk_len);
+ memcpy(__get_dynamic_array(pmk_r0_name), pmk_conf->pmk_r0_name,
+ pmk_conf->pmk_r0_name ? WLAN_PMK_NAME_LEN : 0);
+ ),
+
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT
+		  ", pmk_len=%u, pmk: %s pmk_r0_name: %s", WIPHY_PR_ARG,
+ NETDEV_PR_ARG, MAC_PR_ARG(aa), __entry->pmk_len,
+ __print_array(__get_dynamic_array(pmk),
+ __get_dynamic_array_len(pmk), 1),
+ __entry->pmk_r0_name_len ?
+ __print_array(__get_dynamic_array(pmk_r0_name),
+ __get_dynamic_array_len(pmk_r0_name), 1) : "")
+);
+
+TRACE_EVENT(rdev_del_pmk,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *aa),
+
+ TP_ARGS(wiphy, netdev, aa),
+
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ MAC_ENTRY(aa)
+ ),
+
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(aa, aa);
+ ),
+
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT,
+ WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(aa))
+);
+
/*************************************************************
* cfg80211 exported functions traces *
*************************************************************/
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 4992f10..bcb1284 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -522,7 +522,7 @@
pskb_pull(skb, hdrlen);
if (!ehdr)
- ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr));
+ ehdr = skb_push(skb, sizeof(struct ethhdr));
memcpy(ehdr, &tmp, sizeof(tmp));
return 0;
@@ -1219,8 +1219,8 @@
u32 bitrate;
int idx;
- if (WARN_ON_ONCE(rate->mcs > 9))
- return 0;
+ if (rate->mcs > 9)
+ goto warn;
switch (rate->bw) {
case RATE_INFO_BW_160:
@@ -1235,8 +1235,7 @@
case RATE_INFO_BW_5:
case RATE_INFO_BW_10:
default:
- WARN_ON(1);
- /* fall through */
+ goto warn;
case RATE_INFO_BW_20:
idx = 0;
}
@@ -1249,6 +1248,10 @@
/* do NOT round down here */
return (bitrate + 50000) / 100000;
+ warn:
+ WARN_ONCE(1, "invalid rate bw=%d, mcs=%d, nss=%d\n",
+ rate->bw, rate->mcs, rate->nss);
+ return 0;
}
u32 cfg80211_calculate_bitrate(struct rate_info *rate)
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 6b5af65..db0b131 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -188,17 +188,14 @@
*dptr++ = X25_CALL_REQUEST;
len = x25_addr_aton(addresses, &x25->dest_addr,
&x25->source_addr);
- dptr = skb_put(skb, len);
- memcpy(dptr, addresses, len);
+ skb_put_data(skb, addresses, len);
len = x25_create_facilities(facilities,
&x25->facilities,
&x25->dte_facilities,
x25->neighbour->global_facil_mask);
- dptr = skb_put(skb, len);
- memcpy(dptr, facilities, len);
- dptr = skb_put(skb, x25->calluserdata.cudlength);
- memcpy(dptr, x25->calluserdata.cuddata,
- x25->calluserdata.cudlength);
+ skb_put_data(skb, facilities, len);
+ skb_put_data(skb, x25->calluserdata.cuddata,
+ x25->calluserdata.cudlength);
x25->calluserdata.cudlength = 0;
break;
@@ -210,17 +207,15 @@
&x25->facilities,
&x25->dte_facilities,
x25->vc_facil_mask);
- dptr = skb_put(skb, len);
- memcpy(dptr, facilities, len);
+ skb_put_data(skb, facilities, len);
/* fast select with no restriction on response
allows call user data. Userland must
ensure it is ours and not theirs */
if(x25->facilities.reverse & 0x80) {
- dptr = skb_put(skb,
- x25->calluserdata.cudlength);
- memcpy(dptr, x25->calluserdata.cuddata,
- x25->calluserdata.cudlength);
+ skb_put_data(skb,
+ x25->calluserdata.cuddata,
+ x25->calluserdata.cudlength);
}
x25->calluserdata.cudlength = 0;
break;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index ed4e52d..af8e38f 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1590,7 +1590,9 @@
struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
struct dst_entry *dst = &xdst->u.dst;
- dst_free(dst);
+ /* Mark DST_OBSOLETE_DEAD to fail the next xfrm_dst_check() */
+ dst->obsolete = DST_OBSOLETE_DEAD;
+ dst_release_immediate(dst);
}
static const struct flow_cache_ops xfrm_bundle_fc_ops = {
@@ -1620,7 +1622,7 @@
default:
BUG();
}
- xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);
+ xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
if (likely(xdst)) {
struct dst_entry *dst = &xdst->u.dst;
@@ -1723,10 +1725,11 @@
if (!dst_prev)
dst0 = dst1;
- else {
- dst_prev->child = dst_clone(dst1);
- dst1->flags |= DST_NOHASH;
- }
+ else
+		/* The ref count is taken during xfrm_alloc_dst(),
+		 * so there is no need to do dst_clone() on dst1.
+ */
+ dst_prev->child = dst1;
xdst->route = dst;
dst_copy_metrics(dst1, dst);
@@ -1792,7 +1795,7 @@
xfrm_state_put(xfrm[i]);
free_dst:
if (dst0)
- dst_free(dst0);
+ dst_release_immediate(dst0);
dst0 = ERR_PTR(err);
goto out;
}
@@ -2073,7 +2076,11 @@
pol_dead |= pols[i]->walk.dead;
}
if (pol_dead) {
- dst_free(&xdst->u.dst);
+ /* Mark DST_OBSOLETE_DEAD to fail the next
+ * xfrm_dst_check()
+ */
+ xdst->u.dst.obsolete = DST_OBSOLETE_DEAD;
+ dst_release_immediate(&xdst->u.dst);
xdst = NULL;
num_pols = 0;
num_xfrms = 0;
@@ -2120,11 +2127,12 @@
if (xdst) {
/* The policies were stolen for newly generated bundle */
xdst->num_pols = 0;
- dst_free(&xdst->u.dst);
+ /* Mark DST_OBSOLETE_DEAD to fail the next xfrm_dst_check() */
+ xdst->u.dst.obsolete = DST_OBSOLETE_DEAD;
+ dst_release_immediate(&xdst->u.dst);
}
- /* Flow cache does not have reference, it dst_free()'s,
- * but we do need to return one reference for original caller */
+ /* We do need to return one reference for original caller */
dst_hold(&new_xdst->u.dst);
return &new_xdst->flo;
@@ -2147,9 +2155,11 @@
inc_error:
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
- if (xdst != NULL)
- dst_free(&xdst->u.dst);
- else
+ if (xdst != NULL) {
+ /* Mark DST_OBSOLETE_DEAD to fail the next xfrm_dst_check() */
+ xdst->u.dst.obsolete = DST_OBSOLETE_DEAD;
+ dst_release_immediate(&xdst->u.dst);
+ } else
xfrm_pols_put(pols, num_pols);
return ERR_PTR(err);
}
@@ -2221,7 +2231,6 @@
}
dst_hold(&xdst->u.dst);
- xdst->u.dst.flags |= DST_NOCACHE;
route = xdst->route;
}
}
@@ -2636,10 +2645,12 @@
* notice. That's what we are validating here via the
* stale_bundle() check.
*
- * When a policy's bundle is pruned, we dst_free() the XFRM
- * dst which causes it's ->obsolete field to be set to
- * DST_OBSOLETE_DEAD. If an XFRM dst has been pruned like
- * this, we want to force a new route lookup.
+	 * When an xdst is removed from the flow cache, or when a dst is
+	 * removed from the fib tree, it is marked DST_OBSOLETE_DEAD.
+	 * Either case forces stale_bundle() to fail on any xdst bundle
+	 * with this dst linked in it.
*/
if (dst->obsolete < 0 && !stale_bundle(dst))
return dst;
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 6c7468e..a0561dc 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -160,6 +160,17 @@
$(MAKE) -C ../../ M=$(CURDIR) clean
@rm -f *~
+$(obj)/syscall_nrs.s: $(src)/syscall_nrs.c
+ $(call if_changed_dep,cc_s_c)
+
+$(obj)/syscall_nrs.h: $(obj)/syscall_nrs.s FORCE
+ $(call filechk,offsets,__SYSCALL_NRS_H__)
+
+clean-files += syscall_nrs.h
+
+FORCE:
+
+
# Verify LLVM compiler tools are available and bpf target is supported by llc
.PHONY: verify_cmds verify_target_bpf $(CLANG) $(LLC)
@@ -180,6 +191,8 @@
$(src)/*.c: verify_target_bpf
+$(obj)/tracex5_kern.o: $(obj)/syscall_nrs.h
+
# asm/sysreg.h - inline assembly used by it is incompatible with llvm.
# But, there is no easy way to fix it, so just exclude it since it is
# useless for BPF samples.
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index 9a9c95f..f4840b8 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -31,7 +31,8 @@
(void *) BPF_FUNC_get_current_uid_gid;
static int (*bpf_get_current_comm)(void *buf, int buf_size) =
(void *) BPF_FUNC_get_current_comm;
-static int (*bpf_perf_event_read)(void *map, int index) =
+static unsigned long long (*bpf_perf_event_read)(void *map,
+ unsigned long long flags) =
(void *) BPF_FUNC_perf_event_read;
static int (*bpf_clone_redirect)(void *ctx, int ifindex, int flags) =
(void *) BPF_FUNC_clone_redirect;
@@ -135,6 +136,19 @@
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->pc)
+#elif defined(__mips__)
+
+#define PT_REGS_PARM1(x) ((x)->regs[4])
+#define PT_REGS_PARM2(x) ((x)->regs[5])
+#define PT_REGS_PARM3(x) ((x)->regs[6])
+#define PT_REGS_PARM4(x) ((x)->regs[7])
+#define PT_REGS_PARM5(x) ((x)->regs[8])
+#define PT_REGS_RET(x) ((x)->regs[31])
+#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */
+#define PT_REGS_RC(x) ((x)->regs[1])
+#define PT_REGS_SP(x) ((x)->regs[29])
+#define PT_REGS_IP(x) ((x)->cp0_epc)
+
#elif defined(__powerpc__)
#define PT_REGS_PARM1(x) ((x)->gpr[3])
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index 74456b3..a91c57d 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -516,16 +516,18 @@
processed_sec[maps_shndx] = true;
}
- /* load programs that need map fixup (relocations) */
+ /* process all relo sections, and rewrite bpf insns for maps */
for (i = 1; i < ehdr.e_shnum; i++) {
if (processed_sec[i])
continue;
if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
continue;
+
if (shdr.sh_type == SHT_REL) {
struct bpf_insn *insns;
+ /* locate prog sec that need map fixup (relocations) */
if (get_sec(elf, shdr.sh_info, &ehdr, &shname_prog,
&shdr_prog, &data_prog))
continue;
@@ -535,26 +537,15 @@
continue;
insns = (struct bpf_insn *) data_prog->d_buf;
-
- processed_sec[shdr.sh_info] = true;
- processed_sec[i] = true;
+ processed_sec[i] = true; /* relo section */
if (parse_relo_and_apply(data, symbols, &shdr, insns,
map_data, nr_maps))
continue;
-
- if (memcmp(shname_prog, "kprobe/", 7) == 0 ||
- memcmp(shname_prog, "kretprobe/", 10) == 0 ||
- memcmp(shname_prog, "tracepoint/", 11) == 0 ||
- memcmp(shname_prog, "xdp", 3) == 0 ||
- memcmp(shname_prog, "perf_event", 10) == 0 ||
- memcmp(shname_prog, "socket", 6) == 0 ||
- memcmp(shname_prog, "cgroup/", 7) == 0)
- load_and_attach(shname_prog, insns, data_prog->d_size);
}
}
- /* load programs that don't use maps */
+ /* load programs */
for (i = 1; i < ehdr.e_shnum; i++) {
if (processed_sec[i])
diff --git a/samples/bpf/syscall_nrs.c b/samples/bpf/syscall_nrs.c
new file mode 100644
index 0000000..ce2a30b
--- /dev/null
+++ b/samples/bpf/syscall_nrs.c
@@ -0,0 +1,12 @@
+#include <uapi/linux/unistd.h>
+#include <linux/kbuild.h>
+
+#define SYSNR(_NR) DEFINE(SYS ## _NR, _NR)
+
+void syscall_defines(void)
+{
+ COMMENT("Linux system call numbers.");
+ SYSNR(__NR_write);
+ SYSNR(__NR_read);
+ SYSNR(__NR_mmap);
+}
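The filechk,offsets rule in the Makefile hunk above turns these DEFINE()s into
a plain header. A rough sketch of the generated syscall_nrs.h on x86-64 (the
numbers are that arch's syscall numbers; other architectures differ, which is
why the header is generated rather than hard-coded):

#ifndef __SYSCALL_NRS_H__
#define __SYSCALL_NRS_H__
/* Linux system call numbers. */
#define SYS__NR_write 1 /* __NR_write */
#define SYS__NR_read 0 /* __NR_read */
#define SYS__NR_mmap 9 /* __NR_mmap */
#endif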
diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c
index fa43364..7bd827b 100644
--- a/samples/bpf/trace_event_user.c
+++ b/samples/bpf/trace_event_user.c
@@ -75,7 +75,10 @@
for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
print_addr(ip[i]);
}
- printf("\n");
+ if (count < 6)
+ printf("\r");
+ else
+ printf("\n");
if (key->kernstack == -EEXIST && !warned) {
printf("stackmap collisions seen. Consider increasing size\n");
@@ -105,7 +108,7 @@
bpf_map_delete_elem(fd, &next_key);
key = next_key;
}
-
+ printf("\n");
if (!sys_read_seen || !sys_write_seen) {
printf("BUG kernel stack doesn't contain sys_read() and sys_write()\n");
int_exit(0);
@@ -122,24 +125,29 @@
{
int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
int *pmu_fd = malloc(nr_cpus * sizeof(int));
- int i;
+ int i, error = 0;
/* open perf_event on all cpus */
for (i = 0; i < nr_cpus; i++) {
pmu_fd[i] = sys_perf_event_open(attr, -1, i, -1, 0);
if (pmu_fd[i] < 0) {
printf("sys_perf_event_open failed\n");
+ error = 1;
goto all_cpu_err;
}
assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
- assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0) == 0);
+ assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE) == 0);
}
- system("dd if=/dev/zero of=/dev/null count=5000k");
+ system("dd if=/dev/zero of=/dev/null count=5000k status=none");
print_stacks();
all_cpu_err:
- for (i--; i >= 0; i--)
+ for (i--; i >= 0; i--) {
+ ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
close(pmu_fd[i]);
+ }
free(pmu_fd);
+ if (error)
+ int_exit(0);
}
static void test_perf_event_task(struct perf_event_attr *attr)
@@ -150,12 +158,13 @@
pmu_fd = sys_perf_event_open(attr, 0, -1, -1, 0);
if (pmu_fd < 0) {
printf("sys_perf_event_open failed\n");
- return;
+ int_exit(0);
}
assert(ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
- assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0) == 0);
- system("dd if=/dev/zero of=/dev/null count=5000k");
+ assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE) == 0);
+ system("dd if=/dev/zero of=/dev/null count=5000k status=none");
print_stacks();
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
close(pmu_fd);
}
@@ -175,11 +184,56 @@
.config = PERF_COUNT_SW_CPU_CLOCK,
.inherit = 1,
};
+ struct perf_event_attr attr_hw_cache_l1d = {
+ .sample_freq = SAMPLE_FREQ,
+ .freq = 1,
+ .type = PERF_TYPE_HW_CACHE,
+ .config =
+ PERF_COUNT_HW_CACHE_L1D |
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
+ .inherit = 1,
+ };
+ struct perf_event_attr attr_hw_cache_branch_miss = {
+ .sample_freq = SAMPLE_FREQ,
+ .freq = 1,
+ .type = PERF_TYPE_HW_CACHE,
+ .config =
+ PERF_COUNT_HW_CACHE_BPU |
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
+ .inherit = 1,
+ };
+ struct perf_event_attr attr_type_raw = {
+ .sample_freq = SAMPLE_FREQ,
+ .freq = 1,
+ .type = PERF_TYPE_RAW,
+ /* Intel Instruction Retired */
+ .config = 0xc0,
+ .inherit = 1,
+ };
+ printf("Test HW_CPU_CYCLES\n");
test_perf_event_all_cpu(&attr_type_hw);
test_perf_event_task(&attr_type_hw);
+
+ printf("Test SW_CPU_CLOCK\n");
test_perf_event_all_cpu(&attr_type_sw);
test_perf_event_task(&attr_type_sw);
+
+ printf("Test HW_CACHE_L1D\n");
+ test_perf_event_all_cpu(&attr_hw_cache_l1d);
+ test_perf_event_task(&attr_hw_cache_l1d);
+
+ printf("Test HW_CACHE_BPU\n");
+ test_perf_event_all_cpu(&attr_hw_cache_branch_miss);
+ test_perf_event_task(&attr_hw_cache_branch_miss);
+
+ printf("Test Instruction Retired\n");
+ test_perf_event_all_cpu(&attr_type_raw);
+ test_perf_event_task(&attr_type_raw);
+
+ printf("*** PASS ***\n");
}
@@ -209,7 +263,6 @@
return 0;
}
test_bpf_perf_event();
-
int_exit(0);
return 0;
}
diff --git a/samples/bpf/tracex5_kern.c b/samples/bpf/tracex5_kern.c
index 7e4cf74..f57f4e1 100644
--- a/samples/bpf/tracex5_kern.c
+++ b/samples/bpf/tracex5_kern.c
@@ -9,6 +9,7 @@
#include <uapi/linux/bpf.h>
#include <uapi/linux/seccomp.h>
#include <uapi/linux/unistd.h>
+#include "syscall_nrs.h"
#include "bpf_helpers.h"
#define PROG(F) SEC("kprobe/"__stringify(F)) int bpf_func_##F
@@ -17,7 +18,11 @@
.type = BPF_MAP_TYPE_PROG_ARRAY,
.key_size = sizeof(u32),
.value_size = sizeof(u32),
+#ifdef __mips__
+ .max_entries = 6000, /* MIPS n64 syscalls start at 5000 */
+#else
.max_entries = 1024,
+#endif
};
SEC("kprobe/__seccomp_filter")
@@ -37,7 +42,7 @@
}
/* we jump here when syscall number == __NR_write */
-PROG(__NR_write)(struct pt_regs *ctx)
+PROG(SYS__NR_write)(struct pt_regs *ctx)
{
struct seccomp_data sd;
@@ -50,7 +55,7 @@
return 0;
}
-PROG(__NR_read)(struct pt_regs *ctx)
+PROG(SYS__NR_read)(struct pt_regs *ctx)
{
struct seccomp_data sd;
@@ -63,7 +68,7 @@
return 0;
}
-PROG(__NR_mmap)(struct pt_regs *ctx)
+PROG(SYS__NR_mmap)(struct pt_regs *ctx)
{
char fmt[] = "mmap\n";
bpf_trace_printk(fmt, sizeof(fmt));
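A note on the rename above: __stringify() fully expands its argument, so PROG(SYS__NR_write) produces a section named after the numeric value that the generated syscall_nrs.h assigns to SYS__NR_write, while token pasting keeps the symbolic function name. Roughly (assuming x86_64, where write is syscall 1):

SEC("kprobe/1") int bpf_func_SYS__NR_write(struct pt_regs *ctx)

Plain generated numbers are needed because some architectures define the raw __NR_* constants with arithmetic expressions that do not stringify to a usable section name.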
diff --git a/samples/bpf/tracex6_kern.c b/samples/bpf/tracex6_kern.c
index be479c4..e7d1803 100644
--- a/samples/bpf/tracex6_kern.c
+++ b/samples/bpf/tracex6_kern.c
@@ -3,22 +3,36 @@
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
-struct bpf_map_def SEC("maps") my_map = {
+struct bpf_map_def SEC("maps") counters = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(u32),
- .max_entries = 32,
+ .max_entries = 64,
+};
+struct bpf_map_def SEC("maps") values = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(int),
+ .value_size = sizeof(u64),
+ .max_entries = 64,
};
-SEC("kprobe/sys_write")
+SEC("kprobe/htab_map_get_next_key")
int bpf_prog1(struct pt_regs *ctx)
{
- u64 count;
u32 key = bpf_get_smp_processor_id();
- char fmt[] = "CPU-%d %llu\n";
+ u64 count, *val;
+ s64 error;
- count = bpf_perf_event_read(&my_map, key);
- bpf_trace_printk(fmt, sizeof(fmt), key, count);
+ count = bpf_perf_event_read(&counters, key);
+ error = (s64)count;
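+ /* values in [-22, -2] (-EINVAL..-ENOENT) are error codes from bpf_perf_event_read() */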
+ if (error <= -2 && error >= -22)
+ return 0;
+
+ val = bpf_map_lookup_elem(&values, &key);
+ if (val)
+ *val = count;
+ else
+ bpf_map_update_elem(&values, &key, &count, BPF_NOEXIST);
return 0;
}
diff --git a/samples/bpf/tracex6_user.c b/samples/bpf/tracex6_user.c
index ca7874e..a05a99a 100644
--- a/samples/bpf/tracex6_user.c
+++ b/samples/bpf/tracex6_user.c
@@ -1,73 +1,177 @@
-#include <stdio.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <stdbool.h>
-#include <string.h>
+#define _GNU_SOURCE
+
+#include <assert.h>
#include <fcntl.h>
-#include <poll.h>
-#include <sys/ioctl.h>
#include <linux/perf_event.h>
#include <linux/bpf.h>
-#include "libbpf.h"
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
#include "bpf_load.h"
+#include "libbpf.h"
#include "perf-sys.h"
#define SAMPLE_PERIOD 0x7fffffffffffffffULL
+static void check_on_cpu(int cpu, struct perf_event_attr *attr)
+{
+ int pmu_fd, error = 0;
+ cpu_set_t set;
+ __u64 value;
+
+ /* Move to target CPU */
+ CPU_ZERO(&set);
+ CPU_SET(cpu, &set);
+ assert(sched_setaffinity(0, sizeof(set), &set) == 0);
+ /* Open perf event and attach to the perf_event_array */
+ pmu_fd = sys_perf_event_open(attr, -1/*pid*/, cpu/*cpu*/, -1/*group_fd*/, 0);
+ if (pmu_fd < 0) {
+ fprintf(stderr, "sys_perf_event_open failed on CPU %d\n", cpu);
+ error = 1;
+ goto on_exit;
+ }
+ assert(bpf_map_update_elem(map_fd[0], &cpu, &pmu_fd, BPF_ANY) == 0);
+ assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0) == 0);
+ /* Trigger the kprobe */
+ bpf_map_get_next_key(map_fd[1], &cpu, NULL);
+ /* Check the value */
+ if (bpf_map_lookup_elem(map_fd[1], &cpu, &value)) {
+ fprintf(stderr, "Value missing for CPU %d\n", cpu);
+ error = 1;
+ goto on_exit;
+ }
+ fprintf(stderr, "CPU %d: %llu\n", cpu, value);
+
+on_exit:
+ assert(bpf_map_delete_elem(map_fd[0], &cpu) == 0 || error);
+ assert(ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE, 0) == 0 || error);
+ assert(close(pmu_fd) == 0 || error);
+ assert(bpf_map_delete_elem(map_fd[1], &cpu) == 0 || error);
+ exit(error);
+}
+
+static void test_perf_event_array(struct perf_event_attr *attr,
+ const char *name)
+{
+ int i, status, nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+ pid_t pid[nr_cpus];
+ int err = 0;
+
+ printf("Test reading %s counters\n", name);
+
+ for (i = 0; i < nr_cpus; i++) {
+ pid[i] = fork();
+ assert(pid[i] >= 0);
+ if (pid[i] == 0) {
+ check_on_cpu(i, attr);
+ exit(1);
+ }
+ }
+
+ for (i = 0; i < nr_cpus; i++) {
+ assert(waitpid(pid[i], &status, 0) == pid[i]);
+ err |= status;
+ }
+
+ if (err)
+ printf("Test: %s FAILED\n", name);
+}
+
static void test_bpf_perf_event(void)
{
- int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
- int *pmu_fd = malloc(nr_cpus * sizeof(int));
- int status, i;
-
- struct perf_event_attr attr_insn_pmu = {
+ struct perf_event_attr attr_cycles = {
.freq = 0,
.sample_period = SAMPLE_PERIOD,
.inherit = 0,
.type = PERF_TYPE_HARDWARE,
.read_format = 0,
.sample_type = 0,
- .config = 0,/* PMU: cycles */
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ };
+ struct perf_event_attr attr_clock = {
+ .freq = 0,
+ .sample_period = SAMPLE_PERIOD,
+ .inherit = 0,
+ .type = PERF_TYPE_SOFTWARE,
+ .read_format = 0,
+ .sample_type = 0,
+ .config = PERF_COUNT_SW_CPU_CLOCK,
+ };
+ struct perf_event_attr attr_raw = {
+ .freq = 0,
+ .sample_period = SAMPLE_PERIOD,
+ .inherit = 0,
+ .type = PERF_TYPE_RAW,
+ .read_format = 0,
+ .sample_type = 0,
+ /* Intel Instruction Retired */
+ .config = 0xc0,
+ };
+ struct perf_event_attr attr_l1d_load = {
+ .freq = 0,
+ .sample_period = SAMPLE_PERIOD,
+ .inherit = 0,
+ .type = PERF_TYPE_HW_CACHE,
+ .read_format = 0,
+ .sample_type = 0,
+ .config =
+ PERF_COUNT_HW_CACHE_L1D |
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
+ };
+ struct perf_event_attr attr_llc_miss = {
+ .freq = 0,
+ .sample_period = SAMPLE_PERIOD,
+ .inherit = 0,
+ .type = PERF_TYPE_HW_CACHE,
+ .read_format = 0,
+ .sample_type = 0,
+ .config =
+ PERF_COUNT_HW_CACHE_LL |
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
+ };
+ struct perf_event_attr attr_msr_tsc = {
+ .freq = 0,
+ .sample_period = 0,
+ .inherit = 0,
+ /* From /sys/bus/event_source/devices/msr/ */
+ .type = 7,
+ .read_format = 0,
+ .sample_type = 0,
+ .config = 0,
};
- for (i = 0; i < nr_cpus; i++) {
- pmu_fd[i] = sys_perf_event_open(&attr_insn_pmu, -1/*pid*/, i/*cpu*/, -1/*group_fd*/, 0);
- if (pmu_fd[i] < 0) {
- printf("event syscall failed\n");
- goto exit;
- }
+ test_perf_event_array(&attr_cycles, "HARDWARE-cycles");
+ test_perf_event_array(&attr_clock, "SOFTWARE-clock");
+ test_perf_event_array(&attr_raw, "RAW-instruction-retired");
+ test_perf_event_array(&attr_l1d_load, "HW_CACHE-L1D-load");
- bpf_map_update_elem(map_fd[0], &i, &pmu_fd[i], BPF_ANY);
- ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
- }
-
- status = system("ls > /dev/null");
- if (status)
- goto exit;
- status = system("sleep 2");
- if (status)
- goto exit;
-
-exit:
- for (i = 0; i < nr_cpus; i++)
- close(pmu_fd[i]);
- close(map_fd[0]);
- free(pmu_fd);
+ /* below tests may fail in qemu */
+ test_perf_event_array(&attr_llc_miss, "HW_CACHE-LLC-miss");
+ test_perf_event_array(&attr_msr_tsc, "Dynamic-msr-tsc");
}
int main(int argc, char **argv)
{
+ struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
char filename[256];
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ setrlimit(RLIMIT_MEMLOCK, &r);
if (load_bpf_file(filename)) {
printf("%s", bpf_log_buf);
return 1;
}
test_bpf_perf_event();
- read_trace_pipe();
-
return 0;
}
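check_on_cpu() above relies on sched_setaffinity() to pin each forked child to its target CPU before the event is opened, so that bpf_get_smp_processor_id() in the kernel program indexes that child's own map slot. A minimal sketch of the pinning idiom on its own (names are illustrative):

#define _GNU_SOURCE
#include <assert.h>
#include <sched.h>

/* Pin the calling process to a single CPU. */
static void pin_to_cpu(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	assert(sched_setaffinity(0, sizeof(set), &set) == 0);
}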
diff --git a/samples/pktgen/README.rst b/samples/pktgen/README.rst
index 8365c4e..ff8929d 100644
--- a/samples/pktgen/README.rst
+++ b/samples/pktgen/README.rst
@@ -21,7 +21,9 @@
-d : ($DEST_IP) destination IP
-m : ($DST_MAC) destination MAC-addr
-t : ($THREADS) threads to start
+ -f : ($F_THREAD) index of first thread (zero indexed CPU number)
-c : ($SKB_CLONE) SKB clones send before alloc new SKB
+ -n : ($COUNT) num messages to send per thread, 0 means indefinitely
-b : ($BURST) HW level bursting of SKBs
-v : ($VERBOSE) verbose
-x : ($DEBUG) debug
diff --git a/samples/pktgen/parameters.sh b/samples/pktgen/parameters.sh
index f70ea7d..3a6244d 100644
--- a/samples/pktgen/parameters.sh
+++ b/samples/pktgen/parameters.sh
@@ -10,7 +10,9 @@
echo " -d : (\$DEST_IP) destination IP"
echo " -m : (\$DST_MAC) destination MAC-addr"
echo " -t : (\$THREADS) threads to start"
+ echo " -f : (\$F_THREAD) index of first thread (zero indexed CPU number)"
echo " -c : (\$SKB_CLONE) SKB clones send before alloc new SKB"
+ echo " -n : (\$COUNT) num messages to send per thread, 0 means indefinitely"
echo " -b : (\$BURST) HW level bursting of SKBs"
echo " -v : (\$VERBOSE) verbose"
echo " -x : (\$DEBUG) debug"
@@ -20,7 +22,7 @@
## --- Parse command line arguments / parameters ---
## echo "Commandline options:"
-while getopts "s:i:d:m:t:c:b:vxh6" option; do
+while getopts "s:i:d:m:f:t:c:n:b:vxh6" option; do
case $option in
i) # interface
export DEV=$OPTARG
@@ -38,16 +40,22 @@
export DST_MAC=$OPTARG
info "Destination MAC set to: DST_MAC=$DST_MAC"
;;
+ f)
+ export F_THREAD=$OPTARG
+ info "Index of first thread (zero indexed CPU number): $F_THREAD"
+ ;;
t)
export THREADS=$OPTARG
- export CPU_THREADS=$OPTARG
- let "CPU_THREADS -= 1"
- info "Number of threads to start: $THREADS (0 to $CPU_THREADS)"
+ info "Number of threads to start: $THREADS"
;;
c)
export CLONE_SKB=$OPTARG
info "CLONE_SKB=$CLONE_SKB"
;;
+ n)
+ export COUNT=$OPTARG
+ info "COUNT=$COUNT"
+ ;;
b)
export BURST=$OPTARG
info "SKB bursting: BURST=$BURST"
@@ -77,12 +85,17 @@
info "Default packet size set to: set to: $PKT_SIZE bytes"
fi
+if [ -z "$F_THREAD" ]; then
+ # First thread (F_THREAD) references the zero indexed CPU number
+ export F_THREAD=0
+fi
+
if [ -z "$THREADS" ]; then
- # Zero CPU threads means one thread, because CPU numbers are zero indexed
- export CPU_THREADS=0
export THREADS=1
fi
+export L_THREAD=$(( THREADS + F_THREAD - 1 ))
+
if [ -z "$DEV" ]; then
usage
err 2 "Please specify output device"
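As a worked example of the range computed above: running a script with -f 4 -t 2 gives F_THREAD=4 and L_THREAD=4+2-1=5, so the per-script loops below iterate over pktgen threads 4 and 5 (CPUs 4 and 5) instead of always starting at CPU 0.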
diff --git a/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh b/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
index f3e1bed..e5bfe75 100755
--- a/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
+++ b/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
@@ -39,16 +39,16 @@
fi
[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
[ -z "$BURST" ] && BURST=1024
+[ -z "$COUNT" ] && COUNT="10000000" # Zero means indefinitely
# Base Config
DELAY="0" # Zero means max speed
-COUNT="10000000" # Zero means indefinitely
# General cleanup everything since last run
pg_ctrl "reset"
# Threads are specified with parameter -t value in $THREADS
-for ((thread = 0; thread < $THREADS; thread++)); do
+for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
# The device name is extended with @name, using thread number to
# make them unique, but any name will do.
dev=${DEV}@${thread}
@@ -81,7 +81,7 @@
echo "Done" >&2
# Print results
-for ((thread = 0; thread < $THREADS; thread++)); do
+for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
dev=${DEV}@${thread}
echo "Device: $dev"
cat /proc/net/pktgen/$dev | grep -A2 "Result:"
diff --git a/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh b/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh
index cc102e9..1ad878e 100755
--- a/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh
+++ b/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh
@@ -22,16 +22,16 @@
if [[ -n "$BURST" ]]; then
err 1 "Bursting not supported for this mode"
fi
+[ -z "$COUNT" ] && COUNT="10000000" # Zero means indefinitely
# Base Config
DELAY="0" # Zero means max speed
-COUNT="10000000" # Zero means indefinitely
# General cleanup everything since last run
pg_ctrl "reset"
# Threads are specified with parameter -t value in $THREADS
-for ((thread = 0; thread < $THREADS; thread++)); do
+for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
# The device name is extended with @name, using thread number to
# make them unique, but any name will do.
dev=${DEV}@${thread}
@@ -61,7 +61,7 @@
echo "Done" >&2
# Print results
-for ((thread = 0; thread < $THREADS; thread++)); do
+for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
dev=${DEV}@${thread}
echo "Device: $dev"
cat /proc/net/pktgen/$dev | grep -A2 "Result:"
diff --git a/samples/pktgen/pktgen_sample01_simple.sh b/samples/pktgen/pktgen_sample01_simple.sh
index 29ef4ba..35b7fe3 100755
--- a/samples/pktgen/pktgen_sample01_simple.sh
+++ b/samples/pktgen/pktgen_sample01_simple.sh
@@ -20,10 +20,10 @@
[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
# Example enforce param "-m" for dst_mac
[ -z "$DST_MAC" ] && usage && err 2 "Must specify -m dst_mac"
+[ -z "$COUNT" ] && COUNT="100000" # Zero means indefinitely
# Base Config
DELAY="0" # Zero means max speed
-COUNT="100000" # Zero means indefinitely
# Flow variation random source port between min and max
UDP_MIN=9
diff --git a/samples/pktgen/pktgen_sample02_multiqueue.sh b/samples/pktgen/pktgen_sample02_multiqueue.sh
index c88a161..cbdd3e2 100755
--- a/samples/pktgen/pktgen_sample02_multiqueue.sh
+++ b/samples/pktgen/pktgen_sample02_multiqueue.sh
@@ -13,9 +13,10 @@
# Required param: -i dev in $DEV
source ${basedir}/parameters.sh
+[ -z "$COUNT" ] && COUNT="100000" # Zero means indefinitely
+
# Base Config
DELAY="0" # Zero means max speed
-COUNT="100000" # Zero means indefinitely
[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
# Flow variation random source port between min and max
@@ -32,7 +33,7 @@
pg_ctrl "reset"
# Threads are specified with parameter -t value in $THREADS
-for ((thread = 0; thread < $THREADS; thread++)); do
+for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
# The device name is extended with @name, using thread number to
# make them unique, but any name will do.
dev=${DEV}@${thread}
@@ -70,7 +71,7 @@
echo "Done" >&2
# Print results
-for ((thread = 0; thread < $THREADS; thread++)); do
+for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
dev=${DEV}@${thread}
echo "Device: $dev"
cat /proc/net/pktgen/$dev | grep -A2 "Result:"
diff --git a/samples/pktgen/pktgen_sample03_burst_single_flow.sh b/samples/pktgen/pktgen_sample03_burst_single_flow.sh
index 80cf8f5..8d26e0c 100755
--- a/samples/pktgen/pktgen_sample03_burst_single_flow.sh
+++ b/samples/pktgen/pktgen_sample03_burst_single_flow.sh
@@ -31,16 +31,16 @@
[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
[ -z "$BURST" ] && BURST=32
[ -z "$CLONE_SKB" ] && CLONE_SKB="100000"
+[ -z "$COUNT" ] && COUNT="0" # Zero means indefinitely
# Base Config
DELAY="0" # Zero means max speed
-COUNT="0" # Zero means indefinitely
# General cleanup everything since last run
pg_ctrl "reset"
# Threads are specified with parameter -t value in $THREADS
-for ((thread = 0; thread < $THREADS; thread++)); do
+for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
dev=${DEV}@${thread}
# Remove all other devices and add_device $dev to thread
@@ -71,7 +71,7 @@
# Run if user hits control-c
function control_c() {
# Print results
- for ((thread = 0; thread < $THREADS; thread++)); do
+ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
dev=${DEV}@${thread}
echo "Device: $dev"
cat /proc/net/pktgen/$dev | grep -A2 "Result:"
diff --git a/samples/pktgen/pktgen_sample04_many_flows.sh b/samples/pktgen/pktgen_sample04_many_flows.sh
index f60412e..497fb75 100755
--- a/samples/pktgen/pktgen_sample04_many_flows.sh
+++ b/samples/pktgen/pktgen_sample04_many_flows.sh
@@ -15,6 +15,7 @@
[ -z "$DEST_IP" ] && DEST_IP="198.18.0.42"
[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
+[ -z "$COUNT" ] && COUNT="0" # Zero means indefinitely
# NOTICE: Script specific settings
# =======
@@ -26,7 +27,6 @@
# Base Config
DELAY="0" # Zero means max speed
-COUNT="0" # Zero means indefinitely
if [[ -n "$BURST" ]]; then
err 1 "Bursting not supported for this mode"
@@ -36,7 +36,7 @@
pg_ctrl "reset"
# Threads are specified with parameter -t value in $THREADS
-for ((thread = 0; thread < $THREADS; thread++)); do
+for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
dev=${DEV}@${thread}
# Remove all other devices and add_device $dev to thread
@@ -78,7 +78,7 @@
# Run if user hits control-c
function print_result() {
# Print results
- for ((thread = 0; thread < $THREADS; thread++)); do
+ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
dev=${DEV}@${thread}
echo "Device: $dev"
cat /proc/net/pktgen/$dev | grep -A2 "Result:"
diff --git a/samples/pktgen/pktgen_sample05_flow_per_thread.sh b/samples/pktgen/pktgen_sample05_flow_per_thread.sh
index 32ad818..ac9cfd6 100755
--- a/samples/pktgen/pktgen_sample05_flow_per_thread.sh
+++ b/samples/pktgen/pktgen_sample05_flow_per_thread.sh
@@ -20,17 +20,17 @@
[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
[ -z "$BURST" ] && BURST=32
+[ -z "$COUNT" ] && COUNT="0" # Zero means indefinitely
# Base Config
DELAY="0" # Zero means max speed
-COUNT="0" # Zero means indefinitely
# General cleanup everything since last run
pg_ctrl "reset"
# Threads are specified with parameter -t value in $THREADS
-for ((thread = 0; thread < $THREADS; thread++)); do
+for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
dev=${DEV}@${thread}
# Remove all other devices and add_device $dev to thread
@@ -66,7 +66,7 @@
# Run if user hits control-c
function print_result() {
# Print results
- for ((thread = 0; thread < $THREADS; thread++)); do
+ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
dev=${DEV}@${thread}
echo "Device: $dev"
cat /proc/net/pktgen/$dev | grep -A2 "Result:"
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 5aeaf30..7b7433a 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -79,6 +79,7 @@
{ RTM_GETNSID, NETLINK_ROUTE_SOCKET__NLMSG_READ },
{ RTM_NEWSTATS, NETLINK_ROUTE_SOCKET__NLMSG_READ },
{ RTM_GETSTATS, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWCACHEREPORT, NETLINK_ROUTE_SOCKET__NLMSG_READ },
};
static const struct nlmsg_perm nlmsg_tcpdiag_perms[] =
@@ -158,7 +159,7 @@
switch (sclass) {
case SECCLASS_NETLINK_ROUTE_SOCKET:
/* RTM_MAX always points to RTM_SETxxxx, ie RTM_NEWxxx + 3 */
- BUILD_BUG_ON(RTM_MAX != (RTM_NEWSTATS + 3));
+ BUILD_BUG_ON(RTM_MAX != (RTM_NEWCACHEREPORT + 3));
err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms,
sizeof(nlmsg_route_perms));
break;
diff --git a/tools/hv/bondvf.sh b/tools/hv/bondvf.sh
index d85968c..89b2506 100755
--- a/tools/hv/bondvf.sh
+++ b/tools/hv/bondvf.sh
@@ -102,15 +102,30 @@
}
function del_eth_cfg_ubuntu {
- local fn=$cfgdir/interfaces
+ local mainfn=$cfgdir/interfaces
+ local fnlist=( $mainfn )
+
+ local dirlist=(`awk '/^[ \t]*source/{print $2}' $mainfn`)
+
+ local i
+ for i in "${dirlist[@]}"
+ do
+ fnlist+=(`ls $i 2>/dev/null`)
+ done
+
local tmpfl=$(mktemp)
local nic_start='^[ \t]*(auto|iface|mapping|allow-.*)[ \t]+'$1
local nic_end='^[ \t]*(auto|iface|mapping|allow-.*|source)'
- awk "/$nic_end/{x=0} x{next} /$nic_start/{x=1;next} 1" $fn >$tmpfl
+ local fn
+ for fn in "${fnlist[@]}"
+ do
+ awk "/$nic_end/{x=0} x{next} /$nic_start/{x=1;next} 1" \
+ $fn >$tmpfl
- cp $tmpfl $fn
+ cp $tmpfl $fn
+ done
rm $tmpfl
}
@@ -119,7 +134,6 @@
local fn=$cfgdir/interfaces
del_eth_cfg_ubuntu $1
-
echo $'\n'auto $1 >>$fn
echo iface $1 inet manual >>$fn
echo bond-master $2 >>$fn
@@ -128,7 +142,10 @@
function create_eth_cfg_pri_ubuntu {
local fn=$cfgdir/interfaces
- create_eth_cfg_ubuntu $1 $2
+ del_eth_cfg_ubuntu $1
+ echo $'\n'allow-hotplug $1 >>$fn
+ echo iface $1 inet manual >>$fn
+ echo bond-master $2 >>$fn
echo bond-primary $1 >>$fn
}
@@ -153,7 +170,11 @@
}
function create_eth_cfg_pri_suse {
- create_eth_cfg_suse $1
+ local fn=$cfgdir/ifcfg-$1
+
+ rm -f $fn
+ echo BOOTPROTO=none >>$fn
+ echo STARTMODE=hotplug >>$fn
}
function create_bond_cfg_suse {
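With the change above, create_eth_cfg_pri_ubuntu no longer reuses create_eth_cfg_ubuntu, so the primary slave gets an allow-hotplug stanza instead of an auto one. For illustrative interface names eth1 (the VF) and bond0, the emitted config is:

allow-hotplug eth1
iface eth1 inet manual
bond-master bond0
bond-primary eth1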
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 94dfa9d..f94b48b 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -82,6 +82,11 @@
BPF_PROG_ATTACH,
BPF_PROG_DETACH,
BPF_PROG_TEST_RUN,
+ BPF_PROG_GET_NEXT_ID,
+ BPF_MAP_GET_NEXT_ID,
+ BPF_PROG_GET_FD_BY_ID,
+ BPF_MAP_GET_FD_BY_ID,
+ BPF_OBJ_GET_INFO_BY_FD,
};
enum bpf_map_type {
@@ -209,6 +214,21 @@
__u32 repeat;
__u32 duration;
} test;
+
+ struct { /* anonymous struct used by BPF_*_GET_*_ID */
+ union {
+ __u32 start_id;
+ __u32 prog_id;
+ __u32 map_id;
+ };
+ __u32 next_id;
+ };
+
+ struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
+ __u32 bpf_fd;
+ __u32 info_len;
+ __aligned_u64 info;
+ } info;
} __attribute__((aligned(8)));
/* BPF helper function descriptions:
@@ -313,8 +333,11 @@
* @flags: room for future extensions
* Return: 0 on success or negative error
*
- * u64 bpf_perf_event_read(&map, index)
- * Return: Number events read or error code
+ * u64 bpf_perf_event_read(map, flags)
+ * read perf event counter value
+ * @map: pointer to perf_event_array map
+ * @flags: index of event in the map or bitmask flags
+ * Return: value of perf event counter read or error code
*
* int bpf_redirect(ifindex, flags)
* redirect to another netdev
@@ -328,11 +351,11 @@
* @skb: pointer to skb
* Return: realm if != 0
*
- * int bpf_perf_event_output(ctx, map, index, data, size)
+ * int bpf_perf_event_output(ctx, map, flags, data, size)
* output perf raw sample
* @ctx: struct pt_regs*
* @map: pointer to perf_event_array map
- * @index: index of event in the map
+ * @flags: index of event in the map or bitmask flags
* @data: data on stack to be output as raw data
* @size: size of data
* Return: 0 on success or negative error
@@ -490,6 +513,11 @@
* Get the owner uid of the socket stored inside sk_buff.
* @skb: pointer to skb
* Return: uid of the socket owner on success or overflowuid if failed.
+ *
+ * u32 bpf_set_hash(skb, hash)
+ * Set full skb->hash.
+ * @skb: pointer to skb
+ * @hash: hash to set
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -539,7 +567,8 @@
FN(xdp_adjust_head), \
FN(probe_read_str), \
FN(get_socket_cookie), \
- FN(get_socket_uid),
+ FN(get_socket_uid), \
+ FN(set_hash),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -670,4 +699,25 @@
__u32 data_end;
};
+#define BPF_TAG_SIZE 8
+
+struct bpf_prog_info {
+ __u32 type;
+ __u32 id;
+ __u8 tag[BPF_TAG_SIZE];
+ __u32 jited_prog_len;
+ __u32 xlated_prog_len;
+ __aligned_u64 jited_prog_insns;
+ __aligned_u64 xlated_prog_insns;
+} __attribute__((aligned(8)));
+
+struct bpf_map_info {
+ __u32 type;
+ __u32 id;
+ __u32 key_size;
+ __u32 value_size;
+ __u32 max_entries;
+ __u32 map_flags;
+} __attribute__((aligned(8)));
+
#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 6e17898..7e0405e 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -257,3 +257,71 @@
*duration = attr.test.duration;
return ret;
}
+
+int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
+{
+ union bpf_attr attr;
+ int err;
+
+ bzero(&attr, sizeof(attr));
+ attr.start_id = start_id;
+
+ err = sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr));
+ if (!err)
+ *next_id = attr.next_id;
+
+ return err;
+}
+
+int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
+{
+ union bpf_attr attr;
+ int err;
+
+ bzero(&attr, sizeof(attr));
+ attr.start_id = start_id;
+
+ err = sys_bpf(BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr));
+ if (!err)
+ *next_id = attr.next_id;
+
+ return err;
+}
+
+int bpf_prog_get_fd_by_id(__u32 id)
+{
+ union bpf_attr attr;
+
+ bzero(&attr, sizeof(attr));
+ attr.prog_id = id;
+
+ return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
+}
+
+int bpf_map_get_fd_by_id(__u32 id)
+{
+ union bpf_attr attr;
+
+ bzero(&attr, sizeof(attr));
+ attr.map_id = id;
+
+ return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
+}
+
+int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
+{
+ union bpf_attr attr;
+ int err;
+
+ bzero(&attr, sizeof(attr));
+ bzero(info, *info_len);
+ attr.info.bpf_fd = prog_fd;
+ attr.info.info_len = *info_len;
+ attr.info.info = ptr_to_u64(info);
+
+ err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
+ if (!err)
+ *info_len = attr.info.info_len;
+
+ return err;
+}
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 972bd83..16de44a 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -54,5 +54,10 @@
int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
void *data_out, __u32 *size_out, __u32 *retval,
__u32 *duration);
+int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id);
+int bpf_map_get_next_id(__u32 start_id, __u32 *next_id);
+int bpf_prog_get_fd_by_id(__u32 id);
+int bpf_map_get_fd_by_id(__u32 id);
+int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len);
#endif
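A minimal sketch of how the new ID-based calls compose, walking every BPF program loaded on the system (error handling beyond the unload race is elided; this is not part of the patch):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include "bpf.h"

static void list_prog_ids(void)
{
	__u32 id = 0;
	int fd;

	while (!bpf_prog_get_next_id(id, &id)) {
		fd = bpf_prog_get_fd_by_id(id);
		if (fd < 0) {
			if (errno == ENOENT)
				continue;	/* prog unloaded since get_next_id */
			break;
		}
		printf("prog id %u\n", id);
		close(fd);
	}
}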
diff --git a/tools/net/bpf_jit_disasm.c b/tools/net/bpf_jit_disasm.c
index ad572e6..422d9abd 100644
--- a/tools/net/bpf_jit_disasm.c
+++ b/tools/net/bpf_jit_disasm.c
@@ -159,8 +159,8 @@
free(buff);
}
-static unsigned int get_last_jit_image(char *haystack, size_t hlen,
- uint8_t *image, size_t ilen)
+static uint8_t *get_last_jit_image(char *haystack, size_t hlen,
+ unsigned int *ilen)
{
char *ptr, *pptr, *tmp;
off_t off = 0;
@@ -168,9 +168,10 @@
regmatch_t pmatch[1];
unsigned long base;
regex_t regex;
+ uint8_t *image;
if (hlen == 0)
- return 0;
+ return NULL;
ret = regcomp(®ex, "flen=[[:alnum:]]+ proglen=[[:digit:]]+ "
"pass=[[:digit:]]+ image=[[:xdigit:]]+", REG_EXTENDED);
@@ -194,11 +195,22 @@
&flen, &proglen, &pass, &base);
if (ret != 4) {
regfree(®ex);
- return 0;
+ return NULL;
+ }
+ if (proglen > 1000000) {
+ printf("proglen of %d too big, stopping\n", proglen);
+ return NULL;
}
+ image = malloc(proglen);
+ if (!image) {
+ printf("Out of memory\n");
+ return NULL;
+ }
+ memset(image, 0, proglen);
+
tmp = ptr = haystack + off;
- while ((ptr = strtok(tmp, "\n")) != NULL && ulen < ilen) {
+ while ((ptr = strtok(tmp, "\n")) != NULL && ulen < proglen) {
tmp = NULL;
if (!strstr(ptr, "JIT code"))
continue;
@@ -208,10 +220,12 @@
ptr = pptr;
do {
image[ulen++] = (uint8_t) strtoul(pptr, &pptr, 16);
- if (ptr == pptr || ulen >= ilen) {
+ if (ptr == pptr) {
ulen--;
break;
}
+ if (ulen >= proglen)
+ break;
ptr = pptr;
} while (1);
}
@@ -222,7 +236,8 @@
printf("%lx + <x>:\n", base);
regfree(®ex);
- return ulen;
+ *ilen = ulen;
+ return image;
}
static void usage(void)
@@ -237,12 +252,12 @@
int main(int argc, char **argv)
{
unsigned int len, klen, opt, opcodes = 0;
- static uint8_t image[32768];
char *kbuff, *file = NULL;
char *ofile = NULL;
int ofd;
ssize_t nr;
uint8_t *pos;
+ uint8_t *image = NULL;
while ((opt = getopt(argc, argv, "of:O:")) != -1) {
switch (opt) {
@@ -262,7 +277,6 @@
}
bfd_init();
- memset(image, 0, sizeof(image));
kbuff = get_log_buff(file, &klen);
if (!kbuff) {
@@ -270,8 +284,8 @@
return -1;
}
- len = get_last_jit_image(kbuff, klen, image, sizeof(image));
- if (len <= 0) {
+ image = get_last_jit_image(kbuff, klen, &len);
+ if (!image) {
fprintf(stderr, "No JIT image found!\n");
goto done;
}
@@ -301,5 +315,6 @@
done:
put_log_buff(kbuff);
+ free(image);
return 0;
}
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index f389b02..2ca51a8 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -14,7 +14,8 @@
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_align
-TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o
+TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
+ test_pkt_md_access.o
TEST_PROGS := test_kmod.sh
diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
index 9644d4e..bccebd9 100644
--- a/tools/testing/selftests/bpf/test_align.c
+++ b/tools/testing/selftests/bpf/test_align.c
@@ -9,6 +9,8 @@
#include <stddef.h>
#include <stdbool.h>
+#include <sys/resource.h>
+
#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
@@ -426,12 +428,15 @@
}
printf("Results: %d pass %d fail\n",
all_pass, all_fail);
- return 0;
+ return all_fail ? EXIT_FAILURE : EXIT_SUCCESS;
}
int main(int argc, char **argv)
{
unsigned int from = 0, to = ARRAY_SIZE(tests);
+ struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
+
+ setrlimit(RLIMIT_MEMLOCK, &rinf);
if (argc == 3) {
unsigned int l = atoi(argv[argc - 2]);
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 9331452..79601c8 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -239,6 +239,54 @@
close(fd);
}
+static void test_hashmap_walk(int task, void *data)
+{
+ int fd, i, max_entries = 100000;
+ long long key, value, next_key;
+ bool next_key_valid = true;
+
+ fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
+ max_entries, map_flags);
+ if (fd < 0) {
+ printf("Failed to create hashmap '%s'!\n", strerror(errno));
+ exit(1);
+ }
+
+ for (i = 0; i < max_entries; i++) {
+ key = i; value = key;
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == 0);
+ }
+
+ for (i = 0; bpf_map_get_next_key(fd, !i ? NULL : &key,
+ &next_key) == 0; i++) {
+ key = next_key;
+ assert(bpf_map_lookup_elem(fd, &key, &value) == 0);
+ }
+
+ assert(i == max_entries);
+
+ assert(bpf_map_get_next_key(fd, NULL, &key) == 0);
+ for (i = 0; next_key_valid; i++) {
+ next_key_valid = bpf_map_get_next_key(fd, &key, &next_key) == 0;
+ assert(bpf_map_lookup_elem(fd, &key, &value) == 0);
+ value++;
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0);
+ key = next_key;
+ }
+
+ assert(i == max_entries);
+
+ for (i = 0; bpf_map_get_next_key(fd, !i ? NULL : &key,
+ &next_key) == 0; i++) {
+ key = next_key;
+ assert(bpf_map_lookup_elem(fd, &key, &value) == 0);
+ assert(value - 1 == key);
+ }
+
+ assert(i == max_entries);
+ close(fd);
+}
+
static void test_arraymap(int task, void *data)
{
int key, next_key, fd;
@@ -464,6 +512,7 @@
run_parallel(100, test_hashmap, NULL);
run_parallel(100, test_hashmap_percpu, NULL);
run_parallel(100, test_hashmap_sizes, NULL);
+ run_parallel(100, test_hashmap_walk, NULL);
run_parallel(100, test_arraymap, NULL);
run_parallel(100, test_arraymap_percpu, NULL);
@@ -549,6 +598,7 @@
{
test_hashmap(0, NULL);
test_hashmap_percpu(0, NULL);
+ test_hashmap_walk(0, NULL);
test_arraymap(0, NULL);
test_arraymap_percpu(0, NULL);
diff --git a/tools/testing/selftests/bpf/test_obj_id.c b/tools/testing/selftests/bpf/test_obj_id.c
new file mode 100644
index 0000000..880d296
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_obj_id.c
@@ -0,0 +1,35 @@
+/* Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <linux/pkt_cls.h>
+#include "bpf_helpers.h"
+
+/* This is a trivial bpf program that should always load
+ * successfully, since testing the verifier is
+ * not the focus here.
+ */
+
+int _version SEC("version") = 1;
+
+struct bpf_map_def SEC("maps") test_map_id = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u64),
+ .max_entries = 1,
+};
+
+SEC("test_obj_id_dummy")
+int test_obj_id(struct __sk_buff *skb)
+{
+ __u32 key = 0;
+ __u64 *value;
+
+ value = bpf_map_lookup_elem(&test_map_id, &key);
+
+ return TC_ACT_OK;
+}
diff --git a/tools/testing/selftests/bpf/test_pkt_md_access.c b/tools/testing/selftests/bpf/test_pkt_md_access.c
new file mode 100644
index 0000000..71729d4
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_pkt_md_access.c
@@ -0,0 +1,35 @@
+/* Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stddef.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/pkt_cls.h>
+#include "bpf_helpers.h"
+
+int _version SEC("version") = 1;
+
+#define TEST_FIELD(TYPE, FIELD, MASK) \
+ { \
+ TYPE tmp = *(volatile TYPE *)&skb->FIELD; \
+ if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK)) \
+ return TC_ACT_SHOT; \
+ }
+
+SEC("test1")
+int process(struct __sk_buff *skb)
+{
+ TEST_FIELD(__u8, len, 0xFF);
+ TEST_FIELD(__u16, len, 0xFFFF);
+ TEST_FIELD(__u32, len, 0xFFFFFFFF);
+ TEST_FIELD(__u16, protocol, 0xFFFF);
+ TEST_FIELD(__u32, protocol, 0xFFFFFFFF);
+ TEST_FIELD(__u8, hash, 0xFF);
+ TEST_FIELD(__u16, hash, 0xFFFF);
+ TEST_FIELD(__u32, hash, 0xFFFFFFFF);
+
+ return TC_ACT_OK;
+}
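For reference, the first invocation above, TEST_FIELD(__u8, len, 0xFF), expands to:

{
	__u8 tmp = *(volatile __u8 *)&skb->len;
	if (tmp != ((*(volatile __u32 *)&skb->len) & 0xFF))
		return TC_ACT_SHOT;
}

i.e. each narrow load of a context field (volatile, so the compiler cannot merge the accesses) must agree with the masked full-width load of the same field.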
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index b59f5ed..5855cd3 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -22,6 +22,8 @@
#include <sys/wait.h>
#include <sys/resource.h>
+#include <sys/types.h>
+#include <fcntl.h>
#include <linux/bpf.h>
#include <linux/err.h>
@@ -70,6 +72,7 @@
pass_cnt++; \
printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
} \
+ __ret; \
})
static int bpf_prog_load(const char *file, enum bpf_prog_type type,
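The __ret; added above makes the CHECK() statement expression evaluate to its error flag, so callers can now branch on it, as the new tests below do:

if (CHECK(err, "get-prog-info(fd)", "err %d errno %d\n", err, errno))
	goto done;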
@@ -283,6 +286,224 @@
bpf_object__close(obj);
}
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+ return (__u64) (unsigned long) ptr;
+}
+
+static void test_bpf_obj_id(void)
+{
+ const __u64 array_magic_value = 0xfaceb00c;
+ const __u32 array_key = 0;
+ const int nr_iters = 2;
+ const char *file = "./test_obj_id.o";
+ const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
+
+ struct bpf_object *objs[nr_iters];
+ int prog_fds[nr_iters], map_fds[nr_iters];
+ /* +1 to test for the info_len returned by kernel */
+ struct bpf_prog_info prog_infos[nr_iters + 1];
+ struct bpf_map_info map_infos[nr_iters + 1];
+ char jited_insns[128], xlated_insns[128];
+ __u32 i, next_id, info_len, nr_id_found, duration = 0;
+ int sysctl_fd, jit_enabled = 0, err = 0;
+ __u64 array_value;
+
+ sysctl_fd = open(jit_sysctl, O_RDONLY);
+ if (sysctl_fd != -1) {
+ char tmpc;
+
+ if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
+ jit_enabled = (tmpc != '0');
+ close(sysctl_fd);
+ }
+
+ err = bpf_prog_get_fd_by_id(0);
+ CHECK(err >= 0 || errno != ENOENT,
+ "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);
+
+ err = bpf_map_get_fd_by_id(0);
+ CHECK(err >= 0 || errno != ENOENT,
+ "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);
+
+ for (i = 0; i < nr_iters; i++)
+ objs[i] = NULL;
+
+ /* Check bpf_obj_get_info_by_fd() */
+ for (i = 0; i < nr_iters; i++) {
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
+ &objs[i], &prog_fds[i]);
+ /* test_obj_id.o is a dumb prog. It should never fail
+ * to load.
+ */
+ assert(!err);
+
+ /* Check getting prog info */
+ info_len = sizeof(struct bpf_prog_info) * 2;
+ prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
+ prog_infos[i].jited_prog_len = sizeof(jited_insns);
+ prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
+ prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
+ err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
+ &info_len);
+ if (CHECK(err ||
+ prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
+ info_len != sizeof(struct bpf_prog_info) ||
+ (jit_enabled && !prog_infos[i].jited_prog_len) ||
+ !prog_infos[i].xlated_prog_len,
+ "get-prog-info(fd)",
+ "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u\n",
+ err, errno, i,
+ prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
+ info_len, sizeof(struct bpf_prog_info),
+ jit_enabled,
+ prog_infos[i].jited_prog_len,
+ prog_infos[i].xlated_prog_len))
+ goto done;
+
+ map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
+ assert(map_fds[i] >= 0);
+ err = bpf_map_update_elem(map_fds[i], &array_key,
+ &array_magic_value, 0);
+ assert(!err);
+
+ /* Check getting map info */
+ info_len = sizeof(struct bpf_map_info) * 2;
+ err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
+ &info_len);
+ if (CHECK(err ||
+ map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
+ map_infos[i].key_size != sizeof(__u32) ||
+ map_infos[i].value_size != sizeof(__u64) ||
+ map_infos[i].max_entries != 1 ||
+ map_infos[i].map_flags != 0 ||
+ info_len != sizeof(struct bpf_map_info),
+ "get-map-info(fd)",
+ "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X\n",
+ err, errno,
+ map_infos[i].type, BPF_MAP_TYPE_ARRAY,
+ info_len, sizeof(struct bpf_map_info),
+ map_infos[i].key_size,
+ map_infos[i].value_size,
+ map_infos[i].max_entries,
+ map_infos[i].map_flags))
+ goto done;
+ }
+
+ /* Check bpf_prog_get_next_id() */
+ nr_id_found = 0;
+ next_id = 0;
+ while (!bpf_prog_get_next_id(next_id, &next_id)) {
+ struct bpf_prog_info prog_info;
+ int prog_fd;
+
+ info_len = sizeof(prog_info);
+
+ prog_fd = bpf_prog_get_fd_by_id(next_id);
+ if (prog_fd < 0 && errno == ENOENT)
+ /* The bpf_prog is in the dead row */
+ continue;
+ if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
+ "prog_fd %d next_id %d errno %d\n",
+ prog_fd, next_id, errno))
+ break;
+
+ for (i = 0; i < nr_iters; i++)
+ if (prog_infos[i].id == next_id)
+ break;
+
+ if (i == nr_iters)
+ continue;
+
+ nr_id_found++;
+
+ err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
+ CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
+ memcmp(&prog_info, &prog_infos[i], info_len),
+ "get-prog-info(next_id->fd)",
+ "err %d errno %d info_len %u(%lu) memcmp %d\n",
+ err, errno, info_len, sizeof(struct bpf_prog_info),
+ memcmp(&prog_info, &prog_infos[i], info_len));
+
+ close(prog_fd);
+ }
+ CHECK(nr_id_found != nr_iters,
+ "check total prog id found by get_next_id",
+ "nr_id_found %u(%u)\n",
+ nr_id_found, nr_iters);
+
+ /* Check bpf_map_get_next_id() */
+ nr_id_found = 0;
+ next_id = 0;
+ while (!bpf_map_get_next_id(next_id, &next_id)) {
+ struct bpf_map_info map_info;
+ int map_fd;
+
+ info_len = sizeof(map_info);
+
+ map_fd = bpf_map_get_fd_by_id(next_id);
+ if (map_fd < 0 && errno == ENOENT)
+ /* The bpf_map is in the dead row */
+ continue;
+ if (CHECK(map_fd < 0, "get-map-fd(next_id)",
+ "map_fd %d next_id %u errno %d\n",
+ map_fd, next_id, errno))
+ break;
+
+ for (i = 0; i < nr_iters; i++)
+ if (map_infos[i].id == next_id)
+ break;
+
+ if (i == nr_iters)
+ continue;
+
+ nr_id_found++;
+
+ err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
+ assert(!err);
+
+ err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
+ CHECK(err || info_len != sizeof(struct bpf_map_info) ||
+ memcmp(&map_info, &map_infos[i], info_len) ||
+ array_value != array_magic_value,
+ "check get-map-info(next_id->fd)",
+ "err %d errno %d info_len %u(%lu) memcmp %d array_value %llu(%llu)\n",
+ err, errno, info_len, sizeof(struct bpf_map_info),
+ memcmp(&map_info, &map_infos[i], info_len),
+ array_value, array_magic_value);
+
+ close(map_fd);
+ }
+ CHECK(nr_id_found != nr_iters,
+ "check total map id found by get_next_id",
+ "nr_id_found %u(%u)\n",
+ nr_id_found, nr_iters);
+
+done:
+ for (i = 0; i < nr_iters; i++)
+ bpf_object__close(objs[i]);
+}
+
+static void test_pkt_md_access(void)
+{
+ const char *file = "./test_pkt_md_access.o";
+ struct bpf_object *obj;
+ __u32 duration, retval;
+ int err, prog_fd;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ if (err)
+ return;
+
+ err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+
+ bpf_object__close(obj);
+}
+
int main(void)
{
struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
@@ -293,7 +514,9 @@
test_xdp();
test_l4lb();
test_tcp_estats();
+ test_bpf_obj_id();
+ test_pkt_md_access();
printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
- return 0;
+ return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
}
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index cabb19b..c0af019 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -1073,44 +1073,75 @@
.result = ACCEPT,
},
{
- "check cb access: byte, oob 1",
+ "__sk_buff->hash, offset 0, byte store not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 4),
+ offsetof(struct __sk_buff, hash)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
- "check cb access: byte, oob 2",
+ "__sk_buff->tc_index, offset 3, byte store not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) - 1),
+ offsetof(struct __sk_buff, tc_index) + 3),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
- "check cb access: byte, oob 3",
+ "check skb->hash byte load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+#else
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "check skb->hash byte load not permitted 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4]) + 4),
+ offsetof(struct __sk_buff, hash) + 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
- "check cb access: byte, oob 4",
+ "check skb->hash byte load not permitted 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) - 1),
+ offsetof(struct __sk_buff, hash) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check skb->hash byte load not permitted 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#else
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+#endif
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
@@ -1188,44 +1219,53 @@
.result = REJECT,
},
{
- "check cb access: half, oob 1",
+ "check __sk_buff->hash, offset 0, half store not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 4),
+ offsetof(struct __sk_buff, hash)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
- "check cb access: half, oob 2",
+ "check __sk_buff->tc_index, offset 2, half store not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) - 2),
+ offsetof(struct __sk_buff, tc_index) + 2),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
- "check cb access: half, oob 3",
+ "check skb->hash half load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4]) + 4),
+ offsetof(struct __sk_buff, hash)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 2),
+#endif
BPF_EXIT_INSN(),
},
- .errstr = "invalid bpf_context access",
- .result = REJECT,
+ .result = ACCEPT,
},
{
- "check cb access: half, oob 4",
+ "check skb->hash half load not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) - 2),
+ offsetof(struct __sk_buff, hash) + 2),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+#endif
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
@@ -1368,28 +1408,6 @@
"check cb access: double, oob 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 8),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check cb access: double, oob 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) - 8),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check cb access: double, oob 4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_EXIT_INSN(),
@@ -1398,22 +1416,22 @@
.result = REJECT,
},
{
- "check cb access: double, oob 5",
+ "check __sk_buff->ifindex dw store not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4]) + 8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, ifindex)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
- "check cb access: double, oob 6",
+ "check __sk_buff->ifindex dw load not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) - 8),
+ offsetof(struct __sk_buff, ifindex)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
@@ -5169,6 +5187,98 @@
},
.result = ACCEPT,
},
+ {
+ "check bpf_perf_event_data->sample_period byte load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period) + 7),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+ },
+ {
+ "check bpf_perf_event_data->sample_period half load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period) + 6),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+ },
+ {
+ "check bpf_perf_event_data->sample_period word load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period) + 4),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+ },
+ {
+ "check bpf_perf_event_data->sample_period dword load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+ },
+ {
+ "check skb->data half load not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data) + 2),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ },
+ {
+ "check skb->tc_classid half load not permitted for lwt prog",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#ifdef __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid) + 2),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+ },
};
static int probe_filter_length(const struct bpf_insn *fp)
@@ -5418,7 +5528,7 @@
}
printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
- return errors ? -errors : 0;
+ return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}
int main(int argc, char **argv)
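The __LITTLE_ENDIAN conditionals in the tests above select whichever offset holds the least significant bytes of the field. For a __u32 such as skb->hash holding 0x11223344:

/* little-endian memory layout: 44 33 22 11 - low byte at offset 0 */
/* big-endian memory layout:    11 22 33 44 - low byte at offset 3 */

so a one-byte load of the low bits uses offset 0 on little-endian builds and offset 3 on big-endian ones, which is why only those combinations are expected to be permitted.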
diff --git a/tools/testing/selftests/tc-testing/.gitignore b/tools/testing/selftests/tc-testing/.gitignore
new file mode 100644
index 0000000..c18dd8d
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/.gitignore
@@ -0,0 +1 @@
+__pycache__/
diff --git a/tools/testing/selftests/tc-testing/README b/tools/testing/selftests/tc-testing/README
new file mode 100644
index 0000000..970ff29
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/README
@@ -0,0 +1,102 @@
+tdc - Linux Traffic Control (tc) unit testing suite
+
+Author: Lucas Bates - lucasb@mojatatu.com
+
+tdc is a Python script to load tc unit tests from a separate JSON file and
+execute them inside a network namespace dedicated to the task.
+
+
+REQUIREMENTS
+------------
+
+* Minimum Python version of 3.4. Earlier 3.X versions may work but are not
+ guaranteed.
+
+* The kernel must have network namespace support.
+
+* The kernel must have veth support available, as a veth pair is created
+ prior to running the tests.
+
+* All tc-related features must be built in or available as modules.
+ To check what is required in the current setup, run:
+ ./tdc.py -c
+
+ Note:
+ In the current release, a tdc run will abort on any failure in the setup or
+ teardown commands - which includes being unable to run a test simply
+ because the kernel does not support a specific feature. (This will be
+ handled in a future version; the current workaround is to run only the
+ test categories that your kernel supports.)
+
+
+BEFORE YOU RUN
+--------------
+
+The path to the tc executable that will be most commonly tested can be defined
+in the tdc_config.py file. Find the 'TC' entry in the NAMES dictionary and
+define the path.
+
+If you need to test a different tc executable on the fly, you can do so by
+using the -p option when running tdc:
+ ./tdc.py -p /path/to/tc
+
+
+RUNNING TDC
+-----------
+
+To use tdc, root privileges are required. tdc will not run otherwise.
+
+All tests are executed inside a network namespace to prevent conflicts
+within the host.
+
+Running tdc without any arguments will run all tests. Refer to the section
+on command line arguments for more information, or run:
+ ./tdc.py -h
+
+tdc will list the test names as they are being run, and print a summary in
+TAP (Test Anything Protocol) format when they are done. If tests fail,
+output captured from the failing test will be printed immediately following
+the failed test in the TAP output.
+
+
+USER-DEFINED CONSTANTS
+----------------------
+
+The tdc_config.py file contains multiple values that can be altered to suit
+your needs. Any value in the NAMES dictionary can be altered without affecting
+the tests to be run. These values are used in the tc commands that will be
+executed as part of the test. More will be added as test cases require.
+
+Example:
+ $TC qdisc add dev $DEV1 ingress
+
+
+COMMAND LINE ARGUMENTS
+----------------------
+
+Run tdc.py -h to see the full list of available arguments.
+
+-p PATH Specify the tc executable located at PATH to be used on this
+ test run
+-c Show the available test case categories in this test file
+-c CATEGORY Run only tests that belong to CATEGORY
+-f FILE Read test cases from the JSON file named FILE
+-l [CATEGORY] List all test cases in the JSON file. If CATEGORY is
+ specified, list test cases matching that category.
+-s ID Show the test case matching ID
+-e ID Execute the test case identified by ID
+-i Generate unique ID numbers for test cases with no existing
+ ID number
+
+
+ACKNOWLEDGEMENTS
+----------------
+
+Thanks to:
+
+Jamal Hadi Salim, for providing valuable test cases
+Keara Leibovitz, who wrote the CLI test driver that I used as a base for the
+ first version of the tc testing suite. This work was presented at
+ Netdev 1.2 Tokyo in October 2016.
+Samir Hussain, for providing help while I dove into Python for the first time
+ and being a second eye for this code.
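Putting the arguments above together, a typical session might look like this (the test ID comes from the actions tests added later in this series):

./tdc.py -c              # show the available categories
./tdc.py -c actions      # run only the actions tests
./tdc.py -s e89a         # show test case e89a
./tdc.py -e e89a         # execute test case e89a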
diff --git a/tools/testing/selftests/tc-testing/TODO.txt b/tools/testing/selftests/tc-testing/TODO.txt
new file mode 100644
index 0000000..6a266d8
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/TODO.txt
@@ -0,0 +1,10 @@
+tc Testing Suite To-Do list:
+
+- Determine what tc features are supported in the kernel. If features are not
+ present, prevent the related categories from running.
+
+- Add support for multiple versions of tc to run successively
+
+- Improve error messages when tdc aborts its run
+
+- Allow tdc to write its results to file
diff --git a/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt b/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt
new file mode 100644
index 0000000..4e09257
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt
@@ -0,0 +1,69 @@
+tdc - Adding test cases for tdc
+
+Author: Lucas Bates - lucasb@mojatatu.com
+
+ADDING TEST CASES
+-----------------
+
+User-defined tests should be added by defining a separate JSON file. This
+will help prevent conflicts when updating the repository. Refer to
+template.json for the required JSON format for test cases.
+
+Include the 'id' field, but do not assign a value. Running tdc with the -i
+option will generate a unique ID for that test case.
+
+tdc will recursively search the 'tc' subdirectory for .json files. Any
+test case files you create in these directories will automatically be included.
+If you wish to store your custom test cases elsewhere, be sure to run tdc
+with the -f argument and the path to your file.
+
+Be aware of required escape characters in the JSON data - particularly when
+defining the match pattern. Refer to the tctests.json file for examples when
+in doubt.
+
+
+TEST CASE STRUCTURE
+-------------------
+
+Each test case has required data:
+
+id: A unique alphanumeric value to identify a particular test case
+name: Descriptive name that explains the command under test
+category: A list of single-word descriptions covering what the command
+ under test is testing. Example: filter, actions, u32, gact, etc.
+setup: The list of commands required to ensure the command under test
+ succeeds. For example: if testing a filter, the command to create
+ the qdisc would appear here.
+cmdUnderTest: The tc command being tested itself.
+expExitCode: The code returned by the command under test upon its termination.
+ tdc will compare this value against the actual returned value.
+verifyCmd: The tc command to be run to verify successful execution.
+ For example: if the command under test creates a gact action,
+ verifyCmd should be "$TC actions show action gact"
+matchPattern: A regular expression to be applied against the output of the
+ verifyCmd to prove the command under test succeeded. This pattern
+ should be as specific as possible so that a false positive is not
+ matched.
+matchCount: How many times the regex in matchPattern should match. A value
+ of 0 is acceptable.
+teardown: The list of commands to clean up after the test is completed.
+ The environment should be returned to the same state as when
+ this test was started: qdiscs deleted, actions flushed, etc.
+
+
+SETUP/TEARDOWN ERRORS
+---------------------
+
+If an error is detected during the setup/teardown process, execution of the
+tests will immediately stop with an error message and the namespace in which
+the tests are run will be destroyed. This is to prevent inaccurate results
+in the test cases.
+
+Repeated failures of the setup/teardown may indicate a problem with the test
+case, or possibly even a bug in one of the commands that are not being tested.
+
+It's possible to include acceptable exit codes with the setup/teardown command
+so that it doesn't halt the script for an error that doesn't matter. Turn the
+individual command into a list, with the command being first, followed by all
+acceptable exit codes for the command.
+
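In practice the list form described above looks like this (this exact setup entry appears in the actions tests.json added later in this series):

[
    "$TC actions flush action gact",
    0,
    1,
    255
]

where 0, 1 and 255 are all treated as acceptable exit codes for the flush command.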
diff --git a/tools/testing/selftests/tc-testing/creating-testcases/template.json b/tools/testing/selftests/tc-testing/creating-testcases/template.json
new file mode 100644
index 0000000..8797174
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/creating-testcases/template.json
@@ -0,0 +1,40 @@
+[
+ {
+ "id": "",
+ "name": "",
+ "category": [
+ "",
+ ""
+ ],
+ "setup": [
+ ""
+ ],
+ "cmdUnderTest": "",
+ "expExitCode": "",
+ "verifyCmd": "",
+ "matchPattern": "",
+ "matchCount": "",
+ "teardown": [
+ ""
+ ]
+ },
+ {
+ "id": "",
+ "name": "",
+ "category": [
+ "",
+ ""
+ ],
+ "setup": [
+ ""
+ ],
+ "cmdUnderTest": "",
+ "expExitCode": "",
+ "verifyCmd": "",
+ "matchPattern": "",
+ "matchCount": "",
+ "teardown": [
+ ""
+ ]
+ }
+]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tests.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tests.json
new file mode 100644
index 0000000..af519bc
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tests.json
@@ -0,0 +1,1115 @@
+[
+ {
+ "id": "e89a",
+ "name": "Add valid pass action",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pass index 8",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action gact",
+ "matchPattern": "action order [0-9]*: gact action pass.*index 8 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
+ },
+ {
+ "id": "a02c",
+ "name": "Add valid pipe action",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pipe index 6",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action gact",
+ "matchPattern": "action order [0-9]*: gact action pipe.*index 6 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
+ },
+ {
+ "id": "feef",
+ "name": "Add valid reclassify action",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action reclassify index 5",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action gact",
+ "matchPattern": "action order [0-9]*: gact action reclassify.*index 5 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
+ },
+ {
+ "id": "8a7a",
+ "name": "Add valid drop action",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action drop index 30",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action gact",
+ "matchPattern": "action order [0-9]*: gact action drop.*index 30 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
+ },
+ {
+ "id": "9a52",
+ "name": "Add valid continue action",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action continue index 432",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action gact",
+ "matchPattern": "action order [0-9]*: gact action continue.*index 432 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
+ },
+ {
+ "id": "d700",
+ "name": "Add invalid action",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pump index 386",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action gact",
+ "matchPattern": "action order [0-9]*: gact action.*index 386 ref",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
+ },
+ {
+ "id": "9215",
+ "name": "Add action with duplicate index",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action pipe index 15"
+ ],
+ "cmdUnderTest": "$TC actions add action drop index 15",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action gact",
+ "matchPattern": "action order [0-9]*: gact action drop.*index 15 ref",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
+ },
+ {
+ "id": "798e",
+ "name": "Add action with index exceeding 32-bit maximum",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action drop index 4294967296",
+ "expExitCode": "255",
+ "verifyCmd": "actions list action gact",
+ "matchPattern": "action order [0-9]*: gact action drop.*index 4294967296 ref",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
+ },
+ {
+ "id": "22be",
+ "name": "Add action with index at 32-bit maximum",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action drop index 4294967295",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action gact",
+ "matchPattern": "action order [0-9]*: gact action drop.*index 4294967295 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
+ },
+ {
+ "id": "ac2a",
+ "name": "List actions",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action reclassify index 101",
+ "$TC actions add action reclassify index 102",
+ "$TC actions add action reclassify index 103",
+ "$TC actions add action reclassify index 104",
+ "$TC actions add action reclassify index 105"
+ ],
+ "cmdUnderTest": "$TC actions list action gact",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action gact",
+ "matchPattern": "action order [0-9]*: gact action reclassify",
+ "matchCount": "5",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
+ },
+ {
+ "id": "63ec",
+ "name": "Delete pass action",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action pass index 1"
+ ],
+ "cmdUnderTest": "$TC actions del action gact index 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action gact",
+ "matchPattern": "action order [0-9]*: gact action pass.*index 1 ref",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
+ },
+ {
+ "id": "46be",
+ "name": "Delete pipe action",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action pipe index 9"
+ ],
+ "cmdUnderTest": "$TC actions del action gact index 9",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action gact",
+ "matchPattern": "action order [0-9]*: gact action pipe.*index 9 ref",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
+ },
+ {
+ "id": "2e08",
+ "name": "Delete reclassify action",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action reclassify index 65536"
+ ],
+ "cmdUnderTest": "$TC actions del action gact index 65536",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action gact",
+ "matchPattern": "action order [0-9]*: gact action reclassify.*index 65536 ref",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
+ },
+ {
+ "id": "99c4",
+ "name": "Delete drop action",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action drop index 16"
+ ],
+ "cmdUnderTest": "$TC actions del action gact index 16",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action gact",
+ "matchPattern": "action order [0-9]*: gact action drop.*index 16 ref",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
+ },
+ {
+ "id": "fb6b",
+ "name": "Delete continue action",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action continue index 32"
+ ],
+ "cmdUnderTest": "$TC actions del action gact index 32",
+ "expExitCode": "0",
+ "verifyCmd": "actions list action gact",
+ "matchPattern": "action order [0-9]*: gact action continue.*index 32 ref",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
+ },
+ {
+ "id": "0eb3",
+ "name": "Delete non-existent action",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions del action gact index 2",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action gact",
+ "matchPattern": "action order [0-9]*: gact action",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
+ },
+ {
+ "id": "5124",
+ "name": "Add mirred mirror to egress action",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mirred egress mirror index 1 dev lo",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action mirred",
+ "matchPattern": "action order [0-9]*: mirred \\(Egress Mirror to device lo\\).*index 1 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "6fb4",
+ "name": "Add mirred redirect to egress action",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mirred egress redirect index 2 dev lo action pipe",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action mirred",
+ "matchPattern": "action order [0-9]*: mirred \\(Egress Redirect to device lo\\).*index 2 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "ba38",
+ "name": "Get mirred actions",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action mirred egress mirror index 1 dev lo",
+ "$TC actions add action mirred egress redirect index 2 dev lo"
+ ],
+ "cmdUnderTest": "$TC actions show action mirred",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action mirred",
+ "matchPattern": "[Mirror|Redirect] to device lo",
+ "matchCount": "2",
+ "teardown": [
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "d7c0",
+ "name": "Add invalid mirred direction",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mirred inbound mirror index 20 dev lo",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action mirred",
+ "matchPattern": "action order [0-9]*: mirred \\(.*to device lo\\).*index 20 ref",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "e213",
+ "name": "Add invalid mirred action",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mirred egress remirror index 20 dev lo",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action mirred",
+ "matchPattern": "action order [0-9]*: mirred \\(Egress.*to device lo\\).*index 20 ref",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "2d89",
+ "name": "Add mirred action with invalid device",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mirred egress mirror index 20 dev eltoh",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action mirred",
+ "matchPattern": "action order [0-9]*: mirred \\(.*to device eltoh\\).*index 20 ref",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "300b",
+ "name": "Add mirred action with duplicate index",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action mirred egress redirect index 15 dev lo"
+ ],
+ "cmdUnderTest": "$TC actions add action mirred egress mirror index 15 dev lo",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action mirred",
+ "matchPattern": "action order [0-9]*: mirred \\(.*to device lo\\).*index 15 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "a70e",
+ "name": "Delete mirred mirror action",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action mirred egress mirror index 5 dev lo"
+ ],
+ "cmdUnderTest": "$TC actions del action mirred index 5",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action mirred",
+ "matchPattern": "action order [0-9]*: mirred \\(Egress Mirror to device lo\\).*index 5 ref",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "3fb3",
+ "name": "Delete mirred redirect action",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action mirred egress redirect index 5 dev lo"
+ ],
+ "cmdUnderTest": "$TC actions del action mirred index 5",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action mirred",
+ "matchPattern": "action order [0-9]*: mirred \\(Egress Redirect to device lo\\).*index 5 ref",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "b078",
+ "name": "Add simple action",
+ "category": [
+ "actions",
+ "simple"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action simple",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action simple sdata \"A triumph\" index 60",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action simple",
+ "matchPattern": "action order [0-9]*: Simple <A triumph>.*index 60 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action simple"
+ ]
+ },
+ {
+ "id": "6d4c",
+ "name": "Add simple action with duplicate index",
+ "category": [
+ "actions",
+ "simple"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action simple",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action simple sdata \"Aruba\" index 4"
+ ],
+ "cmdUnderTest": "$TC actions add action simple sdata \"Jamaica\" index 4",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action simple",
+ "matchPattern": "action order [0-9]*: Simple <Jamaica>.*ref",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action simple"
+ ]
+ },
+ {
+ "id": "2542",
+ "name": "List simple actions",
+ "category": [
+ "actions",
+ "simple"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action simple",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action simple sdata \"Rock\"",
+ "$TC actions add action simple sdata \"Paper\"",
+ "$TC actions add action simple sdata \"Scissors\" index 98"
+ ],
+ "cmdUnderTest": "$TC actions list action simple",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action simple",
+ "matchPattern": "action order [0-9]*: Simple <[A-Z][a-z]*>",
+ "matchCount": "3",
+ "teardown": [
+ "$TC actions flush action simple"
+ ]
+ },
+ {
+ "id": "ea67",
+ "name": "Delete simple action",
+ "category": [
+ "actions",
+ "simple"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action simple",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action simple sdata \"Blinkenlights\" index 1"
+ ],
+ "cmdUnderTest": "$TC actions delete action simple index 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action simple",
+ "matchPattern": "action order [0-9]*: Simple <Blinkenlights>.*index 1 ref",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action simple"
+ ]
+ },
+ {
+ "id": "8ff1",
+ "name": "Flush simple actions",
+ "category": [
+ "actions",
+ "simple"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action simple",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action simple sdata \"Kirk\"",
+ "$TC actions add action simple sdata \"Spock\" index 50",
+ "$TC actions add action simple sdata \"McCoy\" index 9"
+ ],
+ "cmdUnderTest": "$TC actions flush action simple",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action simple",
+ "matchPattern": "action order [0-9]*: Simple <[A-Z][a-z]*>",
+ "matchCount": "0",
+ "teardown": [
+ ""
+ ]
+ },
+ {
+ "id": "6236",
+ "name": "Add skbedit action with valid mark",
+ "category": [
+ "actions",
+ "skbedit"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action skbedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action skbedit mark 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action skbedit",
+ "matchPattern": "action order [0-9]*: skbedit mark 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action skbedit"
+ ]
+ },
+ {
+ "id": "407b",
+ "name": "Add skbedit action with invalid mark",
+ "category": [
+ "actions",
+ "skbedit"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action skbedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action skbedit mark 666777888999",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action skbedit",
+ "matchPattern": "action order [0-9]*: skbedit mark",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action skbedit"
+ ]
+ },
+ {
+ "id": "081d",
+ "name": "Add skbedit action with priority",
+ "category": [
+ "actions",
+ "skbedit"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action skbedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action skbedit prio 99",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action skbedit",
+ "matchPattern": "action order [0-9]*: skbedit priority :99",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action skbedit"
+ ]
+ },
+ {
+ "id": "cc37",
+ "name": "Add skbedit action with invalid priority",
+ "category": [
+ "actions",
+ "skbedit"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action skbedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action skbedit prio foo",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action skbedit",
+ "matchPattern": "action order [0-9]*: skbedit priority",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action skbedit"
+ ]
+ },
+ {
+ "id": "3c95",
+ "name": "Add skbedit action with queue_mapping",
+ "category": [
+ "actions",
+ "skbedit"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action skbedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action skbedit queue_mapping 909",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action skbedit",
+ "matchPattern": "action order [0-9]*: skbedit queue_mapping 909",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action skbedit"
+ ]
+ },
+ {
+ "id": "985c",
+ "name": "Add skbedit action with invalid queue_mapping",
+ "category": [
+ "actions",
+ "skbedit"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action skbedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action skbedit queue_mapping 67000",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action skbedit",
+ "matchPattern": "action order [0-9]*: skbedit queue_mapping",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action skbedit"
+ ]
+ },
+ {
+ "id": "224f",
+ "name": "Add skbedit action with ptype host",
+ "category": [
+ "actions",
+ "skbedit"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action skbedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action skbedit ptype host",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action skbedit",
+ "matchPattern": "action order [0-9]*: skbedit ptype host",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action skbedit"
+ ]
+ },
+ {
+ "id": "d1a3",
+ "name": "Add skbedit action with ptype otherhost",
+ "category": [
+ "actions",
+ "skbedit"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action skbedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action skbedit ptype otherhost",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action skbedit",
+ "matchPattern": "action order [0-9]*: skbedit ptype otherhost",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action skbedit"
+ ]
+ },
+ {
+ "id": "b9c6",
+ "name": "Add skbedit action with invalid ptype",
+ "category": [
+ "actions",
+ "skbedit"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action skbedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action skbedit ptype openair",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action skbedit",
+ "matchPattern": "action order [0-9]*: skbedit ptype openair",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action skbedit"
+ ]
+ },
+ {
+ "id": "5172",
+ "name": "List skbedit actions",
+ "category": [
+ "actions",
+ "skbedit"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action skbedit",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action skbedit ptype otherhost",
+ "$TC actions add action skbedit ptype broadcast",
+ "$TC actions add action skbedit mark 59",
+ "$TC actions add action skbedit mark 409"
+ ],
+ "cmdUnderTest": "$TC actions list action skbedit",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action skbedit",
+ "matchPattern": "action order [0-9]*: skbedit",
+ "matchCount": "4",
+ "teardown": [
+ "$TC actions flush action skbedit"
+ ]
+ },
+ {
+ "id": "a6d6",
+ "name": "Add skbedit action with index",
+ "category": [
+ "actions",
+ "skbedit"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action skbedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action skbedit mark 808 index 4040404040",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action skbedit",
+ "matchPattern": "index 4040404040",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action skbedit"
+ ]
+ },
+ {
+ "id": "38f3",
+ "name": "Delete skbedit action",
+ "category": [
+ "actions",
+ "skbedit"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action skbedit",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action skbedit mark 42 index 9009"
+ ],
+ "cmdUnderTest": "$TC actions del action skbedit index 9009",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action skbedit",
+ "matchPattern": "action order [0-9]*: skbedit mark 42",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action skbedit"
+ ]
+ },
+ {
+ "id": "ce97",
+ "name": "Flush skbedit actions",
+ "category": [
+ "actions",
+ "skbedit"
+ ],
+ "setup": [
+ "$TC actions add action skbedit mark 500",
+ "$TC actions add action skbedit mark 501",
+ "$TC actions add action skbedit mark 502",
+ "$TC actions add action skbedit mark 503",
+ "$TC actions add action skbedit mark 504",
+ "$TC actions add action skbedit mark 505",
+ "$TC actions add action skbedit mark 506"
+ ],
+ "cmdUnderTest": "$TC actions flush action skbedit",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action skbedit",
+ "matchPattern": "action order [0-9]*: skbedit",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action skbedit"
+ ]
+ },
+ {
+ "id": "f02c",
+ "name": "Replace gact action",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action drop index 10",
+ "$TC actions add action drop index 12"
+ ],
+ "cmdUnderTest": "$TC actions replace action ok index 12",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions ls action gact",
+ "matchPattern": "action order [0-9]*: gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
+ },
+ {
+ "id": "525f",
+ "name": "Get gact action by index",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action drop index 3900800700"
+ ],
+ "cmdUnderTest": "$TC actions get action gact index 3900800700",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action gact index 3900800700",
+ "matchPattern": "index 3900800700",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
+ }
+]
\ No newline at end of file
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
new file mode 100644
index 0000000..c727b96
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
@@ -0,0 +1,21 @@
+[
+ {
+ "id": "e9a3",
+ "name": "Add u32 with source match",
+ "category": [
+ "filter",
+ "u32"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 u32 match ip src 127.0.0.1/32 flowid 1:1 action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+ "matchPattern": "match 7f000002/ffffffff at 12",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ }
+]
\ No newline at end of file
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
new file mode 100755
index 0000000..cd61b78
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -0,0 +1,413 @@
+#!/usr/bin/env python3
+
+"""
+tdc.py - Linux tc (Traffic Control) unit test driver
+
+Copyright (C) 2017 Lucas Bates <lucasb@mojatatu.com>
+"""
+
+import re
+import os
+import sys
+import argparse
+import json
+import subprocess
+from collections import OrderedDict
+from string import Template
+
+from tdc_config import *
+from tdc_helper import *
+
+
+USE_NS = True
+
+
+def replace_keywords(cmd):
+ """
+ For a given executable command, substitute any known
+ variables contained within NAMES with the correct values
+ """
+ tcmd = Template(cmd)
+ subcmd = tcmd.safe_substitute(NAMES)
+ return subcmd
+
+
+def exec_cmd(command, nsonly=True):
+ """
+ Perform any required modifications on an executable command, then run
+ it in a subprocess and return the results.
+ """
+ if (USE_NS and nsonly):
+ command = 'ip netns exec $NS ' + command
+
+ if '$' in command:
+ command = replace_keywords(command)
+
+ proc = subprocess.Popen(command,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ (rawout, serr) = proc.communicate()
+
+ if proc.returncode != 0:
+ foutput = serr.decode("utf-8")
+ else:
+ foutput = rawout.decode("utf-8")
+
+ proc.stdout.close()
+ proc.stderr.close()
+ return proc, foutput
+
+
+def prepare_env(cmdlist):
+ """
+ Execute the setup/teardown commands for a test case. Optionally
+ terminate test execution if the command fails.
+ """
+ for cmdinfo in cmdlist:
+ if (type(cmdinfo) == list):
+ exit_codes = cmdinfo[1:]
+ cmd = cmdinfo[0]
+ else:
+ exit_codes = [0]
+ cmd = cmdinfo
+
+ if (len(cmd) == 0):
+ continue
+
+ (proc, foutput) = exec_cmd(cmd)
+
+ if proc.returncode not in exit_codes:
+ print()
+ print("Could not execute:")
+ print(cmd)
+ print("\nError message:")
+ print(foutput)
+ print("\nAborting test run.")
+ ns_destroy()
+ exit(1)
+
+
+def test_runner(filtered_tests):
+ """
+ Driver function for the unit tests.
+
+ Prints information about the tests being run, executes the setup and
+ teardown commands and the command under test itself. Also determines
+ success/failure based on the information in the test case and generates
+ TAP output accordingly.
+ """
+ testlist = filtered_tests
+ tcount = len(testlist)
+ index = 1
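+ # TAP plan line, e.g. "1..12" when twelve tests are queued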
+ tap = str(index) + ".." + str(tcount) + "\n"
+
+ for tidx in testlist:
+ result = True
+ tresult = ""
+ print("Test " + tidx["id"] + ": " + tidx["name"])
+ prepare_env(tidx["setup"])
+ (p, procout) = exec_cmd(tidx["cmdUnderTest"])
+ exit_code = p.returncode
+
+ if (exit_code != int(tidx["expExitCode"])):
+ result = False
+ print("exit:", exit_code, int(tidx["expExitCode"]))
+ print(procout)
+ else:
+ match_pattern = re.compile(str(tidx["matchPattern"]), re.DOTALL)
+ (p, procout) = exec_cmd(tidx["verifyCmd"])
+ match_index = re.findall(match_pattern, procout)
+ if len(match_index) != int(tidx["matchCount"]):
+ result = False
+
+ if result:
+ tresult += "ok "
+ else:
+ tresult += "not ok "
+ tap += tresult + str(index) + " " + tidx["id"] + " " + tidx["name"] + "\n"
+
+ if not result:
+ tap += procout
+
+ prepare_env(tidx["teardown"])
+ index += 1
+
+ return tap
+
+
+def ns_create():
+ """
+ Create the network namespace in which the tests will be run and set up
+ the required network devices for it.
+ """
+ if (USE_NS):
+ cmd = 'ip netns add $NS'
+ exec_cmd(cmd, False)
+ cmd = 'ip link add $DEV0 type veth peer name $DEV1'
+ exec_cmd(cmd, False)
+ cmd = 'ip link set $DEV1 netns $NS'
+ exec_cmd(cmd, False)
+ cmd = 'ip link set $DEV0 up'
+ exec_cmd(cmd, False)
+ cmd = 'ip -n $NS link set $DEV1 up'
+ exec_cmd(cmd, False)
+
+
+def ns_destroy():
+ """
+ Destroy the network namespace for testing (and any associated network
+ devices as well)
+ """
+ if (USE_NS):
+ cmd = 'ip netns delete $NS'
+ exec_cmd(cmd, False)
+
+
+def has_blank_ids(idlist):
+ """
+ Search the list for empty ID fields and return true/false accordingly.
+ """
+ return not(all(k for k in idlist))
+
+
+def load_from_file(filename):
+ """
+ Open the JSON file containing the test cases and return them as an
+ ordered dictionary object.
+ """
+ with open(filename) as test_data:
+ testlist = json.load(test_data, object_pairs_hook=OrderedDict)
+ idlist = get_id_list(testlist)
+ if (has_blank_ids(idlist)):
+ for k in testlist:
+ k['filename'] = filename
+ return testlist
+
+
+def args_parse():
+ """
+ Create the argument parser.
+ """
+ parser = argparse.ArgumentParser(description='Linux TC unit tests')
+ return parser
+
+
+def set_args(parser):
+ """
+ Set the command line arguments for tdc.
+ """
+ parser.add_argument('-p', '--path', type=str,
+ help='The full path to the tc executable to use')
+ parser.add_argument('-c', '--category', type=str, nargs='?', const='+c',
+ help='Run tests only from the specified category, or if no category is specified, list known categories.')
+ parser.add_argument('-f', '--file', type=str,
+ help='Run tests from the specified file')
+ parser.add_argument('-l', '--list', type=str, nargs='?', const="", metavar='CATEGORY',
+ help='List all test cases, or those only within the specified category')
+ parser.add_argument('-s', '--show', type=str, nargs=1, metavar='ID', dest='showID',
+ help='Display the test case with specified id')
+ parser.add_argument('-e', '--execute', type=str, nargs=1, metavar='ID',
+ help='Execute the single test case with specified ID')
+ parser.add_argument('-i', '--id', action='store_true', dest='gen_id',
+ help='Generate ID numbers for new test cases')
+ return parser
+
+
+def check_default_settings(args):
+ """
+ Process any arguments overriding the default settings, and ensure the
+ settings are correct.
+ """
+ # Allow for overriding specific settings
+ global NAMES
+
+ if args.path is not None:
+ NAMES['TC'] = args.path
+ if not os.path.isfile(NAMES['TC']):
+ print("The specified tc path " + NAMES['TC'] + " does not exist.")
+ exit(1)
+
+
+def get_id_list(alltests):
+ """
+ Generate a list of all IDs in the test cases.
+ """
+ return [x["id"] for x in alltests]
+
+
+def check_case_id(alltests):
+ """
+ Check for duplicate test case IDs.
+ """
+ idl = get_id_list(alltests)
+ return [x for x in idl if idl.count(x) > 1]
+
+
+def does_id_exist(alltests, newid):
+ """
+ Check if a given ID already exists in the list of test cases.
+ """
+ idl = get_id_list(alltests)
+ return (any(newid == x for x in idl))
+
+
+def generate_case_ids(alltests):
+ """
+ If a test case has a blank ID field, generate a random hex ID for it
+ and then write the test cases back to disk.
+ """
+ import random
+ for c in alltests:
+ if (c["id"] == ""):
+ while True:
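+ # draw a random 16-bit value rendered as four hex digits, e.g. "1b9f"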
+ newid = str('%04x' % random.randrange(16**4))
+ if (does_id_exist(alltests, newid)):
+ continue
+ else:
+ c['id'] = newid
+ break
+
+ ufilename = []
+ for c in alltests:
+ if ('filename' in c):
+ ufilename.append(c['filename'])
+ ufilename = get_unique_item(ufilename)
+ for f in ufilename:
+ testlist = []
+ for t in alltests:
+ if 'filename' in t:
+ if t['filename'] == f:
+ del t['filename']
+ testlist.append(t)
+ outfile = open(f, "w")
+ json.dump(testlist, outfile, indent=4)
+ outfile.close()
+
+
+def get_test_cases(args):
+ """
+ If a test case file is specified, retrieve tests from that file.
+ Otherwise, glob for all json files in subdirectories and load from
+ each one.
+ """
+ import fnmatch
+ if args.file is not None:
+ if not os.path.isfile(args.file):
+ print("The specified test case file " + args.file + " does not exist.")
+ exit(1)
+ flist = [args.file]
+ else:
+ flist = []
+ for root, dirnames, filenames in os.walk('tc-tests'):
+ for filename in fnmatch.filter(filenames, '*.json'):
+ flist.append(os.path.join(root, filename))
+ alltests = list()
+ for casefile in flist:
+ alltests = alltests + (load_from_file(casefile))
+ return alltests
+
+
+def set_operation_mode(args):
+ """
+ Load the test case data and process remaining arguments to determine
+ what the script should do for this run, and call the appropriate
+ function.
+ """
+ alltests = get_test_cases(args)
+
+ if args.gen_id:
+ idlist = get_id_list(alltests)
+ if (has_blank_ids(idlist)):
+ generate_case_ids(alltests)
+ else:
+ print("No empty ID fields found in test files.")
+ exit(0)
+
+ duplicate_ids = check_case_id(alltests)
+ if (len(duplicate_ids) > 0):
+ print("The following test case IDs are not unique:")
+ print(str(set(duplicate_ids)))
+ print("Please correct them before continuing.")
+ exit(1)
+
+ ucat = get_test_categories(alltests)
+
+ if args.showID:
+ show_test_case_by_id(alltests, args.showID[0])
+ exit(0)
+
+ if args.execute:
+ target_id = args.execute[0]
+ else:
+ target_id = ""
+
+ if args.category:
+ if (args.category == '+c'):
+ print("Available categories:")
+ print_sll(ucat)
+ exit(0)
+ else:
+ target_category = args.category
+ else:
+ target_category = ""
+
+
+ testcases = get_categorized_testlist(alltests, ucat)
+
+ if args.list is not None:
+ if (len(args.list) == 0):
+ list_test_cases(alltests)
+ exit(0)
+ elif (len(args.list) > 0):
+ if (args.list not in ucat):
+ print("Unknown category " + args.list)
+ print("Available categories:")
+ print_sll(ucat)
+ exit(1)
+ list_test_cases(testcases[args.list])
+ exit(0)
+
+ if (os.geteuid() != 0):
+ print("This script must be run with root privileges.\n")
+ exit(1)
+
+ ns_create()
+
+ if (len(target_category) == 0):
+ if (len(target_id) > 0):
+ alltests = list(filter(lambda x: target_id in x['id'], alltests))
+ if (len(alltests) == 0):
+ print("Cannot find a test case with ID matching " + target_id)
+ exit(1)
+ catresults = test_runner(alltests)
+ print("All test results: " + "\n\n" + catresults)
+ elif (len(target_category) > 0):
+ if (target_category not in ucat):
+ print("Specified category is not present in this file.")
+ exit(1)
+ else:
+ catresults = test_runner(testcases[target_category])
+ print("Category " + target_category + "\n\n" + catresults)
+
+ ns_destroy()
+
+
+def main():
+ """
+ Start of execution; set up argument parser and get the arguments,
+ and start operations.
+ """
+ parser = args_parse()
+ parser = set_args(parser)
+ (args, remaining) = parser.parse_known_args()
+ check_default_settings(args)
+
+ set_operation_mode(args)
+
+ exit(0)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/testing/selftests/tc-testing/tdc_config.py b/tools/testing/selftests/tc-testing/tdc_config.py
new file mode 100644
index 0000000..0108737
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tdc_config.py
@@ -0,0 +1,17 @@
+"""
+tdc_config.py - tdc user-specified values
+
+Copyright (C) 2017 Lucas Bates <lucasb@mojatatu.com>
+"""
+
+# Dictionary containing all values that can be substituted in executable
+# commands.
+NAMES = {
+ # Substitute your own tc path here
+ 'TC': '/sbin/tc',
+ # Name of veth devices to be created for the namespace
+ 'DEV0': 'v0p0',
+ 'DEV1': 'v0p1',
+ # Name of the namespace to use
+ 'NS': 'tcut'
+ }
diff --git a/tools/testing/selftests/tc-testing/tdc_helper.py b/tools/testing/selftests/tc-testing/tdc_helper.py
new file mode 100644
index 0000000..c3254f8
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tdc_helper.py
@@ -0,0 +1,75 @@
+"""
+tdc_helper.py - tdc helper functions
+
+Copyright (C) 2017 Lucas Bates <lucasb@mojatatu.com>
+"""
+
+def get_categorized_testlist(alltests, ucat):
+ """ Sort the master test list into categories. """
+ testcases = dict()
+
+ for category in ucat:
+ testcases[category] = list(filter(lambda x: category in x['category'], alltests))
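+ # testcases maps each category name to the list of cases tagged with it,
+ # e.g. {'gact': [...], 'mirred': [...]}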
+
+ return(testcases)
+
+
+def get_unique_item(lst):
+ """ For a list, return a set of the unique items in the list. """
+ return list(set(lst))
+
+
+def get_test_categories(alltests):
+ """ Discover all unique test categories present in the test case file. """
+ ucat = []
+ for t in alltests:
+ ucat.extend(get_unique_item(t['category']))
+ ucat = get_unique_item(ucat)
+ return ucat
+
+def list_test_cases(testlist):
+ """ Print IDs and names of all test cases. """
+ for curcase in testlist:
+ print(curcase['id'] + ': (' + ', '.join(curcase['category']) + ") " + curcase['name'])
+
+
+def list_categories(testlist):
+ """ Show all categories that are present in a test case file. """
+ categories = get_test_categories(testlist)
+ print("Available categories:")
+ print(", ".join(str(s) for s in categories))
+ print("")
+
+
+def print_list(cmdlist):
+ """ Print a list of strings prepended with a tab. """
+ for l in cmdlist:
+ if (type(l) == list):
+ print("\t" + str(l[0]))
+ else:
+ print("\t" + str(l))
+
+
+def print_sll(items):
+ """ Print a list of items, one item per line. """
+ print("\n".join(str(s) for s in items))
+
+
+def print_test_case(tcase):
+ """ Pretty-printing of a given test case. """
+ for k in tcase.keys():
+ if (type(tcase[k]) == list):
+ print(k + ":")
+ print_list(tcase[k])
+ else:
+ print(k + ": " + tcase[k])
+
+
+def show_test_case_by_id(testlist, caseID):
+ """ Find the specified test case to pretty-print. """
+ if not any(d.get('id', None) == caseID for d in testlist):
+ print("That ID does not exist.")
+ exit(1)
+ else:
+ print_test_case(next((d for d in testlist if d['id'] == caseID)))
+
+